diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 072afc90..4cb96d2c 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -10,12 +10,12 @@ on: jobs: # Go services - fluid CLI - fluid: + fluid-cli: name: Fluid CLI runs-on: ubuntu-latest defaults: run: - working-directory: fluid + working-directory: fluid-cli steps: - uses: actions/checkout@v4 @@ -24,12 +24,7 @@ jobs: uses: actions/setup-go@v5 with: go-version: "1.24" - cache-dependency-path: fluid/go.sum - - - name: Install system dependencies - run: | - sudo apt-get update - sudo apt-get install -y libvirt-dev + cache-dependency-path: fluid-cli/go.sum - name: Download dependencies run: go mod download @@ -38,22 +33,22 @@ jobs: uses: golangci/golangci-lint-action@v6 with: version: latest - working-directory: fluid - args: --build-tags=libvirt --timeout=10m + working-directory: fluid-cli + args: --timeout=10m - name: Test - run: go test -v -race --tags libvirt ./... + run: go test -v -race ./... - name: Build - run: go build --tags libvirt -o bin/fluid ./cmd/fluid + run: go build -o bin/fluid ./cmd/fluid-cli - # Go services - fluid-remote API - fluid-remote: - name: Fluid Remote API + # Go services - fluid daemon + fluid-daemon: + name: Fluid Daemon runs-on: ubuntu-latest defaults: run: - working-directory: fluid-remote + working-directory: fluid-daemon steps: - uses: actions/checkout@v4 @@ -62,12 +57,39 @@ jobs: uses: actions/setup-go@v5 with: go-version: "1.24" - cache-dependency-path: fluid-remote/go.sum + cache-dependency-path: fluid-daemon/go.sum - - name: Install system dependencies - run: | - sudo apt-get update - sudo apt-get install -y libvirt-dev + - name: Download dependencies + run: go mod download + + - name: Lint + uses: golangci/golangci-lint-action@v6 + with: + version: latest + working-directory: fluid-daemon + + - name: Test + run: go test -v -race ./... 
+ + - name: Build + run: go build -o bin/fluid-daemon ./cmd/fluid-daemon + + # Go services - API + api: + name: API + runs-on: ubuntu-latest + defaults: + run: + working-directory: api + + steps: + - uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: "1.24" + cache-dependency-path: api/go.sum - name: Download dependencies run: go mod download @@ -76,14 +98,13 @@ jobs: uses: golangci/golangci-lint-action@v6 with: version: latest - working-directory: fluid-remote - args: --build-tags=libvirt + working-directory: api - name: Test - run: go test -v -race --tags libvirt ./... + run: go test -v -race ./... - name: Build - run: go build --tags libvirt -o bin/fluid-remote ./cmd/api + run: go build -o bin/api ./cmd/server # Python SDK sdk: @@ -177,38 +198,3 @@ jobs: - name: Build run: bun run build - - # Landing page - landing-page: - name: Landing Page - runs-on: ubuntu-latest - defaults: - run: - working-directory: landing-page - - steps: - - uses: actions/checkout@v4 - - - name: Set up Bun - uses: oven-sh/setup-bun@v2 - with: - bun-version: latest - - - name: Install dependencies - run: bun install - - - name: Lint - run: bun run lint - - - name: Astro check - run: bun run check - - - name: Type check - run: bun run typecheck - - - name: Build - run: bun run build - env: - PUBLIC_POSTHOG_API_KEY: ${{ secrets.POSTHOG_API_KEY }} - PUBLIC_POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }} - PUBLIC_POSTHOG_DEFAULTS: ${{ secrets.POSTHOG_DEFAULTS }} diff --git a/.goreleaser.yaml b/.goreleaser.yaml index e1be765e..dde2b2f4 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -3,15 +3,34 @@ version: 2 project_name: fluid before: hooks: - - sh -c "cd fluid-remote && go mod tidy" - - sh -c "cd fluid && go mod tidy" + - sh -c "cd api && go mod tidy" + - sh -c "cd fluid-daemon && go mod tidy" + - sh -c "cd fluid-cli && go mod tidy" builds: - - id: fluid-remote - dir: fluid-remote - main: ./cmd/api/main.go - binary: fluid-remote + - id: api + 
dir: api + main: ./cmd/server + binary: api env: - - CGO_ENABLED=1 + - CGO_ENABLED=0 + goos: + - linux + goarch: + - amd64 + - arm64 + flags: + - -trimpath + ldflags: + - -s -w + - -X main.version={{ .Version }} + - -X main.commit={{ .Commit }} + - -X main.date={{ .Date }} + - id: fluid-daemon + dir: fluid-daemon + main: ./cmd/fluid-daemon + binary: fluid-daemon + env: + - CGO_ENABLED=0 goos: - linux goarch: @@ -19,34 +38,17 @@ builds: - arm64 flags: - -trimpath - tags: - - libvirt ldflags: - -s -w - -X main.version={{ .Version }} - -X main.commit={{ .Commit }} - -X main.date={{ .Date }} - overrides: - - goos: linux - goarch: arm64 - env: - - CC=aarch64-linux-gnu-gcc - - CXX=aarch64-linux-gnu-g++ - - CGO_ENABLED=1 - - CGO_LDFLAGS=-L/usr/lib/aarch64-linux-gnu - - PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig - - goos: linux - goarch: amd64 - env: - - CC=gcc - - CXX=g++ - - CGO_ENABLED=1 - id: fluid - dir: fluid - main: ./cmd/fluid + dir: fluid-cli + main: ./cmd/fluid-cli binary: fluid env: - - CGO_ENABLED=1 + - CGO_ENABLED=0 goos: - linux goarch: @@ -54,34 +56,25 @@ builds: - arm64 flags: - -trimpath - tags: - - libvirt ldflags: - -s -w - -X main.version={{ .Version }} - -X main.commit={{ .Commit }} - -X main.date={{ .Date }} - overrides: - - goos: linux - goarch: arm64 - env: - - CC=aarch64-linux-gnu-gcc - - CXX=aarch64-linux-gnu-g++ - - CGO_ENABLED=1 - - CGO_LDFLAGS=-L/usr/lib/aarch64-linux-gnu - - PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig - - goos: linux - goarch: amd64 - env: - - CC=gcc - - CXX=g++ - - CGO_ENABLED=1 archives: - - id: fluid-remote + - id: api builds: - - fluid-remote + - api name_template: >- - fluid-remote_{{ .Version }}_{{ .Os }}_{{ .Arch }} + api_{{ .Version }}_{{ .Os }}_{{ .Arch }} + files: + - README.md + - LICENSE + - id: fluid-daemon + builds: + - fluid-daemon + name_template: >- + fluid-daemon_{{ .Version }}_{{ .Os }}_{{ .Arch }} files: - README.md - LICENSE @@ -134,15 +127,15 @@ changelog: - "^docs:" - "^test:" nfpms: - 
- id: fluid-remote - package_name: fluid-remote + - id: api + package_name: api builds: - - fluid-remote + - api bindir: /usr/local/bin vendor: "Fluid.sh" homepage: "https://github.com/aspectrr/fluid.sh" maintainer: "Collin Pfeifer " - description: "Control-plane API for managing libvirt-based VM sandboxes" + description: "Control-plane API for fluid.sh sandbox management" license: "MIT" formats: - deb @@ -150,6 +143,21 @@ nfpms: dependencies: - openssh-client - postgresql-client + - id: fluid-daemon + package_name: fluid-daemon + builds: + - fluid-daemon + bindir: /usr/local/bin + vendor: "Fluid.sh" + homepage: "https://github.com/aspectrr/fluid.sh" + maintainer: "Collin Pfeifer " + description: "Background daemon for managing VM sandboxes on a host" + license: "MIT" + formats: + - deb + - rpm + dependencies: + - openssh-client - id: fluid package_name: fluid builds: diff --git a/CLAUDE.md b/CLAUDE.md index ac27b8b9..39490284 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,6 +1,6 @@ # fluid.sh -Autonomous AI agents for infrastructure -- with human approval. +Autonomous AI agents for infrastructure - with human approval. ## What This Is @@ -9,11 +9,12 @@ fluid.sh lets AI agents do infrastructure work in isolated VM sandboxes. Agent w ## Project Structure ``` -fluid/ # Go CLI & API - VM management via libvirt -web/ # React - UI for monitoring/approval -sdk/ # Python SDK - Build agents -examples/ # Working agent examples -landing-page/ # Astro - Marketing site (fluid.sh) +fluid-cli/ # Go CLI - Interactive TUI agent + MCP server +fluid-daemon/ # Go - Background sandbox management daemon +api/ # Go - Control plane REST API + gRPC server +web/ # React - Dashboard UI for monitoring/approval +demo-server/ # Go - WebSocket demo server for interactive docs +proto/ # Protobuf definitions for gRPC services ``` ## Testing Required @@ -23,15 +24,17 @@ Every code change needs tests. See project-specific AGENTS.md files for details. 
## Quick Reference ```bash -docker-compose up --build # Start everything -cd fluid && make test # Test API -cd sdk/fluid-sdk-py && pytest # Test SDK +mprocs # Start all services for dev +cd fluid-cli && make test # Test CLI +cd fluid-daemon && make test # Test daemon +cd api && make test # Test API +cd web && bun run build # Build web ``` ## Project Docs -- @fluid/AGENTS.md -- @sdk/AGENTS.md +- @fluid-cli/AGENTS.md - @web/AGENTS.md -- @examples/agent-example/AGENTS.md -- @landing-page/AGENTS.md +- @api/AGENTS.md +- @fluid-daemon/AGENTS.md +- @demo-server/AGENTS.md diff --git a/README.md b/README.md index 596c9a3a..692fee3f 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,17 @@
-# fluid.sh 🌊 +# 🌊 fluid.sh -### Claude Code for Managing and Debugging VMs +### Claude Code for Debugging VMs [![Commit Activity](https://img.shields.io/github/commit-activity/m/aspectrr/fluid.sh?color=blue)](https://github.com/aspectrr/fluid.sh/commits/main) [![License](https://img.shields.io/github/license/aspectrr/fluid.sh?color=blue)](https://github.com/aspectrr/fluid.sh/blob/main/LICENSE) [![Discord](https://img.shields.io/discord/1465124928650215710?label=discord)](https://discord.gg/4WGGXJWm8J) [![GitHub stars](https://img.shields.io/github/stars/aspectrr/fluid.sh)](https://github.com/aspectrr/fluid.sh) -Fluid comes in two flavors: -A local [CLI Agent](#fluid-cli) that connects to remote KVM hosts from your local host +Fluid is an AI agent built for the core steps of debugging infrastructure. Read-Only mode for getting context, Create a sandbox and make edits to test changes. Create an Ansible Playbook to recreate on prod. -An [Agent API](#fluid-remote) that connects to KVM hosts and can handle tens to thousands of concurrent agent sessions. - -Choose your own adventure πŸ§™β€β™‚οΈ - -[Features](#features) [Quick Start](#quick-start) [Demo](#demo-of-cli-agent) [Documentation](#documentation) +[Features](#features) | [Quick Start](#quick-start) | [Demo](#demo) | [Docs](https://fluid.sh/docs/quickstart)
@@ -24,36 +19,21 @@ Choose your own adventure πŸ§™β€β™‚οΈ ## Problem -AI agents are ready to do infrastructure work, but they can't touch prod: - -- Agents can install packages, configure services, write scripts--autonomously -- But one mistake on production and you're getting paged at 3 AM to fix it -- So we limit agents to chatbots instead of letting them manage and debug on their own +AI agents can install packages, configure services, write scripts - autonomously. But one mistake on production and you're getting paged at 3 AM. So we limit agents to chatbots instead of letting them do real work. ## Solution -**fluid.sh** lets AI agents work autonomously in isolated VMs, then a human approves before anything touches production: +**fluid.sh** gives agents full root access in isolated VM sandboxes. They work autonomously. When done, a human reviews the diff and approves an auto-generated Ansible playbook before anything touches production. ``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Fluid Workflow β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ Agent │────►│ Sandbox VM │────►│ Human │────►│Productionβ”‚ β”‚ -β”‚ β”‚ Task β”‚ β”‚ (autonomous) β”‚ β”‚ Approval β”‚ β”‚ Server β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ β”‚ β”‚ -β”‚ β€’ Full root access β€’ Review diff β”‚ -β”‚ β€’ Install packages β€’ Approve Ansible β”‚ -β”‚ β€’ Edit configs β€’ One-click apply β”‚ -β”‚ β€’ Run services β”‚ -β”‚ β€’ Snapshot/restore β”‚ 
-β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +Agent Task --> Sandbox VM (autonomous) --> Human Approval --> Production + - Full root access - Review diff + - Install packages - Approve Ansible + - Edit configs - One-click apply + - Run services ``` -## Fluid CLI - -### Demo of CLI Agent +## Demo [![CLI Agent Demo](https://img.youtube.com/vi/ZSUBGXNTz34/0.jpg)](https://www.youtube.com/watch?v=ZSUBGXNTz34) @@ -61,930 +41,151 @@ AI agents are ready to do infrastructure work, but they can't touch prod: | Feature | Description | |---------|-------------| -| **Autonomous Execution** | Agents run commands, install packages, edit configs--no hand-holding | -| **Full VM Isolation** | Each agent gets a dedicated KVM virtual machine with root access | -| **Human-in-the-Loop** | Blocking approval workflow before any production changes | -| **Ansible Export** | Auto-generate playbooks from agent work for production apply | -| **Python SDK** | First-class SDK for building autonomous agents | - -## πŸ„ Quick Start - -### Prerequisites - -- Must have Go 1.24+ installed. -- Access to Remote Libvirt Host via SSH - - If you are able to access a Libvirt host via SSH then this will work. - -### Onboarding - -To install you can either -```bash -curl -fsSL https://fluid.sh/install.sh | bash -``` -or -```bash -go install github.com/aspectrr/fluid.sh/fluid/cmd/fluid@latest -``` - -They do the same thing. - -Next you can run - -```bash -fluid -``` - -to start onboarding. - -Onboarding will take you through adding remote hosts, generating SSH CAs for the agent to access sandboxes, and getting your LLM API key setup. 
- -[![Fluid CLI Onboarding](https://img.youtube.com/vi/wbevPJGmukw/0.jpg)](https://www.youtube.com/watch?v=wbevPJGmukw) - -### Sandbox Creation Limits - -When a Libvirt host does not have enough memory available to create a sandbox, the sandbox creation event will cause an approval screen to prompting the user for approval. This is used to track memory and CPU, and useful for not straining your existing hardware. These limits can be configured with `/settings`. - -### Internet Access - -All internet connections are blocked by default. Any command that reaches out of the sandbox require human approval first. - -### Context Compaction - -Context limits are set in `/settings` and used to configure when compaction takes place. Context is calculated with a rough heuristic of `0.33 tokens per char`. This is meant as a rough estimate but this is likely to be fixed and updated in further iterations. - - -### Safety and Potentially Destructive Actions - -The agent has access to the following tools during execution: - -### Sandboxes - -| Tool | Only Usable in Sandbox | Only Can Act on Sandboxes | Potentially Destructive|Description | -|--------|----------|------|----|---| -| `list_sandboxes` | `No` | `No` |`No`|List sandboxes with IP addresses | -| `create_sandbox` | `No` | `No, acts on libvirt host`|`Yes` |Create new sandbox VM by cloning from source VM | -| `destroy_sandbox` | `No` | `Yes` |`Yes`|Destroy sandbox and storage | -| `start_sandbox` | `No` | `Yes` |`Yes`|Start a stopped sandbox VM | -| `stop_sandbox` | `No` | `Yes` |`Yes`|Stop a started sandbox VM | - -### Commands - -| Tool | Only Usable in Sandbox | Only can act on Sandboxes | Potentially Destructive | Description | -|--------|----------|-------|-|------| -| `run_command` | `Yes` | `Yes` | `Yes` | Execute a command inside a sandbox via SSH | -| `edit_file` | `Yes` | `Yes` | `Yes` | Edit file on sandbox | -| `read_file` | `Yes` | `Yes` | `No` | Read file on sandbox | - -## Ansible -| Tool | Only Usable in 
Sandbox | Only can act on Sandboxes| Potentially Destructive | Description | -|--------|----------|-------|-|------| -| `create_playbook` | `No` | `No` | `No` | Create Ansible Playbook | -| `add_playbook_task` | `No` | `No` |`No` | Add Ansible task to playbook | -| `list_playbooks` | `No` |`No`| `No`| List Ansible playbooks | -| `get_playbook` | `No` | `No` | `No`| Get playbook contents | - - -## Read-Only Mode - -You can cycle between `EDIT` and `READ-ONLY` mode in the CLI via `Shift-Tab`. - -![Edit Mode](./edit_mode.png) -![Read Only Mode](./read_only_mode.png) - -Read only mode will give access to the model to only tools that are not potentially destructive: - -### Sandboxes - -| Tool | Only Usable in Sandbox | Only Can Act on Sandboxes | Potentially Destructive|Description | -|--------|----------|------|----|---| -| `list_sandboxes` | `No` | `No` |`No`|List sandboxes with IP addresses | - -### Commands - -| Tool | Only Usable in Sandbox | Only can act on Sandboxes | Potentially Destructive | Description | -|--------|----------|-------|-|------| -| `read_file` | `Yes` | `Yes` | `No` | Read file on sandbox | - -## Ansible -| Tool | Only Usable in Sandbox | Only can act on Sandboxes| Potentially Destructive | Description | -|--------|----------|-------|-|------| -| `create_playbook` | `No` | `No` | `No` | Create Ansible Playbook | -| `add_playbook_task` | `No` | `No` |`No` | Add Ansible task to playbook | -| `list_playbooks` | `No` |`No`| `No`| List Ansible playbooks | -| `get_playbook` | `No` | `No` | `No`| Get playbook contents | - - -## Source VM Read-Only Access - -In addition to read-only mode within sandboxes, Fluid supports reading source/golden VMs directly -- no sandbox required. This lets the agent inspect production VMs for debugging and investigation without any risk of modification. 
- -### Preparing a Source VM - -To enable read-only access on a source VM: - -```bash -fluid source prepare -``` - -This sets up a defense-in-depth security model on the VM: - -1. Installs a restricted shell script at `/usr/local/bin/fluid-readonly-shell` -2. Creates a `fluid-readonly` user with the restricted shell (no interactive login) -3. Configures SSH CA trust so the agent authenticates with ephemeral certificates -4. Sets up authorized principals for the `fluid-readonly` user -5. Restarts sshd to apply changes - -All steps are idempotent -- running `prepare` multiple times is safe. This is also run automatically during onboarding or when investigating a VM in read-only mode. - -### Source VM Tools - -When in read-only mode, the agent has access to these source VM tools: - -| Tool | Only Usable in Sandbox | Only Can Act on Sandboxes | Potentially Destructive | Description | -|--------|----------|------|----|---| -| `run_source_command` | `No` | `No` | `No` | Execute a read-only diagnostic command on a source VM | -| `read_source_file` | `No` | `No` | `No` | Read the contents of a file on a source VM | - -### Allowed Read-Only Commands - -Commands are validated against an allowlist before execution. Allowed categories include: - -| Category | Commands | -|----------|----------| -| File inspection | `cat`, `ls`, `find`, `head`, `tail`, `stat`, `file`, `wc`, `du`, `tree`, `strings`, `md5sum`, `sha256sum` | -| Process/system | `ps`, `top`, `pgrep`, `systemctl status`, `journalctl`, `dmesg` | -| Network | `ss`, `netstat`, `ip`, `ifconfig`, `dig`, `nslookup`, `ping` | -| Disk | `df`, `lsblk`, `blkid` | -| Package query | `dpkg -l`, `rpm -q`, `apt list`, `pip list` | -| System info | `uname`, `hostname`, `uptime`, `free`, `lscpu`, `lsmod` | -| Pipe targets | `grep`, `awk`, `sed`, `sort`, `uniq`, `cut`, `tr` | - -Subcommands are also restricted. For example, `systemctl` only allows `status`, `show`, `list-units`, `is-active`, and `is-enabled`. 
- -### Security Model - -Source VM access uses two layers of protection: - -- **Client-side validation** -- Commands are checked against the allowlist before being sent. Shell metacharacters (`$(...)`, backticks, process substitution, output redirection, newlines) are blocked. Piped and chained commands have each segment validated individually. -- **Server-side restricted shell** -- The `fluid-readonly` user's login shell blocks destructive commands (`rm`, `kill`, `sudo`, `apt install`, etc.), prevents command substitution and output redirection, and denies interactive login. - -## Issues - -Please reach out on Discord with any problems or questions you encounter! - [Discord](https://discord.gg/4WGGXJWm8J) - - -## Fluid Remote - -*(⚠️WIP Not Production Ready⚠️)* - -Fluid-Remote is the API version of Fluid. Allowing you to run agents autonomously on your infrastructure from the UI or API calls. Instead of just one agent in your terminal, control hundreds. Talk to Fluid in your favorite apps and spawn tasks to run async, getting your approval before continuining. Run Ansible playbooks from anywhere. 
- -### Demo of Fluid Remote - -[![Fluid Remote Demo](https://img.youtube.com/vi/nAlqRMhZxP0/0.jpg)](https://www.youtube.com/watch?v=nAlqRMhZxP0) - -## SDK Example - -```python -from fluid import Fluid - -client = Fluid("http://localhost:8080") -sandbox = None - -try: - # Agent gets its own VM with full root access - sandbox = client.sandbox.create_sandbox( - source_vm_name="ubuntu-base", - agent_id="nginx-setup-agent", - auto_start=True, - wait_for_ip=True - ).sandbox - - run_agent("Install nginx and configure TLS, create an Ansible playbook to recreate the task.", sandbox.id) - - # NOW the human reviews: - # - Diff between snapshots shows exactly what changed - # - Auto-generated Ansible playbook ready to apply - # - Human approves -> playbook runs on production - # - Human rejects -> nothing happens, agent tries again - -finally: - if(sandbox): - # Clean up sandbox - client.sandbox.destroy_sandbox(sandbox.id) -``` +| **Autonomous Execution** | Agents run commands, install packages, edit configs - no hand-holding | +| **Full VM Isolation** | Each agent gets a dedicated KVM virtual machine with root access | +| **Interactive TUI** | Natural language interface - just type what you want done | +| **Human-in-the-Loop** | Blocking approval workflow before any production changes | +| **Ansible Export** | Auto-generate playbooks from agent work for production apply | +| **MCP Integration** | Use fluid tools from Claude Code, Cursor, Windsurf | +| **Read-Only Mode** | Inspect source VMs safely without risk of modification | +| **Multi-Host** | Scale across hosts with the daemon + control plane | ## Quick Start -### Prerequisites - -`fluid-remote` is setup to be ran on a control plane on the same network as the VM hosts it needs to connect with. It will also need a postgres instance running on the control plan to keep tack of commands run, sandboxes, and other auditting. - -If you need another way of accessing VMs, open an issue and we will get back to you. 
- -### Installation - -The recommended deployment model is a **single control node** running the `fluid-remote` API and PostgreSQL, with SSH access to one or more libvirt/KVM hosts. - -### **Warning: It is reccomended to NOT use Docker** -There is a Docker container and a `docker-compose.yml` file in this repo for `fluid-remote`, purely in the off-chance that you would prefer to host in a container VS install a system process. -The reason not to use docker is due to the networking issues that arise. `fluid-remote` uses SSH to connect to libvirt and in testing, containers can interfere with connections to hosts. If you must use Docker, please use host-mode for the network, vs Docker's internal network. Please reach out in the [Discord](https://discord.gg/4WGGXJWm8J) if you want support implimenting this. - ---- - -## Architecture Overview - -``` -+--------------------+ SSH +------------------+ -| Control Node |----------------->| KVM / libvirt | -| | | Hosts | -| - fluid-remote | | | -| - PostgreSQL | | - libvirtd | -+--------------------+ +------------------+ -``` - -The control node: - -* Runs the `fluid-remote` API -* Stores audit logs and metadata in PostgreSQL -* Connects to hosts over SSH to execute libvirt operations - -The hypervisor hosts: - -* Run KVM + libvirt only -* Do not run agents or additional services - ---- - -## Requirements - -### Control Node - -* Linux (x86_64) -* systemd -* PostgreSQL 14+ -* SSH client - -### Hypervisor Hosts - -* Linux -* KVM enabled -* libvirt installed and running -* SSH access from control node - -### Network - -* Private management network between control node and hosts -* Public or tenant-facing network configured on hosts for VMs - ---- - -## Production Installation (Recommended) +### Install -This method installs a **static binary** and runs it as a systemd service. No container runtime is required. - -### 1. 
Import the GPG public key -```bash -# Import from keyserver -gpg --keyserver keys.openpgp.org --recv-keys B27DED65CFB30427EE85F8209DD0911D6CB0B643 - -# OR import from file -curl https://raw.githubusercontent.com/aspectrr/fluid.sh/main/public-key.asc | gpg --import -``` - -### 2. Download release assets ```bash -VERSION=0.1.0 -wget https://github.com/aspectrr/fluid.sh/releases/download/v${VERSION}/fluid-remote_${VERSION}_linux_amd64.tar.gz -wget https://github.com/aspectrr/fluid.sh/releases/download/v${VERSION}/checksums.txt -wget https://github.com/aspectrr/fluid.sh/releases/download/v${VERSION}/checksums.txt.sig -``` - -### 3. Verify signature and checksum -```bash -# Verify GPG signature -gpg --verify checksums.txt.sig checksums.txt - -# Verify file checksum -sha256sum -c checksums.txt --ignore-missing -``` - -### 4. Extract and install -```bash -tar -xzf fluid-remote_${VERSION}_linux_amd64.tar.gz -sudo install -m 755 fluid-remote /usr/local/bin/ -``` - -## System User and Directories - -Create a dedicated system user and required directories: - -```bash -useradd --system --home /var/lib/fluid-remote --shell /usr/sbin/nologin fluid-remote - -mkdir -p /etc/fluid-remote \ - /var/lib/fluid-remote \ - /var/log/fluid-remote - -chown -R fluid-remote:fluid-remote \ - /var/lib/fluid-remote \ - /var/log/fluid-remote -``` - -Filesystem layout: - -``` -/usr/local/bin/fluid-remote -/etc/fluid-remote/config.yaml -/var/lib/fluid-remote/ -/var/log/fluid-remote/ +curl -fsSL https://fluid.sh/install.sh | bash ``` ---- - -## PostgreSQL Setup - -PostgreSQL runs **locally on the control node** and is bound to localhost only. 
- -### Create Database and User +Or with Go: ```bash -sudo -u postgres psql -# Generate strong password -openssl rand -base64 16 +go install github.com/aspectrr/fluid.sh/fluid-cli/cmd/fluid-cli@latest ``` -```sql -CREATE DATABASE fluid; -CREATE USER fluid WITH PASSWORD 'strong-password'; -GRANT ALL PRIVILEGES ON DATABASE fluid TO fluid; -``` - -Ensure PostgreSQL is listening only on localhost: - -```conf -listen_addresses = '127.0.0.1' -``` - ---- - -## Configuration - -Create the main configuration file: +### Launch the TUI ```bash -vim /etc/fluid/config.yaml -``` - -Example: - -```yaml -server: - listen: 127.0.0.1:8080 - -database: - host: 127.0.0.1 - port: 5432 - name: fluid - user: fluid - password: strong-password - -hosts: - - name: kvm-01 - address: 10.0.0.11 - - name: kvm-02 - address: 10.0.0.12 -``` - ---- - -## SSH Access to Hosts - -The control node requires SSH access to each libvirt host. - -Recommended approach: - -* Generate a dedicated SSH key for `fluid` -* Grant limited sudo or libvirt access on hosts - -```bash -sudo -u fluid ssh-keygen -t ed25519 +fluid ``` -On each host, allow execution of `virsh` via sudo or libvirt permissions. +On first run, onboarding walks you through host setup, SSH CA generation, and LLM API key configuration. 
---- - -## systemd Service - -Create the service unit: - -```bash -vim /etc/systemd/system/fluid-remote.service -``` +### Architecture -```ini -[Unit] -Description=fluid-remote control plane -After=network.target postgresql.service - -[Service] -User=fluid-remote -Group=fluid-remote -ExecStart=/usr/local/bin/fluid-remote \ - --config /etc/fluid-remote/config.yaml -Restart=on-failure -RestartSec=5 -LimitNOFILE=65536 - -[Install] -WantedBy=multi-user.target ``` - -Enable and start: - -```bash -systemctl daemon-reload -systemctl enable fluid-remote -systemctl start fluid-remote +fluid (TUI/MCP) ---> fluid-daemon (gRPC :9091) ---> libvirt/KVM + | + +--- control-plane (optional, multi-host) + | + +--- web dashboard ``` ---- +- **fluid-cli**: Interactive TUI agent + MCP server +- **fluid-daemon**: Background service managing sandboxes via libvirt +- **control-plane (api)**: Multi-host orchestration, REST API, web dashboard +- **web**: React dashboard for monitoring and approval -## Verifying the Installation +### MCP Integration -Check service status: +Connect Claude Code, Codex, or Cursor to fluid via MCP: -```bash -systemctl status fluid-remote +```json +{ + "mcpServers": { + "fluid": { + "command": "fluid", + "args": ["mcp"] + } + } +} ``` -Basic health checks: +17 tools available: `create_sandbox`, `destroy_sandbox`, `run_command`, `edit_file`, `read_file`, `create_playbook`, and more. See the [full reference](https://fluid.sh/docs/cli-reference). -```bash -curl http://localhost:8080/health -curl http://localhost:8080/v1/hosts -``` - ---- - -## Upgrade Strategy - -* Download the new binary -* Verify checksum -* Replace `/usr/local/bin/fluid-remote` -* Restart the systemd service - -PostgreSQL migrations are handled automatically on startup. 
- ---- - -## Uninstallation - -```bash -systemctl stop fluid-remote -systemctl disable fluid-remote -rm /usr/local/bin/fluid-remote -rm /etc/systemd/system/fluid-remote.service -``` +### TUI Slash Commands -(Optional) Remove data and user: +| Command | Description | +|---------|-------------| +| `/vms` | List available VMs | +| `/sandboxes` | List active sandboxes | +| `/hosts` | List configured hosts | +| `/playbooks` | List Ansible playbooks | +| `/settings` | Open configuration | +| `/compact` | Compact conversation | +| `/context` | Show token usage | +| `/clear` | Clear history | +| `/help` | Show help | -```bash -userdel fluid-remote -rm -rf /etc/fluid-remote /var/lib/fluid-remote /var/log/fluid-remote -``` +Toggle between edit and read-only mode with `Shift+Tab`. -## Contributing Quickstart +Copy text by dragging and holding `Shift`. -### **Note: As the lovely contributors that you are, I host two Ubuntu VMs with libvirt installed for testing in the cloud for fluid-remote/fluid. If you would like access to these rather than the Mac workaround, please reach out in [Discord](https://discord.gg/4WGGXJWm8J) and I will add your public keys to them. 
They reset every hour to prevent long-running malicious processes from staying put.** +## Development ### Prerequisites -- **mprocs** - For local dev -- **libvirt/KVM** - For virtual machine management -- **macOS**: - - **qemu** - `brew install qemu` (the hypervisor) - - **libvirt** - `brew install libvirt` (VM management daemon) - - **socket_vmnet** - `brew install socket_vmnet` (VM networking) - - **cdrtools** - `brew install cdrtools` (provides `mkisofs` for cloud-init) +- **mprocs** - Multi-process runner for local dev +- **Go 1.24+** +- **libvirt/KVM** - See [local setup docs](https://fluid.sh/docs/local-setup) ### 30-Second Start ```bash -# Clone and start git clone https://github.com/aspectrr/fluid.sh.git cd fluid.sh mprocs - -# Services available at: -# API: http://localhost:8080 -# Web UI: http://localhost:5173 ``` ---- - -## Platform Setup - -
-Mac +Services: +- Web UI: http://localhost:5173 +- API: http://localhost:8080 -You will need to install qemu, libvirt, socket_vmnet, and cdrtools on Mac: - -```bash -# Install qemu, libvirt, socket_vmnet, and cdrtools -brew install qemu libvirt socket_vmnet cdrtools +### Project Structure -# Set up SSH CA (Needed for Sanbox VMs) -cd fluid.sh -./fluid-remote/scripts/setup-ssh-ca.sh --dir .ssh-ca - -# Create image directories -sudo mkdir -p /var/lib/libvirt/images/{base,jobs} -sudo chown -R $(whoami) /var/lib/libvirt/images/{base,jobs} - -# Verify libvirt is running -virsh -c qemu:///session list --all - -# Set up SSH CA (Needed for Sandbox VMs) -cd fluid.sh -./fluid-remote/scripts/reset-libvirt-macos.sh - -# Set up libvirt VM (ARM64 Ubuntu) -SSH_CA_PUB_PATH=.ssh-ca/ssh_ca.pub SSH_CA_KEY_PATH=.ssh-ca/ssh_ca ./scripts/reset-libvirt-macos.sh - -# Start services -mprocs ``` - -**What happens:** -1. A SSH CA is generated and then is used to build the golden VM -2. libvirt runs on the machine and is queried by the fluid-remote API -4. Test VMs run on your root machine - -**Architecture:** -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Apple Silicon Mac β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ fluid-remote β”‚ β”‚ -β”‚ β”‚ API + Web UI │────► β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ β”‚ β”‚ libvirt/QEMU (ARM64) β”‚ β”‚ -β”‚ β”‚ LIBVIRT_URI= β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ -β”‚ β”‚ qemu+tcp:// β”‚ β”‚ β”‚ sandbox β”‚ β”‚ sandbox β”‚ ... 
β”‚ β”‚ -β”‚ β”‚ localhost:16509 β”‚ β”‚ β”‚ VM (arm) β”‚ β”‚ VM (arm) β”‚ β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -**Create ARM64 test VMs:** -```bash -./fluid-remote/scripts/reset-libvirt-macos.sh -``` - -**Default test VM credentials:** -- Username: `testuser` / Password: `testpassword` -- Username: `root` / Password: `rootpassword` - -
- -
-Linux x86_64 (On-Prem / Bare Metal) - -Direct libvirt access for best performance: - -```bash -# Install libvirt and dependencies (Ubuntu/Debian) -sudo apt update -sudo apt install -y \ - qemu-kvm qemu-utils libvirt-daemon-system \ - libvirt-clients virtinst bridge-utils ovmf \ - cpu-checker cloud-image-utils genisoimage - -# Or on Fedora/RHEL -sudo dnf install -y \ - qemu-kvm qemu-img libvirt libvirt-client \ - virt-install bridge-utils edk2-ovmf \ - cloud-utils genisoimage - -# Enable and start libvirtd -sudo systemctl enable --now libvirtd - -# Add your user to libvirt group -sudo usermod -aG libvirt,kvm $(whoami) -newgrp libvirt # or log out and back in - -# Verify KVM is available -kvm-ok - -# Create image directories -sudo mkdir -p /var/lib/libvirt/images/{base,jobs} - -# Create environment file -cat > .env << 'EOF' -LIBVIRT_URI=qemu:///system -LIBVIRT_NETWORK=default -DATABASE_URL=postgresql://fluid:fluid@localhost:5432/fluid -BASE_IMAGE_DIR=/var/lib/libvirt/images/base -SANDBOX_WORKDIR=/var/lib/libvirt/images/jobs -EOF - -# Start the default network -sudo virsh net-autostart default -sudo virsh net-start default - -# Verify -virsh -c qemu:///system list --all - -# Start services -docker-compose up --build +fluid-cli/ # Go - Interactive TUI agent + MCP server +fluid-daemon/ # Go - Background sandbox management daemon +api/ # Go - Control plane REST API + gRPC +web/ # React - Dashboard UI +demo-server/ # Go - WebSocket demo server +proto/ # Protobuf definitions ``` -**Architecture:** -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Linux x86_64 Host β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ fluid-remote β”‚ β”‚ 
PostgreSQL β”‚ β”‚ Web UI β”‚ β”‚ -β”‚ β”‚ API (Go) β”‚ β”‚ (Docker) β”‚ β”‚ (React) β”‚ β”‚ -β”‚ β”‚ :8080 β”‚ β”‚ :5432 β”‚ β”‚ :5173 β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ β”‚ -β”‚ β”‚ LIBVIRT_URI=qemu:///system β”‚ -β”‚ β–Ό β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ libvirt/KVM (native) β”‚ β”‚ -β”‚ β”‚ β”‚ β”‚ -β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ -β”‚ β”‚ β”‚ sandbox-1 β”‚ β”‚ sandbox-2 β”‚ β”‚ sandbox-N β”‚ ... β”‚ β”‚ -β”‚ β”‚ β”‚ (x86_64) β”‚ β”‚ (x86_64) β”‚ β”‚ (x86_64) β”‚ β”‚ β”‚ -β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -**Create a base VM image:** -```bash -# Download Ubuntu cloud image -cd /var/lib/libvirt/images/base -sudo wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img - -# Create test VM using the provided script -./fluid-remote/scripts/setup-ssh-ca.sh --dir [ssh-ca-dir] -./fluid-remote/scripts/reset-libvirt-macos.sh [vm-name] [ca-pub-path] [ca-key-path] -``` - -**Default test VM credentials:** -- Username: 
`testuser` / Password: `testpassword` -- Username: `root` / Password: `rootpassword` - -
- -
-Linux ARM64 (Ampere, Graviton, Raspberry Pi) - -Native ARM64 Linux with libvirt: - -```bash -# Install libvirt and dependencies (Ubuntu/Debian ARM64) -sudo apt update -sudo apt install -y \ - qemu-kvm qemu-utils qemu-efi-aarch64 \ - libvirt-daemon-system libvirt-clients \ - virtinst bridge-utils cloud-image-utils genisoimage - -# Enable and start libvirtd -sudo systemctl enable --now libvirtd - -# Add your user to libvirt group -sudo usermod -aG libvirt,kvm $(whoami) -newgrp libvirt - -# Create environment file -cat > .env << 'EOF' -LIBVIRT_URI=qemu:///system -LIBVIRT_NETWORK=default -DATABASE_URL=postgresql://fluid:fluid@localhost:5432/fluid -BASE_IMAGE_DIR=/var/lib/libvirt/images/base -SANDBOX_WORKDIR=/var/lib/libvirt/images/jobs -EOF - -# Start the default network -sudo virsh net-autostart default -sudo virsh net-start default - -# Start services -docker-compose up --build -``` - -**Download ARM64 cloud images:** -```bash -cd /var/lib/libvirt/images/base -sudo wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-arm64.img -``` - -**Architecture is the same as x86_64 but with ARM64 VMs.** - -**Default test VM credentials:** -- Username: `testuser` / Password: `testpassword` -- Username: `root` / Password: `rootpassword` - -
- -
-Remote libvirt Server - -Connect to a remote libvirt host over SSH or TCP: - -```bash -# SSH connection (recommended - secure) -export LIBVIRT_URI="qemu+ssh://user@remote-host/system" - -# Or with specific SSH key -export LIBVIRT_URI="qemu+ssh://user@remote-host/system?keyfile=/path/to/key" - -# TCP connection (less secure - ensure network is trusted) -export LIBVIRT_URI="qemu+tcp://remote-host:16509/system" - -# Test connection -virsh -c "$LIBVIRT_URI" list --all - -# Create .env file -cat > .env << EOF -LIBVIRT_URI=${LIBVIRT_URI} -LIBVIRT_NETWORK=default -DATABASE_URL=postgresql://fluid:fluid@localhost:5432/fluid -EOF - -# Start services -docker-compose up --build -``` - -**Remote server setup (on the libvirt host):** -```bash -# For SSH access, ensure SSH is enabled and user has libvirt access -sudo usermod -aG libvirt remote-user - -# For TCP access (development only!), configure /etc/libvirt/libvirtd.conf: -# listen_tls = 0 -# listen_tcp = 1 -# auth_tcp = "none" # WARNING: No authentication! -# Then restart: sudo systemctl restart libvirtd -``` - -
- ---- -## API Reference - -### Sandbox Lifecycle - -| Method | Endpoint | Description | -|--------|----------|-------------| -| `POST` | `/v1/sandboxes` | Create a new sandbox | -| `GET` | `/v1/sandboxes/{id}` | Get sandbox details | -| `POST` | `/v1/sandboxes/{id}/start` | Start a sandbox | -| `POST` | `/v1/sandboxes/{id}/stop` | Stop a sandbox | -| `DELETE` | `/v1/sandboxes/{id}` | Destroy a sandbox | - -### Command Execution - -| Method | Endpoint | Description | -|--------|----------|-------------| -| `POST` | `/v1/sandboxes/{id}/command` | Run SSH command | -| `POST` | `/api/v1/tmux/panes/send-keys` | Send keystrokes to tmux | -| `POST` | `/api/v1/tmux/panes/read` | Read tmux pane content | - -### Snapshots - -| Method | Endpoint | Description | -|--------|----------|-------------| -| `POST` | `/v1/sandboxes/{id}/snapshots` | Create snapshot | -| `GET` | `/v1/sandboxes/{id}/snapshots` | List snapshots | -| `POST` | `/v1/sandboxes/{id}/snapshots/{name}/restore` | Restore snapshot | - -### Human Approval - -| Method | Endpoint | Description | -|--------|----------|-------------| -| `POST` | `/api/v1/human/ask` | Request approval (blocking) | - -## Security Model - -### Isolation Layers - -1. **VM Isolation** - Each sandbox is a separate KVM virtual machine -2. **Network Isolation** - VMs run on isolated virtual networks -3. **SSH Certificates** - Ephemeral credentials that auto-expire (1-10 minutes) -4. **Human Approval** - Gate sensitive operations - -### Safety Features - -- Command allowlists/denylists -- Path restrictions for file access -- Timeout limits on all operations -- Output size limits -- Full audit trail -- Snapshot rollback - -### SSH Host Key Verification - -The control node connects to hypervisor hosts via SSH. You **must** configure proper host key verification to prevent man-in-the-middle attacks. 
- -**Required: Configure `~/.ssh/config` on the control node:** - -```ssh-config -# /home/fluid-remote/.ssh/config (for the fluid-remote user) - -# Global defaults - strict verification -Host * - StrictHostKeyChecking yes - UserKnownHostsFile ~/.ssh/known_hosts - -# Hypervisor hosts - explicitly trusted -Host kvm-01 - HostName 10.0.0.11 - User root - IdentityFile ~/.ssh/id_ed25519 - -Host kvm-02 - HostName 10.0.0.12 - User root - IdentityFile ~/.ssh/id_ed25519 -``` - -**Pre-populate known_hosts before first use:** - -```bash -# As the fluid-remote user, add each host's key -sudo -u fluid-remote ssh-keyscan -H 10.0.0.11 >> /home/fluid-remote/.ssh/known_hosts -sudo -u fluid-remote ssh-keyscan -H 10.0.0.12 >> /home/fluid-remote/.ssh/known_hosts - -# Verify the fingerprints match your hosts -sudo -u fluid-remote ssh-keygen -lf /home/fluid-remote/.ssh/known_hosts -``` - -**Warning:** Never use `StrictHostKeyChecking=no` in production. This disables host verification and exposes you to MITM attacks. - -## Documentation - -- [Docs from Previous Issues](./docs/) - Documentation on common issues working with the project -- [Scripts Reference](./scripts/README.md) - Setup and utility scripts -- [SSH Certificates](.scripts/README.md#ssh-certificate-based-access) - Ephemeral credential system -- [Agent Connection Flow](./docs/agent-connection-flow.md) - How agents connect to sandboxes -- [Examples](./examples/) - Working examples - -## Development - -To run the API locally, first build the `fluid-remote` binary: - -```bash -# Build the API binary -cd fluid-remote && make build -``` - -Then, use `mprocs` to run all the services together for local development. 
+### Running Tests ```bash -# Install mprocs for multi-service development -brew install mprocs # macOS -cargo install mprocs # Linux - -# Start all services with hot-reload -mprocs - -# Or run individual services -cd fluid-remote && make run -cd web && bun run dev +cd fluid-cli && make test +cd fluid-daemon && make test +cd api && make test +cd web && bun run build ``` -### Running Tests +## Enterprise -```bash -# Go services -(cd fluid-remote && make test) +For teams with security and compliance requirements, fluid.sh supports: -# Python SDK -(cd sdk/fluid-py && pytest) +- **Encrypted snapshots at rest** - Source images encrypted on sandbox hosts with configurable TTL and secure wipe on eviction +- **Network isolation** - Sandboxes boot into isolated networks with no route to production by default, explicit allowlists for service access +- **RBAC** - Control which users and teams can create sandboxes from which source VMs +- **Audit logging** - Full trail of every snapshot pull, sandbox creation, and destruction +- **Secrets scrubbing** - Configurable per source VM: scrub credentials before sandbox creation or keep exact replica for auth debugging +- **Scoped daemon credentials** - Read-only snapshot capability on production hosts, nothing else -# All checks -(cd fluid-remote && make check) -``` +If you need these, reach out to [Collin](mailto:cpfeifer@madcactus.org) to learn more about an enterprise plan. -## Contributing +## Contributing 1. Fork the repository 2. Create a feature branch 3. Make changes with tests -4. Run `make check` -5. Submit a pull request +4. Submit a pull request All contributions must maintain the security model and include appropriate tests. +Reach out on [Discord](https://discord.gg/4WGGXJWm8J) with questions or for access to test VMs. + ## License MIT License - see [LICENSE](LICENSE) for details. @@ -993,9 +194,8 @@ MIT License - see [LICENSE](LICENSE) for details. 
[![Star History Chart](https://api.star-history.com/svg?repos=aspectrr/fluid.sh&type=date&legend=top-left)](https://www.star-history.com/#aspectrr/fluid.sh&type=date&legend=top-left) -
-Made with ❀️ by Collin & [Contributors](https://github.com/aspectrr/fluid.sh/graphs/contributors) +Made with ❀️ by Collin, Claude & [Contributors](https://github.com/aspectrr/fluid.sh/graphs/contributors)
diff --git a/api/.gitignore b/api/.gitignore new file mode 100644 index 00000000..51a41ee3 --- /dev/null +++ b/api/.gitignore @@ -0,0 +1,2 @@ +.env +bin/ diff --git a/api/AGENTS.md b/api/AGENTS.md new file mode 100644 index 00000000..921c705f --- /dev/null +++ b/api/AGENTS.md @@ -0,0 +1,100 @@ +# API (Control Plane) - Development Guide + +The control plane server for fluid.sh. Provides REST API, gRPC streaming to daemons, multi-host orchestration, web dashboard backend, and agent execution. + +## Architecture + +``` +Web Dashboard / SDK + | + v (REST API) +api server (:8080) + | + +--- PostgreSQL (state) + | + v (gRPC stream) +fluid-daemon (per host) +``` + +## Tech Stack + +- **Language**: Go +- **REST**: Standard library HTTP + custom router +- **gRPC**: Bidirectional streaming to daemons +- **Database**: PostgreSQL +- **Auth**: OAuth, password, host token authentication + +## Project Structure + +``` +api/ + cmd/server/main.go # Entry point + internal/ + agent/ # LLM agent executor + tools + auth/ # OAuth, password, host auth, middleware + config/ # Configuration loading + error/ # Error response helpers + grpc/ # gRPC server for daemon connections + json/ # JSON encode/decode helpers + orchestrator/ # Multi-host sandbox orchestration + registry/ # Source VM registry + rest/ # REST API handlers + store/ # PostgreSQL store + Makefile +``` + +## Quick Start + +```bash +# Build +make build + +# Run (requires PostgreSQL) +./bin/api + +# Run tests +make test +``` + +## Makefile Targets + +| Target | Description | +|--------|-------------| +| `all` | Run fmt, vet, test, and build (default) | +| `build` | Build the API binary | +| `run` | Build and run the API | +| `clean` | Clean build artifacts | +| `fmt` | Format code | +| `vet` | Run go vet | +| `test` | Run tests | +| `test-coverage` | Run tests with coverage | +| `check` | Run all code quality checks | +| `deps` | Download dependencies | +| `tidy` | Tidy and verify dependencies | +| `install` | Install to 
GOPATH/bin | + +## Database Setup + +```bash +# Create database +sudo -u postgres psql -c "CREATE DATABASE fluid;" +sudo -u postgres psql -c "CREATE USER fluid WITH PASSWORD 'fluid';" +sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE fluid TO fluid;" + +# Schema is auto-migrated on startup via GORM AutoMigrate +``` + +## Development + +### Prerequisites + +- Go 1.24+ +- PostgreSQL 14+ + +### Testing + +```bash +make test # Run all tests +make test-coverage # Tests with coverage report +make check # Run all quality checks +``` diff --git a/api/Dockerfile b/api/Dockerfile new file mode 100644 index 00000000..d635f282 --- /dev/null +++ b/api/Dockerfile @@ -0,0 +1,19 @@ +FROM golang:1.24-alpine AS builder + +WORKDIR /app + +# Copy proto dependency first +COPY proto/gen/go ../proto/gen/go + +# Copy api module +COPY api/go.mod api/go.sum ./ +RUN go mod download +COPY api/ . +RUN CGO_ENABLED=0 go build -o /api ./cmd/server + +FROM alpine:3.20 +RUN apk add --no-cache ca-certificates curl +COPY --from=builder /api /usr/local/bin/api + +EXPOSE 8080 9090 +ENTRYPOINT ["api"] diff --git a/api/Makefile b/api/Makefile new file mode 100644 index 00000000..7e5c4f71 --- /dev/null +++ b/api/Makefile @@ -0,0 +1,59 @@ +BINARY_NAME=api +BUILD_DIR=bin + +.PHONY: all build run clean fmt vet lint test test-coverage check deps tidy install help + +all: fmt vet test build + +build: + go build -o $(BUILD_DIR)/$(BINARY_NAME) ./cmd/server + +run: build + ./$(BUILD_DIR)/$(BINARY_NAME) + +clean: + rm -rf $(BUILD_DIR) + +fmt: + go fmt ./... + +vet: + go vet ./... + +lint: + golangci-lint run --allow-parallel-runners ./... + +test: + go test ./... -v + +test-coverage: + go test ./... 
-coverprofile=coverage.out + go tool cover -html=coverage.out -o coverage.html + +check: fmt vet lint test + +deps: + go mod download + +tidy: + go mod tidy + +install: + go install ./cmd/server + +help: + @echo "Available targets:" + @echo " all - Run fmt, vet, test, and build (default)" + @echo " build - Build the API binary" + @echo " run - Build and run the API" + @echo " clean - Clean build artifacts" + @echo " fmt - Format code" + @echo " vet - Run go vet" + @echo " lint - Run golangci-lint" + @echo " test - Run tests" + @echo " test-coverage - Run tests with coverage" + @echo " check - Run all code quality checks" + @echo " deps - Download dependencies" + @echo " tidy - Tidy and verify dependencies" + @echo " install - Install to GOPATH/bin" + @echo " help - Show this help message" diff --git a/api/cmd/server/main.go b/api/cmd/server/main.go new file mode 100644 index 00000000..5bb561b2 --- /dev/null +++ b/api/cmd/server/main.go @@ -0,0 +1,215 @@ +package main + +import ( + "context" + "log/slog" + "net/http" + "net/url" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "github.com/aspectrr/fluid.sh/api/docs" + "github.com/aspectrr/fluid.sh/api/internal/auth" + "github.com/aspectrr/fluid.sh/api/internal/config" + grpcServer "github.com/aspectrr/fluid.sh/api/internal/grpc" + "github.com/aspectrr/fluid.sh/api/internal/orchestrator" + "github.com/aspectrr/fluid.sh/api/internal/registry" + "github.com/aspectrr/fluid.sh/api/internal/rest" + "github.com/aspectrr/fluid.sh/api/internal/store" + postgresStore "github.com/aspectrr/fluid.sh/api/internal/store/postgres" + "github.com/aspectrr/fluid.sh/api/internal/telemetry" + + "github.com/joho/godotenv" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// @title Fluid API +// @version 1.0 +// @description API for managing sandboxes, organizations, billing, and hosts +// @host api.fluid.sh +// @BasePath /v1 +// @securityDefinitions.apikey CookieAuth +// @in cookie +// @name session +func 
main() { + // Load .env if present (no error if missing - production uses real env vars) + _ = godotenv.Load() + + ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer stop() + + cfg := config.Load() + if err := cfg.Validate(); err != nil { + slog.Error("invalid configuration", "error", err) + os.Exit(1) + } + + logger := setupLogger(cfg.Logging.Level, cfg.Logging.Format) + slog.SetDefault(logger) + + redactedDB := cfg.Database.URL + if u, err := url.Parse(cfg.Database.URL); err == nil && u.User != nil { + u.User = url.UserPassword("***", "***") + redactedDB = u.String() + } + + logger.Info("starting fluid API", + "rest_addr", cfg.API.Addr, + "grpc_addr", cfg.GRPC.Address, + "db", redactedDB, + ) + + // 1. Initialize shared Postgres store. + st, err := postgresStore.New(ctx, store.Config{ + DatabaseURL: cfg.Database.URL, + MaxOpenConns: cfg.Database.MaxOpenConns, + MaxIdleConns: cfg.Database.MaxIdleConns, + ConnMaxLifetime: cfg.Database.ConnMaxLifetime, + AutoMigrate: cfg.Database.AutoMigrate, + EncryptionKey: cfg.EncryptionKey, + }) + if err != nil { + logger.Error("failed to initialize store", "error", err) + os.Exit(1) + } + defer func() { + if cerr := st.Close(); cerr != nil { + logger.Error("failed to close store", "error", cerr) + } + }() + + // 2. Initialize host registry (in-memory). + reg := registry.New() + + // 3. Initialize gRPC server with host token auth. + grpcOpts := []grpc.ServerOption{ + grpc.StreamInterceptor(auth.HostTokenStreamInterceptor(st)), + } + if cfg.GRPC.TLSCertFile != "" && cfg.GRPC.TLSKeyFile != "" { + creds, err := credentials.NewServerTLSFromFile(cfg.GRPC.TLSCertFile, cfg.GRPC.TLSKeyFile) + if err != nil { + logger.Error("failed to load gRPC TLS credentials", "error", err) + os.Exit(1) + } + grpcOpts = append([]grpc.ServerOption{grpc.Creds(creds)}, grpcOpts...) 
+ logger.Info("gRPC TLS enabled") + } else { + logger.Warn("gRPC server running WITHOUT TLS - host bearer tokens will be sent in plaintext") + } + + grpcSrv, err := grpcServer.NewServer( + cfg.GRPC.Address, + reg, + st, + logger, + cfg.Orchestrator.HeartbeatTimeout, + grpcOpts..., + ) + if err != nil { + logger.Error("failed to initialize gRPC server", "error", err) + os.Exit(1) + } + + // 4. Initialize orchestrator. + orch := orchestrator.New( + reg, + st, + grpcSrv.Handler(), + logger, + cfg.Orchestrator.DefaultTTL, + cfg.Orchestrator.HeartbeatTimeout, + ) + + // 5. Agent client - commented out, not yet ready for integration. + // var agentClient *agent.Client + // if cfg.Agent.OpenRouterAPIKey != "" { + // agentClient = agent.NewClient(cfg.Agent, st, orch, logger) + // logger.Info("agent client initialized", "model", cfg.Agent.DefaultModel) + // } else { + // logger.Warn("OPENROUTER_API_KEY not set, agent chat disabled") + // } + + // 6. Initialize telemetry. + tel := telemetry.New(cfg.PostHog.APIKey, cfg.PostHog.Endpoint) + defer tel.Close() + + // 7. Initialize REST server. + srv := rest.NewServer(st, cfg, orch, tel, docs.OpenAPIYAML) + + httpSrv := &http.Server{ + Addr: cfg.API.Addr, + Handler: srv.Router, + ReadHeaderTimeout: 15 * time.Second, + ReadTimeout: cfg.API.ReadTimeout, + WriteTimeout: cfg.API.WriteTimeout, + IdleTimeout: cfg.API.IdleTimeout, + } + + // 8. Start gRPC server in background. + grpcErrCh := make(chan error, 1) + go func() { + logger.Info("gRPC server listening", "addr", cfg.GRPC.Address) + if err := grpcSrv.Start(); err != nil { + grpcErrCh <- err + } + }() + + // 9. Start REST server in background. + httpErrCh := make(chan error, 1) + go func() { + logger.Info("HTTP server listening", "addr", cfg.API.Addr) + if err := httpSrv.ListenAndServe(); err != nil && err != http.ErrServerClosed { + httpErrCh <- err + } + }() + + // 10. Wait for signal or error. 
+ select { + case <-ctx.Done(): + logger.Info("shutdown signal received") + case err := <-grpcErrCh: + logger.Error("gRPC server error", "error", err) + case err := <-httpErrCh: + logger.Error("HTTP server error", "error", err) + } + + // 11. Graceful shutdown: stop HTTP first (drain in-flight requests), + // then stop gRPC so streaming daemons stay connected during drain. + shutdownCtx, cancel := context.WithTimeout(context.Background(), cfg.API.ShutdownTimeout) + defer cancel() + if err := httpSrv.Shutdown(shutdownCtx); err != nil { + logger.Error("HTTP server graceful shutdown failed", "error", err) + _ = httpSrv.Close() + } else { + logger.Info("HTTP server shut down gracefully") + } + + grpcSrv.Stop() + logger.Info("gRPC server stopped") +} + +func setupLogger(levelStr, format string) *slog.Logger { + var level slog.Level + switch strings.ToLower(levelStr) { + case "debug": + level = slog.LevelDebug + case "warn", "warning": + level = slog.LevelWarn + case "error": + level = slog.LevelError + default: + level = slog.LevelInfo + } + + var handler slog.Handler + if strings.ToLower(format) == "json" { + handler = slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: level}) + } else { + handler = slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: level}) + } + return slog.New(handler) +} diff --git a/api/docs/embed.go b/api/docs/embed.go new file mode 100644 index 00000000..ae39efe1 --- /dev/null +++ b/api/docs/embed.go @@ -0,0 +1,6 @@ +package docs + +import _ "embed" + +//go:embed openapi.yaml +var OpenAPIYAML []byte diff --git a/api/docs/openapi.yaml b/api/docs/openapi.yaml new file mode 100644 index 00000000..c5c86a7c --- /dev/null +++ b/api/docs/openapi.yaml @@ -0,0 +1,1947 @@ +openapi: 3.0.1 +info: + contact: {} + description: "API for managing sandboxes, organizations, billing, and hosts" + title: Fluid API + version: "1.0" +servers: +- url: //api.fluid.sh/v1 +paths: + /auth/github: + get: + description: Redirect to GitHub OAuth authorization 
page + responses: + "302": + content: {} + description: Redirect to GitHub + "501": + content: + '*/*': + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Implemented + summary: GitHub OAuth login + tags: + - Auth + /auth/github/callback: + get: + description: "Handle GitHub OAuth callback, create or link user, set session\ + \ cookie, and redirect to dashboard" + parameters: + - description: OAuth authorization code + in: query + name: code + required: true + schema: + type: string + - description: OAuth CSRF state parameter + in: query + name: state + required: true + schema: + type: string + responses: + "302": + content: {} + description: Redirect to dashboard + "400": + content: + '*/*': + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "500": + content: + '*/*': + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + summary: GitHub OAuth callback + tags: + - Auth + /auth/google: + get: + description: Redirect to Google OAuth authorization page + responses: + "302": + content: {} + description: Redirect to Google + "501": + content: + '*/*': + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Implemented + summary: Google OAuth login + tags: + - Auth + /auth/google/callback: + get: + description: "Handle Google OAuth callback, create or link user, set session\ + \ cookie, and redirect to dashboard" + parameters: + - description: OAuth authorization code + in: query + name: code + required: true + schema: + type: string + - description: OAuth CSRF state parameter + in: query + name: state + required: true + schema: + type: string + responses: + "302": + content: {} + description: Redirect to dashboard + "400": + content: + '*/*': + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "500": + content: + '*/*': + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: 
Internal Server Error + summary: Google OAuth callback + tags: + - Auth + /auth/login: + post: + description: "Authenticate with email and password, returns a session cookie" + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/rest.loginRequest" + description: Login credentials + required: true + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/rest.authResponse" + description: OK + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "401": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Unauthorized + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + summary: Log in + tags: + - Auth + x-codegen-request-body-name: request + /auth/logout: + post: + description: Invalidate the current session and clear the session cookie + responses: + "200": + content: + application/json: + schema: + additionalProperties: + type: string + type: object + description: OK + security: + - CookieAuth: [] + summary: Log out + tags: + - Auth + /auth/me: + get: + description: Return the currently authenticated user + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/rest.authResponse" + description: OK + "401": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Unauthorized + security: + - CookieAuth: [] + summary: Get current user + tags: + - Auth + /auth/onboarding: + post: + description: Create the user's first organization during onboarding + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/rest.onboardingRequest" + description: Onboarding details + required: true + responses: + "201": + content: + application/json: + schema: + $ref: 
"#/components/schemas/rest.orgResponse" + description: Created + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "409": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Conflict + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Complete onboarding + tags: + - Auth + x-codegen-request-body-name: request + /auth/register: + post: + description: Create a new user account and return a session cookie + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/rest.registerRequest" + description: Registration details + required: true + responses: + "201": + content: + application/json: + schema: + $ref: "#/components/schemas/rest.authResponse" + description: Created + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "409": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Conflict + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + summary: Register a new user + tags: + - Auth + x-codegen-request-body-name: request + /health: + get: + description: Returns API health status + responses: + "200": + content: + application/json: + schema: + additionalProperties: + type: string + type: object + description: OK + summary: Health check + tags: + - Health + /orgs: + get: + description: List all organizations the current user belongs to + responses: + "200": + content: + application/json: + schema: + additionalProperties: true + type: object + description: OK + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + 
description: Internal Server Error + security: + - CookieAuth: [] + summary: List organizations + tags: + - Organizations + post: + description: Create a new organization and add the current user as owner + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/rest.createOrgRequest" + description: Organization details + required: true + responses: + "201": + content: + application/json: + schema: + $ref: "#/components/schemas/rest.orgResponse" + description: Created + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "409": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Conflict + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Create organization + tags: + - Organizations + x-codegen-request-body-name: request + /orgs/{slug}: + delete: + description: Delete an organization (owner only) + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + additionalProperties: + type: string + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Delete organization + tags: + - Organizations + get: + description: Get organization details by slug + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + 
responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/rest.orgResponse" + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Get organization + tags: + - Organizations + patch: + description: Update organization details (owner or admin only) + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/rest.updateOrgRequest" + description: Fields to update + required: true + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/rest.orgResponse" + description: OK + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Update organization + tags: + - Organizations + x-codegen-request-body-name: request + /orgs/{slug}/hosts: + get: + description: List all connected sandbox hosts + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + 
additionalProperties: true + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: List hosts + tags: + - Hosts + /orgs/{slug}/hosts/{hostID}: + get: + description: Get details of a specific connected host + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Host ID + in: path + name: hostID + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/orchestrator.HostInfo" + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + security: + - CookieAuth: [] + summary: Get host + tags: + - Hosts + /orgs/{slug}/hosts/tokens: + get: + description: List all host tokens for the organization + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + additionalProperties: true + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server 
Error + security: + - CookieAuth: [] + summary: List host tokens + tags: + - Host Tokens + post: + description: Generate a new host authentication token (owner or admin only) + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/rest.createHostTokenRequest" + description: Token details + required: true + responses: + "201": + content: + application/json: + schema: + $ref: "#/components/schemas/rest.hostTokenResponse" + description: Created + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Create host token + tags: + - Host Tokens + x-codegen-request-body-name: request + /orgs/{slug}/hosts/tokens/{tokenID}: + delete: + description: Delete a host token (owner or admin only) + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Token ID + in: path + name: tokenID + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + additionalProperties: + type: string + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + security: + - CookieAuth: [] + summary: Delete host token + 
tags: + - Host Tokens + /orgs/{slug}/members: + get: + description: List all members of an organization + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + additionalProperties: true + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: List members + tags: + - Members + post: + description: Add a user to an organization (owner or admin only) + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/rest.addMemberRequest" + description: Member details + required: true + responses: + "201": + content: + application/json: + schema: + $ref: "#/components/schemas/rest.memberResponse" + description: Created + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "409": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Conflict + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Add member + tags: + - 
Members + x-codegen-request-body-name: request + /orgs/{slug}/members/{memberID}: + delete: + description: Remove a member from an organization (owner or admin only) + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Member ID + in: path + name: memberID + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + additionalProperties: + type: string + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Remove member + tags: + - Members + /orgs/{slug}/sandboxes: + get: + description: List all sandboxes in the organization + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + additionalProperties: true + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: List sandboxes + tags: + - Sandboxes + post: + description: Create a new sandbox in the organization from a source VM or base + image + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + 
requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/orchestrator.CreateSandboxRequest" + description: Sandbox configuration + required: true + responses: + "201": + content: + application/json: + schema: + $ref: "#/components/schemas/store.Sandbox" + description: Created + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Create sandbox + tags: + - Sandboxes + x-codegen-request-body-name: request + /orgs/{slug}/sandboxes/{sandboxID}: + delete: + description: Destroy a sandbox and release its resources + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Sandbox ID + in: path + name: sandboxID + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + additionalProperties: true + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Destroy sandbox + tags: + - Sandboxes + get: + description: Get sandbox details by ID + parameters: + - description: Organization slug + in: path + name: slug + required: true + 
schema: + type: string + - description: Sandbox ID + in: path + name: sandboxID + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/store.Sandbox" + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + security: + - CookieAuth: [] + summary: Get sandbox + tags: + - Sandboxes + /orgs/{slug}/sandboxes/{sandboxID}/commands: + get: + description: List all commands executed in a sandbox + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Sandbox ID + in: path + name: sandboxID + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + additionalProperties: true + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: List commands + tags: + - Sandboxes + /orgs/{slug}/sandboxes/{sandboxID}/ip: + get: + description: Get the IP address of a sandbox + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Sandbox ID + in: path + name: sandboxID + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + additionalProperties: true + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: 
"#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + security: + - CookieAuth: [] + summary: Get sandbox IP + tags: + - Sandboxes + /orgs/{slug}/sandboxes/{sandboxID}/run: + post: + description: Execute a command in a sandbox + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Sandbox ID + in: path + name: sandboxID + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/orchestrator.RunCommandRequest" + description: Command to run + required: true + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/store.Command" + description: OK + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Run command + tags: + - Sandboxes + x-codegen-request-body-name: request + /orgs/{slug}/sandboxes/{sandboxID}/snapshot: + post: + description: Create a snapshot of a sandbox + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Sandbox ID + in: path + name: sandboxID + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/orchestrator.SnapshotRequest" + description: Snapshot details + required: true + responses: 
+ "201": + content: + application/json: + schema: + $ref: "#/components/schemas/orchestrator.SnapshotResponse" + description: Created + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Create snapshot + tags: + - Sandboxes + x-codegen-request-body-name: request + /orgs/{slug}/sandboxes/{sandboxID}/start: + post: + description: Start a stopped sandbox + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Sandbox ID + in: path + name: sandboxID + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + additionalProperties: true + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Start sandbox + tags: + - Sandboxes + /orgs/{slug}/sandboxes/{sandboxID}/stop: + post: + description: Stop a running sandbox + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Sandbox ID + in: path + name: sandboxID + required: true + schema: + type: string + responses: + "200": + 
content: + application/json: + schema: + additionalProperties: true + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Stop sandbox + tags: + - Sandboxes + /orgs/{slug}/sources/{vm}/prepare: + post: + description: Prepare a source VM for sandbox cloning + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Source VM name + in: path + name: vm + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/orchestrator.PrepareRequest" + description: SSH credentials + required: true + responses: + "200": + content: + application/json: + schema: + additionalProperties: true + type: object + description: OK + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Prepare source VM + tags: + - Source VMs + x-codegen-request-body-name: request + /orgs/{slug}/sources/{vm}/read: + post: + description: Read a file from a source VM + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + 
type: string + - description: Source VM name + in: path + name: vm + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/orchestrator.ReadSourceRequest" + description: File path + required: true + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/orchestrator.SourceFileResult" + description: OK + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Read source file + tags: + - Source VMs + x-codegen-request-body-name: request + /orgs/{slug}/sources/{vm}/run: + post: + description: Execute a read-only command on a source VM + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + - description: Source VM name + in: path + name: vm + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/orchestrator.RunSourceRequest" + description: Command to run + required: true + responses: + "200": + content: + application/json: + schema: + $ref: "#/components/schemas/orchestrator.SourceCommandResult" + description: OK + "400": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Bad Request + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: 
"#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: Run source command + tags: + - Source VMs + x-codegen-request-body-name: request + /orgs/{slug}/vms: + get: + description: List all source VMs across connected hosts + parameters: + - description: Organization slug + in: path + name: slug + required: true + schema: + type: string + responses: + "200": + content: + application/json: + schema: + additionalProperties: true + type: object + description: OK + "403": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Forbidden + "404": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Not Found + "500": + content: + application/json: + schema: + $ref: "#/components/schemas/error.ErrorResponse" + description: Internal Server Error + security: + - CookieAuth: [] + summary: List source VMs + tags: + - Source VMs +components: + schemas: + error.ErrorResponse: + example: + code: 0 + details: details + error: error + properties: + code: + type: integer + details: + type: string + error: + type: string + type: object + orchestrator.CreateSandboxRequest: + properties: + agent_id: + type: string + live: + type: boolean + memory_mb: + type: integer + name: + type: string + network: + type: string + org_id: + type: string + source_host_id: + type: string + source_vm: + type: string + ttl_seconds: + type: integer + vcpus: + type: integer + type: object + orchestrator.HostInfo: + example: + hostname: hostname + available_disk_mb: 1 + available_memory_mb: 5 + active_sandboxes: 0 + base_images: + - base_images + - base_images + last_heartbeat: last_heartbeat + available_cpus: 6 + host_id: host_id + status: status + properties: + active_sandboxes: + type: integer + available_cpus: + 
type: integer + available_disk_mb: + type: integer + available_memory_mb: + type: integer + base_images: + items: + type: string + type: array + host_id: + type: string + hostname: + type: string + last_heartbeat: + type: string + status: + type: string + type: object + orchestrator.PrepareRequest: + properties: + ssh_key_path: + type: string + ssh_user: + type: string + type: object + orchestrator.ReadSourceRequest: + properties: + path: + type: string + type: object + orchestrator.RunCommandRequest: + properties: + command: + type: string + env: + additionalProperties: + type: string + type: object + timeout_seconds: + type: integer + type: object + orchestrator.RunSourceRequest: + properties: + command: + type: string + timeout_seconds: + type: integer + type: object + orchestrator.SnapshotRequest: + properties: + name: + type: string + type: object + orchestrator.SnapshotResponse: + example: + snapshot_name: snapshot_name + snapshot_id: snapshot_id + sandbox_id: sandbox_id + created_at: created_at + properties: + created_at: + type: string + sandbox_id: + type: string + snapshot_id: + type: string + snapshot_name: + type: string + type: object + orchestrator.SourceCommandResult: + example: + stdout: stdout + exit_code: 0 + stderr: stderr + source_vm: source_vm + properties: + exit_code: + type: integer + source_vm: + type: string + stderr: + type: string + stdout: + type: string + type: object + orchestrator.SourceFileResult: + example: + path: path + content: content + source_vm: source_vm + properties: + content: + type: string + path: + type: string + source_vm: + type: string + type: object + rest.addMemberRequest: + properties: + email: + type: string + role: + type: string + type: object + rest.authResponse: + example: + user: + email_verified: true + avatar_url: avatar_url + id: id + display_name: display_name + email: email + properties: + user: + $ref: "#/components/schemas/rest.userResponse" + type: object + rest.createHostTokenRequest: + properties: 
+ name: + type: string + type: object + rest.createOrgRequest: + properties: + name: + type: string + slug: + type: string + type: object + rest.hostTokenResponse: + example: + name: name + created_at: created_at + id: id + token: token + properties: + created_at: + type: string + id: + type: string + name: + type: string + token: + description: Only set on creation. + type: string + type: object + rest.loginRequest: + properties: + email: + type: string + password: + type: string + type: object + rest.memberResponse: + example: + role: role + user_id: user_id + created_at: created_at + id: id + properties: + created_at: + type: string + id: + type: string + role: + type: string + user_id: + type: string + type: object + rest.onboardingRequest: + properties: + org_name: + type: string + referral_source: + type: string + role: + type: string + use_cases: + items: + type: string + type: array + type: object + rest.orgResponse: + example: + stripe_customer_id: stripe_customer_id + owner_id: owner_id + name: name + created_at: created_at + id: id + slug: slug + properties: + created_at: + type: string + id: + type: string + name: + type: string + owner_id: + type: string + slug: + type: string + stripe_customer_id: + type: string + type: object + rest.registerRequest: + properties: + display_name: + type: string + email: + type: string + password: + type: string + type: object + rest.updateOrgRequest: + properties: + name: + type: string + type: object + rest.userResponse: + example: + email_verified: true + avatar_url: avatar_url + id: id + display_name: display_name + email: email + properties: + avatar_url: + type: string + display_name: + type: string + email: + type: string + email_verified: + type: boolean + id: + type: string + type: object + store.Command: + example: + duration_ms: 0 + stdout: stdout + exit_code: 6 + sandbox_id: sandbox_id + started_at: started_at + id: id + stderr: stderr + command: command + ended_at: ended_at + properties: + command: + type: 
string + duration_ms: + type: integer + ended_at: + type: string + exit_code: + type: integer + id: + type: string + sandbox_id: + type: string + started_at: + type: string + stderr: + type: string + stdout: + type: string + type: object + store.Sandbox: + example: + agent_id: agent_id + tap_device: tap_device + ttl_seconds: 6 + created_at: created_at + ip_address: ip_address + vcpus: 1 + deleted_at: deleted_at + host_id: host_id + base_image: base_image + updated_at: updated_at + mac_address: mac_address + org_id: org_id + name: name + memory_mb: 0 + bridge: bridge + id: id + state: CREATING + source_vm: source_vm + properties: + agent_id: + type: string + base_image: + type: string + bridge: + type: string + created_at: + type: string + deleted_at: + type: string + host_id: + type: string + id: + type: string + ip_address: + type: string + mac_address: + type: string + memory_mb: + type: integer + name: + type: string + org_id: + type: string + source_vm: + type: string + state: + $ref: "#/components/schemas/store.SandboxState" + tap_device: + type: string + ttl_seconds: + type: integer + updated_at: + type: string + vcpus: + type: integer + type: object + store.SandboxState: + enum: + - CREATING + - RUNNING + - STOPPED + - DESTROYED + - ERROR + type: string + x-enum-varnames: + - SandboxStateCreating + - SandboxStateRunning + - SandboxStateStopped + - SandboxStateDestroyed + - SandboxStateError +x-original-swagger-version: "2.0" diff --git a/fluid-remote/go.mod b/api/go.mod similarity index 50% rename from fluid-remote/go.mod rename to api/go.mod index 81a033c2..4ce58afa 100644 --- a/fluid-remote/go.mod +++ b/api/go.mod @@ -1,4 +1,4 @@ -module github.com/aspectrr/fluid.sh/fluid-remote +module github.com/aspectrr/fluid.sh/api go 1.24.0 @@ -6,24 +6,24 @@ toolchain go1.24.4 require ( github.com/MarceloPetrucio/go-scalar-api-reference v0.0.0-20240521013641-ce5d2efe0e06 - github.com/beevik/etree v1.4.0 - github.com/go-chi/chi/v5 v5.0.10 + 
github.com/aspectrr/fluid.sh/proto/gen/go v0.0.0-00010101000000-000000000000 + github.com/go-chi/chi/v5 v5.2.5 github.com/google/uuid v1.6.0 - github.com/gorilla/websocket v1.5.3 github.com/jackc/pgconn v1.14.3 - github.com/posthog/posthog-go v1.9.0 - github.com/stretchr/testify v1.11.1 - gopkg.in/yaml.v3 v3.0.1 - gorm.io/datatypes v1.2.7 + github.com/joho/godotenv v1.5.1 + github.com/posthog/posthog-go v1.10.0 + github.com/stripe/stripe-go/v82 v82.5.1 + golang.org/x/crypto v0.46.0 + golang.org/x/oauth2 v0.34.0 + golang.org/x/sync v0.19.0 + golang.org/x/time v0.14.0 + google.golang.org/grpc v1.79.1 gorm.io/driver/postgres v1.6.0 gorm.io/gorm v1.31.1 - libvirt.org/go/libvirt v1.11010.0 ) require ( - filippo.io/edwards25519 v1.1.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-sql-driver/mysql v1.8.1 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect github.com/goccy/go-json v0.10.5 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect @@ -35,11 +35,11 @@ require ( github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/rogpeppe/go-internal v1.14.1 // indirect - golang.org/x/crypto v0.32.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/text v0.21.0 // indirect - gorm.io/driver/mysql v1.5.6 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/text v0.32.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/protobuf v1.36.10 // indirect ) + +replace github.com/aspectrr/fluid.sh/proto/gen/go => ../proto/gen/go diff --git a/api/go.sum b/api/go.sum new file mode 100644 index 00000000..850f4442 --- /dev/null +++ b/api/go.sum @@ -0,0 +1,105 @@ 
+cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +github.com/MarceloPetrucio/go-scalar-api-reference v0.0.0-20240521013641-ce5d2efe0e06 h1:W4Yar1SUsPmmA51qoIRb174uDO/Xt3C48MB1YX9Y3vM= +github.com/MarceloPetrucio/go-scalar-api-reference v0.0.0-20240521013641-ce5d2efe0e06/go.mod h1:/wotfjM8I3m8NuIHPz3S8k+CCYH80EqDT8ZeNLqMQm0= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug= +github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= +github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= 
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posthog/posthog-go v1.10.0 h1:wfoy7Jfb4LigCoHYyMZoiJmmEoCLOkSaYfDxM/NtCqY= +github.com/posthog/posthog-go v1.10.0/go.mod h1:wB3/9Q7d9gGb1P/yf/Wri9VBlbP8oA8z++prRzL5OcY= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/stripe/stripe-go/v82 v82.5.1 h1:05q6ZDKoe8PLMpQV072obF74HCgP4XJeJYoNuRSX2+8= +github.com/stripe/stripe-go/v82 v82.5.1/go.mod h1:majCQX6AfObAvJiHraPi/5udwHi4ojRvJnnxckvHrX8= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod 
h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum 
v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= +google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4= +gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo= +gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg= +gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs= diff --git a/api/internal/agent/models.go b/api/internal/agent/models.go new file mode 100644 index 00000000..2b055b09 --- /dev/null +++ b/api/internal/agent/models.go @@ -0,0 +1,212 @@ +package agent + +// Model caching - commented out, not yet ready for integration. +/* +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "slices" + "strconv" + "strings" + "sync" + "time" +) + +// ModelInfo represents a model with pricing and capability info. 
+type ModelInfo struct { + ID string `json:"id"` + Name string `json:"name"` + InputCostPer1K float64 `json:"input_cost_per_1k"` + OutputCostPer1K float64 `json:"output_cost_per_1k"` + ContextLimit int `json:"context_limit"` + OutputLimit int `json:"output_limit"` + ToolCall bool `json:"tool_call"` + Reasoning bool `json:"reasoning"` +} + +// ModelCache caches the list of available models with a TTL. +type ModelCache struct { + mu sync.RWMutex + models []ModelInfo + fetchedAt time.Time + ttl time.Duration +} + +// NewModelCache creates a new model cache with the given TTL. +func NewModelCache(ttl time.Duration) *ModelCache { + return &ModelCache{ttl: ttl} +} + +// GetModels returns cached models or re-fetches if the cache is stale. +func (mc *ModelCache) GetModels(ctx context.Context) ([]ModelInfo, error) { + mc.mu.RLock() + if len(mc.models) > 0 && time.Since(mc.fetchedAt) < mc.ttl { + models := make([]ModelInfo, len(mc.models)) + copy(models, mc.models) + mc.mu.RUnlock() + return models, nil + } + mc.mu.RUnlock() + + models, err := fetchFromOpenRouter(ctx) + if err != nil { + // Return fallback if cache is empty + mc.mu.RLock() + defer mc.mu.RUnlock() + if len(mc.models) > 0 { + result := make([]ModelInfo, len(mc.models)) + copy(result, mc.models) + return result, nil + } + return fallbackModels(), nil + } + + mc.mu.Lock() + mc.models = models + mc.fetchedAt = time.Now() + mc.mu.Unlock() + + result := make([]ModelInfo, len(models)) + copy(result, models) + return result, nil +} + +// openRouterModelsResponse is the response from GET /api/v1/models. 
+type openRouterModelsResponse struct { + Data []openRouterModelEntry `json:"data"` +} + +type openRouterModelEntry struct { + ID string `json:"id"` + Name string `json:"name"` + ContextLength int `json:"context_length"` + Pricing openRouterPricing `json:"pricing"` + TopProvider *openRouterTop `json:"top_provider"` + SupportedParameters []string `json:"supported_parameters"` +} + +type openRouterPricing struct { + Prompt string `json:"prompt"` + Completion string `json:"completion"` +} + +type openRouterTop struct { + ContextLength int `json:"context_length"` + MaxCompletionTokens int `json:"max_completion_tokens"` +} + +// fetchFromOpenRouter fetches models from OpenRouter's public API, +// groups by provider prefix, and returns the top model per provider. +func fetchFromOpenRouter(ctx context.Context) ([]ModelInfo, error) { + ctx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "GET", "https://openrouter.ai/api/v1/models", nil) + if err != nil { + return nil, fmt.Errorf("create request: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("fetch models: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("openrouter models API returned %d", resp.StatusCode) + } + + var data openRouterModelsResponse + if err := json.NewDecoder(resp.Body).Decode(&data); err != nil { + return nil, fmt.Errorf("decode response: %w", err) + } + + // Group by provider prefix, pick top model per provider + // "top" = highest input cost among models with tool_call support + type candidate struct { + entry openRouterModelEntry + inputPer1K float64 + } + + topByProvider := make(map[string]candidate) + + for _, entry := range data.Data { + parts := strings.SplitN(entry.ID, "/", 2) + if len(parts) != 2 { + continue + } + provider := parts[0] + + hasToolCall := slices.Contains(entry.SupportedParameters, 
"tool_choice") + if !hasToolCall { + continue + } + + inputPerToken, err := strconv.ParseFloat(entry.Pricing.Prompt, 64) + if err != nil || inputPerToken <= 0 { + continue + } + + inputPer1K := inputPerToken * 1000.0 + + existing, ok := topByProvider[provider] + if !ok || inputPer1K > existing.inputPer1K { + topByProvider[provider] = candidate{entry: entry, inputPer1K: inputPer1K} + } + } + + models := make([]ModelInfo, 0, len(topByProvider)) + for _, c := range topByProvider { + outputPerToken, _ := strconv.ParseFloat(c.entry.Pricing.Completion, 64) + + outputLimit := 0 + if c.entry.TopProvider != nil { + outputLimit = c.entry.TopProvider.MaxCompletionTokens + } + + hasReasoning := slices.Contains(c.entry.SupportedParameters, "reasoning") || + slices.Contains(c.entry.SupportedParameters, "include_reasoning") + + models = append(models, ModelInfo{ + ID: c.entry.ID, + Name: c.entry.Name, + InputCostPer1K: c.inputPer1K, + OutputCostPer1K: outputPerToken * 1000.0, + ContextLimit: c.entry.ContextLength, + OutputLimit: outputLimit, + ToolCall: true, + Reasoning: hasReasoning, + }) + } + + // Sort by input cost descending for consistent ordering + slices.SortFunc(models, func(a, b ModelInfo) int { + if a.InputCostPer1K > b.InputCostPer1K { + return -1 + } + if a.InputCostPer1K < b.InputCostPer1K { + return 1 + } + return strings.Compare(a.ID, b.ID) + }) + + if len(models) == 0 { + return nil, fmt.Errorf("no models with tool_call support found") + } + + return models, nil +} + +// fallbackModels returns hardcoded models when the API is unreachable. 
+func fallbackModels() []ModelInfo { + return []ModelInfo{ + {ID: "anthropic/claude-sonnet-4", Name: "Claude Sonnet 4", InputCostPer1K: 0.003, OutputCostPer1K: 0.015, ContextLimit: 200000, OutputLimit: 64000, ToolCall: true, Reasoning: false}, + {ID: "anthropic/claude-haiku-4", Name: "Claude Haiku 4", InputCostPer1K: 0.0008, OutputCostPer1K: 0.004, ContextLimit: 200000, OutputLimit: 64000, ToolCall: true, Reasoning: false}, + {ID: "openai/gpt-4o", Name: "GPT-4o", InputCostPer1K: 0.0025, OutputCostPer1K: 0.01, ContextLimit: 128000, OutputLimit: 16384, ToolCall: true, Reasoning: false}, + {ID: "openai/gpt-4o-mini", Name: "GPT-4o Mini", InputCostPer1K: 0.00015, OutputCostPer1K: 0.0006, ContextLimit: 128000, OutputLimit: 16384, ToolCall: true, Reasoning: false}, + {ID: "google/gemini-2.5-pro", Name: "Gemini 2.5 Pro", InputCostPer1K: 0.00125, OutputCostPer1K: 0.01, ContextLimit: 1000000, OutputLimit: 65536, ToolCall: true, Reasoning: true}, + } +} +*/ diff --git a/api/internal/agent/models_test.go b/api/internal/agent/models_test.go new file mode 100644 index 00000000..fcad6c64 --- /dev/null +++ b/api/internal/agent/models_test.go @@ -0,0 +1,132 @@ +package agent + +// Model caching tests - commented out, not yet ready for integration. 
+/* +import ( + "testing" + "time" +) + +// AvailableModels tests - commented out, function is in commented-out openrouter.go + +func TestAvailableModels_NonEmpty(t *testing.T) { + models := AvailableModels() + if len(models) == 0 { + t.Fatal("expected non-empty list of models") + } +} + +func TestAvailableModels_HasExpectedFields(t *testing.T) { + models := AvailableModels() + for i, m := range models { + if _, ok := m["id"]; !ok { + t.Errorf("model %d: missing 'id' field", i) + } + if _, ok := m["name"]; !ok { + t.Errorf("model %d: missing 'name' field", i) + } + if _, ok := m["input_cost_per_1k"]; !ok { + t.Errorf("model %d: missing 'input_cost_per_1k' field", i) + } + if _, ok := m["output_cost_per_1k"]; !ok { + t.Errorf("model %d: missing 'output_cost_per_1k' field", i) + } + } +} + +func TestAvailableModels_ContainsClaude(t *testing.T) { + models := AvailableModels() + found := false + for _, m := range models { + if id, ok := m["id"].(string); ok && id == "anthropic/claude-sonnet-4" { + found = true + break + } + } + if !found { + t.Error("expected AvailableModels to contain anthropic/claude-sonnet-4") + } +} + +func TestFallbackModels_NonEmpty(t *testing.T) { + models := fallbackModels() + if len(models) == 0 { + t.Fatal("expected non-empty fallback models") + } +} + +func TestFallbackModels_AllHaveToolCall(t *testing.T) { + models := fallbackModels() + for _, m := range models { + if !m.ToolCall { + t.Errorf("model %s: expected ToolCall=true", m.ID) + } + } +} + +func TestFallbackModels_FieldsPopulated(t *testing.T) { + models := fallbackModels() + for _, m := range models { + if m.ID == "" { + t.Error("expected non-empty ID") + } + if m.Name == "" { + t.Errorf("model %s: expected non-empty Name", m.ID) + } + if m.InputCostPer1K <= 0 { + t.Errorf("model %s: expected positive InputCostPer1K, got %f", m.ID, m.InputCostPer1K) + } + if m.OutputCostPer1K <= 0 { + t.Errorf("model %s: expected positive OutputCostPer1K, got %f", m.ID, m.OutputCostPer1K) + } + if 
m.ContextLimit <= 0 { + t.Errorf("model %s: expected positive ContextLimit, got %d", m.ID, m.ContextLimit) + } + if m.OutputLimit <= 0 { + t.Errorf("model %s: expected positive OutputLimit, got %d", m.ID, m.OutputLimit) + } + } +} + +func TestNewModelCache(t *testing.T) { + mc := NewModelCache(5 * time.Minute) + if mc == nil { + t.Fatal("expected non-nil ModelCache") + } + if mc.ttl != 5*time.Minute { + t.Errorf("expected ttl 5m, got %v", mc.ttl) + } +} + +func TestModelCache_GetModels_ReturnsFallbackWhenEmpty(t *testing.T) { + // Use a very short TTL so cache is always stale + mc := NewModelCache(0) + + // GetModels will try to fetch from OpenRouter (which will fail in tests) + // and should fall back to fallbackModels + models, err := mc.GetModels(t.Context()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(models) == 0 { + t.Fatal("expected non-empty models from fallback") + } +} + +func TestModelCache_GetModels_ReturnsCopy(t *testing.T) { + mc := NewModelCache(0) + + models1, _ := mc.GetModels(t.Context()) + models2, _ := mc.GetModels(t.Context()) + + if len(models1) == 0 || len(models2) == 0 { + t.Skip("no models returned, cannot test copy behavior") + } + + // Modifying one should not affect the other + models1[0].Name = "mutated" + if models2[0].Name == "mutated" { + t.Error("expected GetModels to return independent copies") + } +} +*/ diff --git a/api/internal/auth/hostauth.go b/api/internal/auth/hostauth.go new file mode 100644 index 00000000..2cdc2c13 --- /dev/null +++ b/api/internal/auth/hostauth.go @@ -0,0 +1,100 @@ +package auth + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "strings" + + "github.com/aspectrr/fluid.sh/api/internal/store" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type hostOrgKey struct{} +type hostTokenIDKey struct{} + +// OrgIDFromContext returns the org ID attached to a gRPC context by the +// host token 
auth interceptor. +func OrgIDFromContext(ctx context.Context) string { + v, _ := ctx.Value(hostOrgKey{}).(string) + return v +} + +// TokenIDFromContext returns the host token ID attached to a gRPC context by +// the host token auth interceptor. +func TokenIDFromContext(ctx context.Context) string { + v, _ := ctx.Value(hostTokenIDKey{}).(string) + return v +} + +// HashToken produces a SHA-256 hex digest of a raw bearer token. +func HashToken(raw string) string { + h := sha256.Sum256([]byte(raw)) + return hex.EncodeToString(h[:]) +} + +// HostTokenStreamInterceptor returns a gRPC stream server interceptor that +// validates bearer tokens from host daemons. On success it attaches the +// host token's org_id to the stream context. +func HostTokenStreamInterceptor(st store.Store) grpc.StreamServerInterceptor { + return func( + srv any, + ss grpc.ServerStream, + info *grpc.StreamServerInfo, + handler grpc.StreamHandler, + ) error { + md, ok := metadata.FromIncomingContext(ss.Context()) + if !ok { + return status.Error(codes.Unauthenticated, "missing metadata") + } + + vals := md.Get("authorization") + if len(vals) == 0 { + return status.Error(codes.Unauthenticated, "missing authorization header") + } + + raw := vals[0] + // Strip "Bearer " prefix if present. + if after, found := strings.CutPrefix(raw, "Bearer "); found { + raw = after + } + + hash := HashToken(raw) + token, err := st.GetHostTokenByHash(ss.Context(), hash) + if err != nil { + return status.Error(codes.Unauthenticated, "invalid host token") + } + + // Attach org_id and token_id to stream context. + ctx := context.WithValue(ss.Context(), hostOrgKey{}, token.OrgID) + ctx = context.WithValue(ctx, hostTokenIDKey{}, token.ID) + wrapped := &wrappedStream{ServerStream: ss, ctx: ctx} + return handler(srv, wrapped) + } +} + +// WithTokenID returns a context carrying the given token ID. +// Exported for use in tests that bypass the interceptor. 
+func WithTokenID(ctx context.Context, tokenID string) context.Context { + return context.WithValue(ctx, hostTokenIDKey{}, tokenID) +} + +// WithOrgID returns a context carrying the given org ID. +// Exported for use in tests that bypass the interceptor. +func WithOrgID(ctx context.Context, orgID string) context.Context { + return context.WithValue(ctx, hostOrgKey{}, orgID) +} + +// wrappedStream overrides Context() to return an enriched context. +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedStream) Context() context.Context { + return w.ctx +} diff --git a/api/internal/auth/hostauth_test.go b/api/internal/auth/hostauth_test.go new file mode 100644 index 00000000..2dcafee8 --- /dev/null +++ b/api/internal/auth/hostauth_test.go @@ -0,0 +1,164 @@ +package auth + +import ( + "context" + "fmt" + "testing" + + "github.com/aspectrr/fluid.sh/api/internal/store" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +func TestHashToken_Deterministic(t *testing.T) { + h1 := HashToken("my-secret-token") + h2 := HashToken("my-secret-token") + if h1 != h2 { + t.Fatalf("HashToken not deterministic: %q != %q", h1, h2) + } +} + +func TestHashToken_Length(t *testing.T) { + h := HashToken("anything") + if len(h) != 64 { + t.Fatalf("HashToken length = %d, want 64", len(h)) + } +} + +func TestOrgIDFromContext_Empty(t *testing.T) { + ctx := context.Background() + if got := OrgIDFromContext(ctx); got != "" { + t.Fatalf("OrgIDFromContext on fresh context = %q, want empty", got) + } +} + +func TestOrgIDFromContext_WithValue(t *testing.T) { + ctx := context.WithValue(context.Background(), hostOrgKey{}, "org-123") + if got := OrgIDFromContext(ctx); got != "org-123" { + t.Fatalf("OrgIDFromContext = %q, want %q", got, "org-123") + } +} + +// --- mock gRPC server stream --- + +type mockServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (m 
*mockServerStream) Context() context.Context { + return m.ctx +} + +// --- HostTokenStreamInterceptor tests --- + +func TestHostTokenStreamInterceptor_MissingMetadata(t *testing.T) { + st := &mockStore{} + interceptor := HostTokenStreamInterceptor(st) + + // Context with no metadata at all. + ss := &mockServerStream{ctx: context.Background()} + + err := interceptor(nil, ss, &grpc.StreamServerInfo{}, func(_ any, _ grpc.ServerStream) error { + t.Fatal("handler should not be called") + return nil + }) + + if err == nil { + t.Fatal("expected error, got nil") + } + if s, ok := status.FromError(err); !ok || s.Code() != codes.Unauthenticated { + t.Fatalf("expected Unauthenticated, got %v", err) + } +} + +func TestHostTokenStreamInterceptor_MissingAuthorization(t *testing.T) { + st := &mockStore{} + interceptor := HostTokenStreamInterceptor(st) + + // Metadata present but no "authorization" key. + md := metadata.New(map[string]string{"other": "value"}) + ctx := metadata.NewIncomingContext(context.Background(), md) + ss := &mockServerStream{ctx: ctx} + + err := interceptor(nil, ss, &grpc.StreamServerInfo{}, func(_ any, _ grpc.ServerStream) error { + t.Fatal("handler should not be called") + return nil + }) + + if err == nil { + t.Fatal("expected error, got nil") + } + if s, ok := status.FromError(err); !ok || s.Code() != codes.Unauthenticated { + t.Fatalf("expected Unauthenticated, got %v", err) + } +} + +func TestHostTokenStreamInterceptor_InvalidToken(t *testing.T) { + st := &mockStore{ + getHostTokenByHashFn: func(_ context.Context, _ string) (*store.HostToken, error) { + return nil, fmt.Errorf("not found") + }, + } + interceptor := HostTokenStreamInterceptor(st) + + md := metadata.New(map[string]string{"authorization": "Bearer bad-token"}) + ctx := metadata.NewIncomingContext(context.Background(), md) + ss := &mockServerStream{ctx: ctx} + + err := interceptor(nil, ss, &grpc.StreamServerInfo{}, func(_ any, _ grpc.ServerStream) error { + t.Fatal("handler should not be 
called") + return nil + }) + + if err == nil { + t.Fatal("expected error, got nil") + } + if s, ok := status.FromError(err); !ok || s.Code() != codes.Unauthenticated { + t.Fatalf("expected Unauthenticated, got %v", err) + } +} + +func TestHostTokenStreamInterceptor_ValidToken(t *testing.T) { + rawToken := "valid-host-token" + expectedOrgID := "org-456" + + st := &mockStore{ + getHostTokenByHashFn: func(_ context.Context, hash string) (*store.HostToken, error) { + want := HashToken(rawToken) + if hash != want { + return nil, fmt.Errorf("unexpected hash") + } + return &store.HostToken{ + ID: "tok-1", + OrgID: expectedOrgID, + Name: "test-host", + }, nil + }, + } + interceptor := HostTokenStreamInterceptor(st) + + md := metadata.New(map[string]string{"authorization": "Bearer " + rawToken}) + ctx := metadata.NewIncomingContext(context.Background(), md) + ss := &mockServerStream{ctx: ctx} + + var handlerCalled bool + err := interceptor(nil, ss, &grpc.StreamServerInfo{}, func(_ any, stream grpc.ServerStream) error { + handlerCalled = true + orgID := OrgIDFromContext(stream.Context()) + if orgID != expectedOrgID { + t.Fatalf("OrgIDFromContext = %q, want %q", orgID, expectedOrgID) + } + return nil + }) + + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !handlerCalled { + t.Fatal("handler was not called") + } +} diff --git a/api/internal/auth/middleware.go b/api/internal/auth/middleware.go new file mode 100644 index 00000000..fe331f63 --- /dev/null +++ b/api/internal/auth/middleware.go @@ -0,0 +1,47 @@ +package auth + +import ( + "context" + "net/http" + + "fmt" + serverError "github.com/aspectrr/fluid.sh/api/internal/error" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +type userKey struct{} + +func UserFromContext(ctx context.Context) *store.User { + u, _ := ctx.Value(userKey{}).(*store.User) + return u +} + +// RequireAuth is middleware that validates session cookie and loads user into context. 
+func RequireAuth(st store.Store, secureCookies bool) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cookie, err := r.Cookie(SessionCookieName) + if err != nil { + serverError.RespondError(w, http.StatusUnauthorized, fmt.Errorf("authentication required")) + return + } + + sess, err := st.GetSession(r.Context(), HashSessionToken(cookie.Value)) + if err != nil { + ClearSessionCookie(w, secureCookies) + serverError.RespondError(w, http.StatusUnauthorized, fmt.Errorf("invalid or expired session")) + return + } + + user, err := st.GetUser(r.Context(), sess.UserID) + if err != nil { + ClearSessionCookie(w, secureCookies) + serverError.RespondError(w, http.StatusUnauthorized, fmt.Errorf("user not found")) + return + } + + ctx := context.WithValue(r.Context(), userKey{}, user) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + } +} diff --git a/api/internal/auth/middleware_test.go b/api/internal/auth/middleware_test.go new file mode 100644 index 00000000..c4f837ee --- /dev/null +++ b/api/internal/auth/middleware_test.go @@ -0,0 +1,119 @@ +package auth + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +func TestRequireAuth_NoCookie(t *testing.T) { + st := &mockStore{} + handler := RequireAuth(st, true)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Fatal("handler should not be called") + })) + + req := httptest.NewRequest(http.MethodGet, "/protected", nil) + w := httptest.NewRecorder() + handler.ServeHTTP(w, req) + + if w.Code != http.StatusUnauthorized { + t.Fatalf("status = %d, want %d", w.Code, http.StatusUnauthorized) + } +} + +func TestRequireAuth_InvalidSession(t *testing.T) { + st := &mockStore{ + getSessionFn: func(_ context.Context, _ string) (*store.Session, error) { + return nil, fmt.Errorf("session not found") + }, + } + handler := RequireAuth(st, 
true)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Fatal("handler should not be called") + })) + + req := httptest.NewRequest(http.MethodGet, "/protected", nil) + req.AddCookie(&http.Cookie{Name: SessionCookieName, Value: "bad-session-id"}) + w := httptest.NewRecorder() + handler.ServeHTTP(w, req) + + if w.Code != http.StatusUnauthorized { + t.Fatalf("status = %d, want %d", w.Code, http.StatusUnauthorized) + } +} + +func TestRequireAuth_UserNotFound(t *testing.T) { + st := &mockStore{ + getSessionFn: func(_ context.Context, id string) (*store.Session, error) { + return &store.Session{ID: id, UserID: "user-gone"}, nil + }, + getUserFn: func(_ context.Context, _ string) (*store.User, error) { + return nil, fmt.Errorf("user not found") + }, + } + handler := RequireAuth(st, true)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Fatal("handler should not be called") + })) + + req := httptest.NewRequest(http.MethodGet, "/protected", nil) + req.AddCookie(&http.Cookie{Name: SessionCookieName, Value: "valid-session"}) + w := httptest.NewRecorder() + handler.ServeHTTP(w, req) + + if w.Code != http.StatusUnauthorized { + t.Fatalf("status = %d, want %d", w.Code, http.StatusUnauthorized) + } +} + +func TestRequireAuth_ValidSession(t *testing.T) { + expectedUser := &store.User{ID: "user-1", Email: "test@example.com"} + + rawToken := "good-token" + hashedToken := HashSessionToken(rawToken) + + st := &mockStore{ + getSessionFn: func(_ context.Context, id string) (*store.Session, error) { + if id != hashedToken { + return nil, fmt.Errorf("not found") + } + return &store.Session{ID: id, UserID: expectedUser.ID}, nil + }, + getUserFn: func(_ context.Context, id string) (*store.User, error) { + if id != expectedUser.ID { + return nil, fmt.Errorf("not found") + } + return expectedUser, nil + }, + } + + var handlerCalled bool + handler := RequireAuth(st, true)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handlerCalled = true 
+ u := UserFromContext(r.Context()) + if u == nil { + t.Fatal("UserFromContext returned nil") + } + if u.ID != expectedUser.ID { + t.Fatalf("user ID = %q, want %q", u.ID, expectedUser.ID) + } + if u.Email != expectedUser.Email { + t.Fatalf("user Email = %q, want %q", u.Email, expectedUser.Email) + } + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest(http.MethodGet, "/protected", nil) + req.AddCookie(&http.Cookie{Name: SessionCookieName, Value: rawToken}) + w := httptest.NewRecorder() + handler.ServeHTTP(w, req) + + if !handlerCalled { + t.Fatal("inner handler was not called") + } + if w.Code != http.StatusOK { + t.Fatalf("status = %d, want %d", w.Code, http.StatusOK) + } +} diff --git a/api/internal/auth/oauth.go b/api/internal/auth/oauth.go new file mode 100644 index 00000000..72a3d685 --- /dev/null +++ b/api/internal/auth/oauth.go @@ -0,0 +1,87 @@ +package auth + +import ( + "crypto/rand" + "crypto/subtle" + "encoding/hex" + "fmt" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/github" + "golang.org/x/oauth2/google" +) + +const ( + OAuthStateCookieName = "fluid_oauth_state" + oauthStateLen = 32 + oauthStateMaxAge = 600 // 10 minutes +) + +func GenerateOAuthState() (string, error) { + b := make([]byte, oauthStateLen) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("generate oauth state: %w", err) + } + return hex.EncodeToString(b), nil +} + +func SetOAuthStateCookie(w http.ResponseWriter, state string, secure bool) { + http.SetCookie(w, &http.Cookie{ + Name: OAuthStateCookieName, + Value: state, + Path: "/", + HttpOnly: true, + Secure: secure, + SameSite: http.SameSiteLaxMode, + MaxAge: oauthStateMaxAge, + }) +} + +func ClearOAuthStateCookie(w http.ResponseWriter) { + http.SetCookie(w, &http.Cookie{ + Name: OAuthStateCookieName, + Value: "", + Path: "/", + HttpOnly: true, + MaxAge: -1, + }) +} + +func ValidateOAuthState(r *http.Request) error { + state := r.URL.Query().Get("state") + if state == "" { + return 
fmt.Errorf("missing state parameter") + } + + cookie, err := r.Cookie(OAuthStateCookieName) + if err != nil { + return fmt.Errorf("missing oauth state cookie") + } + + if subtle.ConstantTimeCompare([]byte(state), []byte(cookie.Value)) != 1 { + return fmt.Errorf("state mismatch") + } + + return nil +} + +func GitHubOAuthConfig(clientID, clientSecret, redirectURL string) *oauth2.Config { + return &oauth2.Config{ + ClientID: clientID, + ClientSecret: clientSecret, + RedirectURL: redirectURL, + Scopes: []string{"user:email"}, + Endpoint: github.Endpoint, + } +} + +func GoogleOAuthConfig(clientID, clientSecret, redirectURL string) *oauth2.Config { + return &oauth2.Config{ + ClientID: clientID, + ClientSecret: clientSecret, + RedirectURL: redirectURL, + Scopes: []string{"openid", "email", "profile"}, + Endpoint: google.Endpoint, + } +} diff --git a/api/internal/auth/oauth_test.go b/api/internal/auth/oauth_test.go new file mode 100644 index 00000000..b54ff4c7 --- /dev/null +++ b/api/internal/auth/oauth_test.go @@ -0,0 +1,154 @@ +package auth + +import ( + "encoding/hex" + "net/http" + "net/http/httptest" + "testing" + + "golang.org/x/oauth2/github" + "golang.org/x/oauth2/google" +) + +func TestGitHubOAuthConfig(t *testing.T) { + cfg := GitHubOAuthConfig("gh-id", "gh-secret", "http://localhost/callback") + + if cfg.ClientID != "gh-id" { + t.Fatalf("ClientID = %q, want %q", cfg.ClientID, "gh-id") + } + if cfg.ClientSecret != "gh-secret" { + t.Fatalf("ClientSecret = %q, want %q", cfg.ClientSecret, "gh-secret") + } + if cfg.RedirectURL != "http://localhost/callback" { + t.Fatalf("RedirectURL = %q, want %q", cfg.RedirectURL, "http://localhost/callback") + } + + wantScopes := []string{"user:email"} + if len(cfg.Scopes) != len(wantScopes) { + t.Fatalf("Scopes length = %d, want %d", len(cfg.Scopes), len(wantScopes)) + } + for i, s := range cfg.Scopes { + if s != wantScopes[i] { + t.Fatalf("Scopes[%d] = %q, want %q", i, s, wantScopes[i]) + } + } + + if cfg.Endpoint != 
github.Endpoint { + t.Fatalf("Endpoint does not match github.Endpoint") + } +} + +func TestGoogleOAuthConfig(t *testing.T) { + cfg := GoogleOAuthConfig("g-id", "g-secret", "http://localhost/google/callback") + + if cfg.ClientID != "g-id" { + t.Fatalf("ClientID = %q, want %q", cfg.ClientID, "g-id") + } + if cfg.ClientSecret != "g-secret" { + t.Fatalf("ClientSecret = %q, want %q", cfg.ClientSecret, "g-secret") + } + if cfg.RedirectURL != "http://localhost/google/callback" { + t.Fatalf("RedirectURL = %q, want %q", cfg.RedirectURL, "http://localhost/google/callback") + } + + wantScopes := []string{"openid", "email", "profile"} + if len(cfg.Scopes) != len(wantScopes) { + t.Fatalf("Scopes length = %d, want %d", len(cfg.Scopes), len(wantScopes)) + } + for i, s := range cfg.Scopes { + if s != wantScopes[i] { + t.Fatalf("Scopes[%d] = %q, want %q", i, s, wantScopes[i]) + } + } + + if cfg.Endpoint != google.Endpoint { + t.Fatalf("Endpoint does not match google.Endpoint") + } +} + +func TestGenerateOAuthState(t *testing.T) { + state, err := GenerateOAuthState() + if err != nil { + t.Fatalf("GenerateOAuthState() error = %v", err) + } + + // Should be 64 hex chars (32 bytes) + if len(state) != 64 { + t.Fatalf("state length = %d, want 64", len(state)) + } + + if _, err := hex.DecodeString(state); err != nil { + t.Fatalf("state is not valid hex: %v", err) + } + + // Two calls should produce different values + state2, _ := GenerateOAuthState() + if state == state2 { + t.Fatal("two calls returned the same state") + } +} + +func TestSetAndClearOAuthStateCookie(t *testing.T) { + w := httptest.NewRecorder() + SetOAuthStateCookie(w, "test-state", true) + + cookies := w.Result().Cookies() + if len(cookies) == 0 { + t.Fatal("SetOAuthStateCookie did not set any cookie") + } + + c := cookies[0] + if c.Name != OAuthStateCookieName { + t.Fatalf("cookie Name = %q, want %q", c.Name, OAuthStateCookieName) + } + if c.Value != "test-state" { + t.Fatalf("cookie Value = %q, want %q", c.Value, 
"test-state") + } + if !c.HttpOnly { + t.Fatal("cookie should be HttpOnly") + } + if !c.Secure { + t.Fatal("cookie should be Secure when secure=true") + } + if c.MaxAge != oauthStateMaxAge { + t.Fatalf("cookie MaxAge = %d, want %d", c.MaxAge, oauthStateMaxAge) + } + + w2 := httptest.NewRecorder() + ClearOAuthStateCookie(w2) + cookies2 := w2.Result().Cookies() + if len(cookies2) == 0 { + t.Fatal("ClearOAuthStateCookie did not set any cookie") + } + if cookies2[0].MaxAge != -1 { + t.Fatalf("cleared cookie MaxAge = %d, want -1", cookies2[0].MaxAge) + } +} + +func TestValidateOAuthState(t *testing.T) { + // Missing state param + req := httptest.NewRequest("GET", "/callback", nil) + if err := ValidateOAuthState(req); err == nil { + t.Fatal("expected error for missing state param") + } + + // Missing cookie + req = httptest.NewRequest("GET", "/callback?state=abc", nil) + if err := ValidateOAuthState(req); err == nil { + t.Fatal("expected error for missing cookie") + } + + // Mismatched state + req = httptest.NewRequest("GET", "/callback?state=abc", nil) + req.AddCookie(&http.Cookie{Name: OAuthStateCookieName, Value: "xyz"}) + if err := ValidateOAuthState(req); err == nil { + t.Fatal("expected error for mismatched state") + } + + // Matching state + req = httptest.NewRequest("GET", "/callback?state=matching-value", nil) + req.AddCookie(&http.Cookie{Name: OAuthStateCookieName, Value: "matching-value"}) + if err := ValidateOAuthState(req); err != nil { + t.Fatalf("ValidateOAuthState() unexpected error = %v", err) + } +} diff --git a/api/internal/auth/password.go b/api/internal/auth/password.go new file mode 100644 index 00000000..b206e425 --- /dev/null +++ b/api/internal/auth/password.go @@ -0,0 +1,17 @@ +package auth + +import "golang.org/x/crypto/bcrypt" + +const bcryptCost = 12 + +func HashPassword(password string) (string, error) { + hash, err := bcrypt.GenerateFromPassword([]byte(password), bcryptCost) + if err != nil { + return "", err + } + return string(hash), nil 
+} + +func VerifyPassword(hash, password string) error { + return bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) +} diff --git a/api/internal/auth/password_test.go b/api/internal/auth/password_test.go new file mode 100644 index 00000000..8cd71b08 --- /dev/null +++ b/api/internal/auth/password_test.go @@ -0,0 +1,54 @@ +package auth + +import ( + "testing" +) + +func TestHashPassword_NonEmpty(t *testing.T) { + hash, err := HashPassword("s3cret") + if err != nil { + t.Fatalf("HashPassword returned error: %v", err) + } + if hash == "" { + t.Fatal("HashPassword returned empty string") + } +} + +func TestVerifyPassword_Correct(t *testing.T) { + password := "correct-horse-battery-staple" + hash, err := HashPassword(password) + if err != nil { + t.Fatalf("HashPassword returned error: %v", err) + } + + if err := VerifyPassword(hash, password); err != nil { + t.Fatalf("VerifyPassword failed with correct password: %v", err) + } +} + +func TestVerifyPassword_Wrong(t *testing.T) { + hash, err := HashPassword("real-password") + if err != nil { + t.Fatalf("HashPassword returned error: %v", err) + } + + if err := VerifyPassword(hash, "wrong-password"); err == nil { + t.Fatal("VerifyPassword should fail with wrong password") + } +} + +func TestHashPassword_DifferentSalts(t *testing.T) { + password := "same-password" + h1, err := HashPassword(password) + if err != nil { + t.Fatalf("first HashPassword returned error: %v", err) + } + h2, err := HashPassword(password) + if err != nil { + t.Fatalf("second HashPassword returned error: %v", err) + } + + if h1 == h2 { + t.Fatal("HashPassword produced identical hashes for the same input - bcrypt salt is not working") + } +} diff --git a/api/internal/auth/session.go b/api/internal/auth/session.go new file mode 100644 index 00000000..7f783e21 --- /dev/null +++ b/api/internal/auth/session.go @@ -0,0 +1,80 @@ +package auth + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "fmt" + "net/http" + "time" + + 
"github.com/aspectrr/fluid.sh/api/internal/store" +) + +// HashSessionToken returns the SHA-256 hex digest of a raw session token. +// The raw token is returned to the user in a cookie; the hash is stored in the DB. +func HashSessionToken(raw string) string { + h := sha256.Sum256([]byte(raw)) + return hex.EncodeToString(h[:]) +} + +const ( + SessionCookieName = "fluid_session" + sessionTokenLen = 32 +) + +func generateSessionToken() (string, error) { + b := make([]byte, sessionTokenLen) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("generate session token: %w", err) + } + return hex.EncodeToString(b), nil +} + +// CreateSession generates a random token, stores its SHA-256 hash as the +// session ID in the database, and returns both the raw token (for the cookie) +// and the session record. +func CreateSession(ctx context.Context, st store.Store, userID, ip, ua string, ttl time.Duration) (rawToken string, sess *store.Session, err error) { + raw, err := generateSessionToken() + if err != nil { + return "", nil, err + } + + sess = &store.Session{ + ID: HashSessionToken(raw), + UserID: userID, + IPAddress: ip, + UserAgent: ua, + ExpiresAt: time.Now().UTC().Add(ttl), + } + + if err := st.CreateSession(ctx, sess); err != nil { + return "", nil, fmt.Errorf("create session: %w", err) + } + return raw, sess, nil +} + +func SetSessionCookie(w http.ResponseWriter, token string, ttl time.Duration, secure bool) { + http.SetCookie(w, &http.Cookie{ + Name: SessionCookieName, + Value: token, + Path: "/", + HttpOnly: true, + Secure: secure, + SameSite: http.SameSiteStrictMode, + MaxAge: int(ttl.Seconds()), + }) +} + +func ClearSessionCookie(w http.ResponseWriter, secure bool) { + http.SetCookie(w, &http.Cookie{ + Name: SessionCookieName, + Value: "", + Path: "/", + HttpOnly: true, + Secure: secure, + SameSite: http.SameSiteStrictMode, + MaxAge: -1, + }) +} diff --git a/api/internal/auth/session_test.go b/api/internal/auth/session_test.go new file mode 100644 
index 00000000..36c27e96 --- /dev/null +++ b/api/internal/auth/session_test.go @@ -0,0 +1,68 @@ +package auth + +import ( + "net/http/httptest" + "testing" + "time" +) + +func TestSessionCookieName(t *testing.T) { + if SessionCookieName != "fluid_session" { + t.Fatalf("SessionCookieName = %q, want %q", SessionCookieName, "fluid_session") + } +} + +func TestSetSessionCookie(t *testing.T) { + w := httptest.NewRecorder() + ttl := 24 * time.Hour + SetSessionCookie(w, "test-token", ttl, true) + + resp := w.Result() + cookies := resp.Cookies() + if len(cookies) == 0 { + t.Fatal("SetSessionCookie did not set any cookie") + } + + c := cookies[0] + + if c.Name != SessionCookieName { + t.Fatalf("cookie Name = %q, want %q", c.Name, SessionCookieName) + } + if c.Value != "test-token" { + t.Fatalf("cookie Value = %q, want %q", c.Value, "test-token") + } + if c.Path != "/" { + t.Fatalf("cookie Path = %q, want %q", c.Path, "/") + } + if !c.HttpOnly { + t.Fatal("cookie HttpOnly = false, want true") + } + if !c.Secure { + t.Fatal("cookie Secure = false, want true") + } + + expectedMaxAge := int(ttl.Seconds()) + if c.MaxAge != expectedMaxAge { + t.Fatalf("cookie MaxAge = %d, want %d", c.MaxAge, expectedMaxAge) + } +} + +func TestClearSessionCookie(t *testing.T) { + w := httptest.NewRecorder() + ClearSessionCookie(w, true) + + resp := w.Result() + cookies := resp.Cookies() + if len(cookies) == 0 { + t.Fatal("ClearSessionCookie did not set any cookie") + } + + c := cookies[0] + + if c.Name != SessionCookieName { + t.Fatalf("cookie Name = %q, want %q", c.Name, SessionCookieName) + } + if c.MaxAge != -1 { + t.Fatalf("cookie MaxAge = %d, want -1", c.MaxAge) + } +} diff --git a/api/internal/auth/testhelpers_test.go b/api/internal/auth/testhelpers_test.go new file mode 100644 index 00000000..6199453e --- /dev/null +++ b/api/internal/auth/testhelpers_test.go @@ -0,0 +1,327 @@ +package auth + +import ( + "context" + "time" + + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +// 
mockStore implements store.Store for testing. Only the methods used in tests +// have real implementations; everything else panics so unexpected calls surface +// immediately. +type mockStore struct { + // Settable hooks for the methods under test. + getSessionFn func(ctx context.Context, id string) (*store.Session, error) + getUserFn func(ctx context.Context, id string) (*store.User, error) + getHostTokenByHashFn func(ctx context.Context, hash string) (*store.HostToken, error) +} + +// ---- store.Store lifecycle methods ---- + +func (m *mockStore) Config() store.Config { return store.Config{} } +func (m *mockStore) Ping(context.Context) error { return nil } +func (m *mockStore) WithTx(_ context.Context, _ func(store.DataStore) error) error { + panic("mockStore: WithTx not implemented") +} +func (m *mockStore) Close() error { return nil } + +// ---- User ---- + +func (m *mockStore) CreateUser(context.Context, *store.User) error { + panic("mockStore: CreateUser not implemented") +} +func (m *mockStore) GetUser(ctx context.Context, id string) (*store.User, error) { + if m.getUserFn != nil { + return m.getUserFn(ctx, id) + } + panic("mockStore: GetUser not implemented") +} +func (m *mockStore) GetUserByEmail(context.Context, string) (*store.User, error) { + panic("mockStore: GetUserByEmail not implemented") +} +func (m *mockStore) UpdateUser(context.Context, *store.User) error { + panic("mockStore: UpdateUser not implemented") +} + +// ---- OAuth ---- + +func (m *mockStore) CreateOAuthAccount(context.Context, *store.OAuthAccount) error { + panic("mockStore: CreateOAuthAccount not implemented") +} +func (m *mockStore) GetOAuthAccount(context.Context, string, string) (*store.OAuthAccount, error) { + panic("mockStore: GetOAuthAccount not implemented") +} +func (m *mockStore) GetOAuthAccountsByUser(context.Context, string) ([]*store.OAuthAccount, error) { + panic("mockStore: GetOAuthAccountsByUser not implemented") +} + +// ---- Session ---- + +func (m *mockStore) 
CreateSession(context.Context, *store.Session) error { + panic("mockStore: CreateSession not implemented") +} +func (m *mockStore) GetSession(ctx context.Context, id string) (*store.Session, error) { + if m.getSessionFn != nil { + return m.getSessionFn(ctx, id) + } + panic("mockStore: GetSession not implemented") +} +func (m *mockStore) DeleteSession(context.Context, string) error { + panic("mockStore: DeleteSession not implemented") +} +func (m *mockStore) DeleteExpiredSessions(context.Context) error { + panic("mockStore: DeleteExpiredSessions not implemented") +} + +// ---- Organization ---- + +func (m *mockStore) CreateOrganization(context.Context, *store.Organization) error { + panic("mockStore: CreateOrganization not implemented") +} +func (m *mockStore) GetOrganization(context.Context, string) (*store.Organization, error) { + panic("mockStore: GetOrganization not implemented") +} +func (m *mockStore) GetOrganizationBySlug(context.Context, string) (*store.Organization, error) { + panic("mockStore: GetOrganizationBySlug not implemented") +} +func (m *mockStore) ListOrganizationsByUser(context.Context, string) ([]*store.Organization, error) { + panic("mockStore: ListOrganizationsByUser not implemented") +} +func (m *mockStore) UpdateOrganization(context.Context, *store.Organization) error { + panic("mockStore: UpdateOrganization not implemented") +} +func (m *mockStore) DeleteOrganization(context.Context, string) error { + panic("mockStore: DeleteOrganization not implemented") +} + +// ---- OrgMember ---- + +func (m *mockStore) CreateOrgMember(context.Context, *store.OrgMember) error { + panic("mockStore: CreateOrgMember not implemented") +} +func (m *mockStore) GetOrgMember(context.Context, string, string) (*store.OrgMember, error) { + panic("mockStore: GetOrgMember not implemented") +} +func (m *mockStore) GetOrgMemberByID(context.Context, string, string) (*store.OrgMember, error) { + panic("mockStore: GetOrgMemberByID not implemented") +} +func (m *mockStore) 
ListOrgMembers(context.Context, string) ([]*store.OrgMember, error) { + panic("mockStore: ListOrgMembers not implemented") +} +func (m *mockStore) DeleteOrgMember(context.Context, string, string) error { + panic("mockStore: DeleteOrgMember not implemented") +} + +// ---- Subscription ---- + +func (m *mockStore) CreateSubscription(context.Context, *store.Subscription) error { + panic("mockStore: CreateSubscription not implemented") +} +func (m *mockStore) GetSubscriptionByOrg(context.Context, string) (*store.Subscription, error) { + panic("mockStore: GetSubscriptionByOrg not implemented") +} +func (m *mockStore) UpdateSubscription(context.Context, *store.Subscription) error { + panic("mockStore: UpdateSubscription not implemented") +} + +// ---- Usage ---- + +func (m *mockStore) CreateUsageRecord(context.Context, *store.UsageRecord) error { + panic("mockStore: CreateUsageRecord not implemented") +} +func (m *mockStore) ListUsageRecords(context.Context, string, time.Time, time.Time) ([]*store.UsageRecord, error) { + panic("mockStore: ListUsageRecords not implemented") +} + +// ---- Host ---- + +func (m *mockStore) CreateHost(context.Context, *store.Host) error { + panic("mockStore: CreateHost not implemented") +} +func (m *mockStore) GetHost(context.Context, string) (*store.Host, error) { + panic("mockStore: GetHost not implemented") +} +func (m *mockStore) ListHosts(context.Context) ([]store.Host, error) { + panic("mockStore: ListHosts not implemented") +} +func (m *mockStore) ListHostsByOrg(context.Context, string) ([]store.Host, error) { + panic("mockStore: ListHostsByOrg not implemented") +} +func (m *mockStore) UpdateHost(context.Context, *store.Host) error { + panic("mockStore: UpdateHost not implemented") +} +func (m *mockStore) UpdateHostHeartbeat(context.Context, string, int32, int64, int64) error { + panic("mockStore: UpdateHostHeartbeat not implemented") +} + +// ---- Sandbox ---- + +func (m *mockStore) CreateSandbox(context.Context, *store.Sandbox) error 
{ + panic("mockStore: CreateSandbox not implemented") +} +func (m *mockStore) GetSandbox(context.Context, string) (*store.Sandbox, error) { + panic("mockStore: GetSandbox not implemented") +} +func (m *mockStore) GetSandboxByOrg(context.Context, string, string) (*store.Sandbox, error) { + panic("mockStore: GetSandboxByOrg not implemented") +} +func (m *mockStore) ListSandboxes(context.Context) ([]store.Sandbox, error) { + panic("mockStore: ListSandboxes not implemented") +} +func (m *mockStore) ListSandboxesByOrg(context.Context, string) ([]store.Sandbox, error) { + panic("mockStore: ListSandboxesByOrg not implemented") +} +func (m *mockStore) UpdateSandbox(context.Context, *store.Sandbox) error { + panic("mockStore: UpdateSandbox not implemented") +} +func (m *mockStore) DeleteSandbox(context.Context, string) error { + panic("mockStore: DeleteSandbox not implemented") +} +func (m *mockStore) GetSandboxesByHostID(context.Context, string) ([]store.Sandbox, error) { + panic("mockStore: GetSandboxesByHostID not implemented") +} +func (m *mockStore) CountSandboxesByHostIDs(context.Context, []string) (map[string]int, error) { + panic("mockStore: CountSandboxesByHostIDs not implemented") +} +func (m *mockStore) ListExpiredSandboxes(context.Context, time.Duration) ([]store.Sandbox, error) { + panic("mockStore: ListExpiredSandboxes not implemented") +} + +// ---- Command ---- + +func (m *mockStore) CreateCommand(context.Context, *store.Command) error { + panic("mockStore: CreateCommand not implemented") +} +func (m *mockStore) ListSandboxCommands(context.Context, string) ([]store.Command, error) { + panic("mockStore: ListSandboxCommands not implemented") +} + +// ---- SourceHost ---- + +func (m *mockStore) CreateSourceHost(context.Context, *store.SourceHost) error { + panic("mockStore: CreateSourceHost not implemented") +} +func (m *mockStore) GetSourceHost(context.Context, string) (*store.SourceHost, error) { + panic("mockStore: GetSourceHost not implemented") +} +func (m 
*mockStore) ListSourceHostsByOrg(context.Context, string) ([]*store.SourceHost, error) { + panic("mockStore: ListSourceHostsByOrg not implemented") +} +func (m *mockStore) DeleteSourceHost(context.Context, string) error { + panic("mockStore: DeleteSourceHost not implemented") +} + +// ---- HostToken ---- + +func (m *mockStore) CreateHostToken(context.Context, *store.HostToken) error { + panic("mockStore: CreateHostToken not implemented") +} +func (m *mockStore) GetHostTokenByHash(ctx context.Context, hash string) (*store.HostToken, error) { + if m.getHostTokenByHashFn != nil { + return m.getHostTokenByHashFn(ctx, hash) + } + panic("mockStore: GetHostTokenByHash not implemented") +} +func (m *mockStore) ListHostTokensByOrg(context.Context, string) ([]store.HostToken, error) { + panic("mockStore: ListHostTokensByOrg not implemented") +} +func (m *mockStore) DeleteHostToken(context.Context, string, string) error { + panic("mockStore: DeleteHostToken not implemented") +} + +// // ---- Agent Conversations ---- + +// func (m *mockStore) CreateAgentConversation(context.Context, *store.AgentConversation) error { +// panic("mockStore: CreateAgentConversation not implemented") +// } +// func (m *mockStore) GetAgentConversation(context.Context, string) (*store.AgentConversation, error) { +// panic("mockStore: GetAgentConversation not implemented") +// } +// func (m *mockStore) ListAgentConversationsByOrg(context.Context, string) ([]*store.AgentConversation, error) { +// panic("mockStore: ListAgentConversationsByOrg not implemented") +// } +// func (m *mockStore) DeleteAgentConversation(context.Context, string) error { +// panic("mockStore: DeleteAgentConversation not implemented") +// } + +// // ---- Agent Messages ---- + +// func (m *mockStore) CreateAgentMessage(context.Context, *store.AgentMessage) error { +// panic("mockStore: CreateAgentMessage not implemented") +// } +// func (m *mockStore) ListAgentMessages(context.Context, string) ([]*store.AgentMessage, error) { +// 
panic("mockStore: ListAgentMessages not implemented") +// } + +// // ---- Playbooks ---- + +// func (m *mockStore) CreatePlaybook(context.Context, *store.Playbook) error { +// panic("mockStore: CreatePlaybook not implemented") +// } +// func (m *mockStore) GetPlaybook(context.Context, string) (*store.Playbook, error) { +// panic("mockStore: GetPlaybook not implemented") +// } +// func (m *mockStore) ListPlaybooksByOrg(context.Context, string) ([]*store.Playbook, error) { +// panic("mockStore: ListPlaybooksByOrg not implemented") +// } +// func (m *mockStore) UpdatePlaybook(context.Context, *store.Playbook) error { +// panic("mockStore: UpdatePlaybook not implemented") +// } +// func (m *mockStore) DeletePlaybook(context.Context, string) error { +// panic("mockStore: DeletePlaybook not implemented") +// } + +// // ---- Playbook Tasks ---- + +// func (m *mockStore) CreatePlaybookTask(context.Context, *store.PlaybookTask) error { +// panic("mockStore: CreatePlaybookTask not implemented") +// } +// func (m *mockStore) GetPlaybookTask(context.Context, string) (*store.PlaybookTask, error) { +// panic("mockStore: GetPlaybookTask not implemented") +// } +// func (m *mockStore) ListPlaybookTasks(context.Context, string) ([]*store.PlaybookTask, error) { +// panic("mockStore: ListPlaybookTasks not implemented") +// } +// func (m *mockStore) UpdatePlaybookTask(context.Context, *store.PlaybookTask) error { +// panic("mockStore: UpdatePlaybookTask not implemented") +// } +// func (m *mockStore) DeletePlaybookTask(context.Context, string) error { +// panic("mockStore: DeletePlaybookTask not implemented") +// } +// func (m *mockStore) ReorderPlaybookTasks(context.Context, string, []string) error { +// panic("mockStore: ReorderPlaybookTasks not implemented") +// } + +// ---- Billing helpers ---- + +func (m *mockStore) GetOrganizationByStripeCustomerID(context.Context, string) (*store.Organization, error) { + panic("mockStore: GetOrganizationByStripeCustomerID not implemented") +} 
+func (m *mockStore) GetModelMeter(context.Context, string) (*store.ModelMeter, error) { + panic("mockStore: GetModelMeter not implemented") +} +func (m *mockStore) CreateModelMeter(context.Context, *store.ModelMeter) error { + panic("mockStore: CreateModelMeter not implemented") +} +func (m *mockStore) GetOrgModelSubscription(context.Context, string, string) (*store.OrgModelSubscription, error) { + panic("mockStore: GetOrgModelSubscription not implemented") +} +func (m *mockStore) CreateOrgModelSubscription(context.Context, *store.OrgModelSubscription) error { + panic("mockStore: CreateOrgModelSubscription not implemented") +} +func (m *mockStore) SumTokenUsage(context.Context, string, time.Time, time.Time) (float64, error) { + panic("mockStore: SumTokenUsage not implemented") +} +func (m *mockStore) ListActiveSubscriptions(context.Context) ([]*store.Subscription, error) { + panic("mockStore: ListActiveSubscriptions not implemented") +} +func (m *mockStore) GetSubscriptionByStripeID(context.Context, string) (*store.Subscription, error) { + panic("mockStore: GetSubscriptionByStripeID not implemented") +} +func (m *mockStore) AcquireAdvisoryLock(context.Context, int64) error { + panic("mockStore: AcquireAdvisoryLock not implemented") +} +func (m *mockStore) ReleaseAdvisoryLock(context.Context, int64) error { + panic("mockStore: ReleaseAdvisoryLock not implemented") +} diff --git a/api/internal/billing/meters.go b/api/internal/billing/meters.go new file mode 100644 index 00000000..38d9d4a5 --- /dev/null +++ b/api/internal/billing/meters.go @@ -0,0 +1,122 @@ +package billing + +import ( + "context" + "fmt" + "log/slog" + "regexp" + "strings" + "time" + + "github.com/stripe/stripe-go/v82" + stripeMeterEvent "github.com/stripe/stripe-go/v82/billing/meterevent" + + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +// MeterManager handles Stripe meter/price creation and usage reporting. 
+type MeterManager struct { + store store.DataStore + stripeKey string + markup float64 + logger *slog.Logger +} + +// NewMeterManager creates a new MeterManager. +func NewMeterManager( + st store.DataStore, + stripeKey string, + markup float64, + logger *slog.Logger, +) *MeterManager { + if logger == nil { + logger = slog.Default() + } + // Set the Stripe key once at initialization rather than per-call + // to avoid a race condition when multiple goroutines set the global. + if stripeKey != "" { + stripe.Key = stripeKey + } + return &MeterManager{ + store: st, + stripeKey: stripeKey, + markup: markup, + logger: logger.With("component", "billing"), + } +} + +// Markup returns the configured billing markup multiplier. +func (mm *MeterManager) Markup() float64 { return mm.markup } + +var nonAlphaNum = regexp.MustCompile(`[^a-z0-9]+`) + +// sanitizeEventName converts a model ID like "anthropic/claude-sonnet-4" to "anthropic_claude_sonnet_4". +func sanitizeEventName(modelID string) string { + s := strings.ToLower(modelID) + s = nonAlphaNum.ReplaceAllString(s, "_") + s = strings.Trim(s, "_") + return s +} + +// ReportResourceUsage reports non-token resource usage (sandboxes, source VMs, daemons) to Stripe. +// The caller is responsible for subtracting free tier counts before calling this. 
+func (mm *MeterManager) ReportResourceUsage(ctx context.Context, stripeCustomerID, eventName string, value int64) { + if value <= 0 || stripeCustomerID == "" { + return + } + + _, err := stripeMeterEvent.New(&stripe.BillingMeterEventParams{ + EventName: stripe.String(eventName), + Payload: map[string]string{ + "stripe_customer_id": stripeCustomerID, + "value": fmt.Sprintf("%d", value), + }, + Identifier: stripe.String(fmt.Sprintf("%s_%s_%d", stripeCustomerID, eventName, time.Now().UTC().Truncate(time.Hour).Unix())), + }) + if err != nil { + mm.logger.Warn("failed to report resource meter event", + "error", err, + "event_name", eventName, + "value", value, + ) + return + } + + mm.logger.Debug("reported resource usage to stripe", + "event_name", eventName, + "value", value, + "customer", stripeCustomerID, + ) +} + +// LLM token metering - commented out, not yet ready for integration. +/* +import ( + "context" + "errors" + "math" + "sync" + + stripeBillingMeter "github.com/stripe/stripe-go/v82/billing/meter" + stripePrice "github.com/stripe/stripe-go/v82/price" + stripeProduct "github.com/stripe/stripe-go/v82/product" + stripeSubItem "github.com/stripe/stripe-go/v82/subscriptionitem" + + "github.com/aspectrr/fluid.sh/api/internal/agent" +) + +// EnsureModelMeter returns an existing ModelMeter or creates Stripe objects and stores a new one. +func (mm *MeterManager) EnsureModelMeter(ctx context.Context, modelID string) (*store.ModelMeter, error) { + ... +} + +// EnsureOrgSubscriptionItems adds subscription items for a model to an org's subscription. +func (mm *MeterManager) EnsureOrgSubscriptionItems(ctx context.Context, orgID, modelID string) error { + ... +} + +// ReportUsage reports token usage to Stripe billing meters, respecting the free tier. +func (mm *MeterManager) ReportUsage(ctx context.Context, orgID, modelID string, inputTokens, outputTokens int) error { + ... 
+} +*/ diff --git a/api/internal/billing/meters_test.go b/api/internal/billing/meters_test.go new file mode 100644 index 00000000..1b339d90 --- /dev/null +++ b/api/internal/billing/meters_test.go @@ -0,0 +1,183 @@ +package billing + +import ( + "testing" +) + +// --------------------------------------------------------------------------- +// Tests for sanitizeEventName +// --------------------------------------------------------------------------- + +func TestSanitizeEventName(t *testing.T) { + tests := []struct { + name string + input string + want string + }{ + { + name: "slash separated model ID", + input: "anthropic/claude-sonnet-4", + want: "anthropic_claude_sonnet_4", + }, + { + name: "already clean", + input: "gpt4o", + want: "gpt4o", + }, + { + name: "multiple special chars in a row", + input: "vendor//model--v2", + want: "vendor_model_v2", + }, + { + name: "uppercase converted to lowercase", + input: "OpenAI/GPT-4", + want: "openai_gpt_4", + }, + { + name: "dots and colons", + input: "meta.llama:3.1", + want: "meta_llama_3_1", + }, + { + name: "leading separators trimmed", + input: "/leading-model", + want: "leading_model", + }, + { + name: "trailing separators trimmed", + input: "trailing-model/", + want: "trailing_model", + }, + { + name: "leading and trailing separators", + input: "--model--", + want: "model", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := sanitizeEventName(tc.input) + if got != tc.want { + t.Errorf("sanitizeEventName(%q) = %q, want %q", tc.input, got, tc.want) + } + }) + } +} + +// --------------------------------------------------------------------------- +// Token billing tests - commented out, not yet ready for integration. +// --------------------------------------------------------------------------- +/* +func newTestMeterManager(ms *mockDataStore, freeTokens int) *MeterManager { + mc := agent.NewModelCache(time.Hour) + // Pass empty stripeKey so we never touch real Stripe. 
+ return NewMeterManager(ms, mc, "", 1.5, freeTokens, nil) +} + +func TestReportUsage_ZeroTokens(t *testing.T) { + ms := &mockDataStore{ + org: &store.Organization{ID: "org-1", StripeCustomerID: "cus_123"}, + } + mm := newTestMeterManager(ms, 1000) + + err := mm.ReportUsage(context.Background(), "org-1", "anthropic/claude-sonnet-4", 0, 0) + if err != nil { + t.Fatalf("expected nil error for zero tokens, got: %v", err) + } +} + +func TestReportUsage_NoStripeCustomer(t *testing.T) { + ms := &mockDataStore{ + org: &store.Organization{ID: "org-1", StripeCustomerID: ""}, + } + mm := newTestMeterManager(ms, 1000) + + err := mm.ReportUsage(context.Background(), "org-1", "anthropic/claude-sonnet-4", 100, 50) + if err != nil { + t.Fatalf("expected nil error when org has no Stripe customer, got: %v", err) + } +} + +func TestReportUsage_NoActiveSub(t *testing.T) { + ms := &mockDataStore{ + org: &store.Organization{ID: "org-1", StripeCustomerID: "cus_123"}, + subErr: store.ErrNotFound, + } + mm := newTestMeterManager(ms, 1000) + + err := mm.ReportUsage(context.Background(), "org-1", "anthropic/claude-sonnet-4", 100, 50) + if err != nil { + t.Fatalf("expected nil error when subscription not found, got: %v", err) + } +} + +func TestReportUsage_AllFree(t *testing.T) { + ms := &mockDataStore{ + org: &store.Organization{ID: "org-1", StripeCustomerID: "cus_123"}, + sub: &store.Subscription{ + Status: store.SubStatusActive, + StripeSubscriptionID: "sub_123", + }, + sumUsage: 200, + } + mm := newTestMeterManager(ms, 10000) + + err := mm.ReportUsage(context.Background(), "org-1", "anthropic/claude-sonnet-4", 100, 50) + if err != nil { + t.Fatalf("expected nil error when all usage is within free tier, got: %v", err) + } +} + +func TestReportUsage_AllBillable(t *testing.T) { + ms := &mockDataStore{ + org: &store.Organization{ID: "org-1", StripeCustomerID: "cus_123"}, + sub: &store.Subscription{ + Status: store.SubStatusActive, + StripeSubscriptionID: "sub_123", + }, + sumUsage: 5000, + } 
+ mm := newTestMeterManager(ms, 1000) + + err := mm.ReportUsage(context.Background(), "org-1", "anthropic/claude-sonnet-4", 120, 80) + if err == nil { + t.Fatal("expected non-nil error from Stripe API path, got nil") + } +} + +func TestReportUsage_ExactlyAtFreeTierBoundary(t *testing.T) { + ms := &mockDataStore{ + org: &store.Organization{ID: "org-1", StripeCustomerID: "cus_123"}, + sub: &store.Subscription{ + Status: store.SubStatusActive, + StripeSubscriptionID: "sub_123", + }, + sumUsage: 1000, + } + mm := newTestMeterManager(ms, 1000) + + err := mm.ReportUsage(context.Background(), "org-1", "anthropic/claude-sonnet-4", 120, 80) + if err != nil { + t.Fatalf("expected nil error at exact free tier boundary, got: %v", err) + } +} + +func TestReportUsage_OneTokenOverFreeTier(t *testing.T) { + ms := &mockDataStore{ + org: &store.Organization{ID: "org-1", StripeCustomerID: "cus_123"}, + sub: &store.Subscription{ + Status: store.SubStatusActive, + StripeSubscriptionID: "sub_123", + }, + sumUsage: 1001, + } + mm := newTestMeterManager(ms, 1000) + + err := mm.ReportUsage(context.Background(), "org-1", "anthropic/claude-sonnet-4", 120, 80) + if err == nil { + t.Fatal("expected non-nil error from Stripe API path for 1 token over free tier, got nil") + } +} +*/ diff --git a/api/internal/billing/ticker.go b/api/internal/billing/ticker.go new file mode 100644 index 00000000..d7ca777c --- /dev/null +++ b/api/internal/billing/ticker.go @@ -0,0 +1,152 @@ +package billing + +import ( + "context" + "log/slog" + "time" + + "github.com/google/uuid" + + "github.com/aspectrr/fluid.sh/api/internal/config" + "github.com/aspectrr/fluid.sh/api/internal/registry" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +// ResourceTicker periodically reports non-token resource usage to Stripe meters. +// It uses store.DataStore (not store.Store) because it only needs data access +// methods, not lifecycle methods like Ping/Close/WithTx. 
+type ResourceTicker struct { + store store.DataStore + meter *MeterManager + registry *registry.Registry + cfg config.BillingConfig + freeTier config.FreeTierConfig + logger *slog.Logger + interval time.Duration +} + +// NewResourceTicker creates a new ticker that reports resource usage every interval. +func NewResourceTicker( + st store.DataStore, + mm *MeterManager, + reg *registry.Registry, + cfg config.BillingConfig, + logger *slog.Logger, +) *ResourceTicker { + if logger == nil { + logger = slog.Default() + } + return &ResourceTicker{ + store: st, + meter: mm, + registry: reg, + cfg: cfg, + freeTier: cfg.FreeTier, + logger: logger.With("component", "billing-ticker"), + interval: time.Hour, + } +} + +// Start runs the ticker loop until ctx is cancelled. +func (rt *ResourceTicker) Start(ctx context.Context) { + ticker := time.NewTicker(rt.interval) + defer ticker.Stop() + + rt.logger.Info("resource billing ticker started", "interval", rt.interval) + + for { + select { + case <-ctx.Done(): + rt.logger.Info("resource billing ticker stopped") + return + case <-ticker.C: + rt.tick(ctx) + } + } +} + +func (rt *ResourceTicker) tick(ctx context.Context) { + subs, err := rt.store.ListActiveSubscriptions(ctx) + if err != nil { + rt.logger.Warn("failed to list active subscriptions", "error", err) + return + } + + for _, sub := range subs { + rt.reportForOrg(ctx, sub.OrgID) + } +} + +func (rt *ResourceTicker) reportForOrg(ctx context.Context, orgID string) { + org, err := rt.store.GetOrganization(ctx, orgID) + if err != nil { + rt.logger.Warn("failed to get org for billing tick", "error", err, "org_id", orgID) + return + } + if org.StripeCustomerID == "" { + return + } + + now := time.Now().UTC() + + // Get counts from registry (daemon-reported data via heartbeats) + runningSandboxes, sourceVMCount, daemonCount := rt.registry.OrgResourceCounts(orgID) + + // Subtract free tier + billableSandboxes := int64(runningSandboxes - rt.freeTier.MaxConcurrentSandboxes) + 
billableSourceVMs := int64(sourceVMCount - rt.freeTier.MaxSourceVMs) + billableDaemons := int64(daemonCount - rt.freeTier.MaxAgentHosts) + + // Report to Stripe + if billableSandboxes > 0 { + rt.meter.ReportResourceUsage(ctx, org.StripeCustomerID, "concurrent_sandboxes", billableSandboxes) + } + if billableSourceVMs > 0 { + rt.meter.ReportResourceUsage(ctx, org.StripeCustomerID, "source_vms", billableSourceVMs) + } + if billableDaemons > 0 { + rt.meter.ReportResourceUsage(ctx, org.StripeCustomerID, "fluid_daemons", billableDaemons) + } + + // Create local usage records + if runningSandboxes > 0 { + if err := rt.store.CreateUsageRecord(ctx, &store.UsageRecord{ + ID: uuid.New().String(), + OrgID: orgID, + ResourceType: "max_concurrent_sandboxes", + Quantity: float64(runningSandboxes), + RecordedAt: now, + }); err != nil { + rt.logger.Warn("failed to create usage record", "type", "max_concurrent_sandboxes", "org_id", orgID, "error", err) + } + } + if sourceVMCount > 0 { + if err := rt.store.CreateUsageRecord(ctx, &store.UsageRecord{ + ID: uuid.New().String(), + OrgID: orgID, + ResourceType: "source_vm", + Quantity: float64(sourceVMCount), + RecordedAt: now, + }); err != nil { + rt.logger.Warn("failed to create usage record", "type", "source_vm", "org_id", orgID, "error", err) + } + } + if daemonCount > 0 { + if err := rt.store.CreateUsageRecord(ctx, &store.UsageRecord{ + ID: uuid.New().String(), + OrgID: orgID, + ResourceType: "agent_host", + Quantity: float64(daemonCount), + RecordedAt: now, + }); err != nil { + rt.logger.Warn("failed to create usage record", "type", "agent_host", "org_id", orgID, "error", err) + } + } + + rt.logger.Debug("billing tick completed", + "org_id", orgID, + "sandboxes", runningSandboxes, + "source_vms", sourceVMCount, + "daemons", daemonCount, + ) +} diff --git a/api/internal/billing/ticker_test.go b/api/internal/billing/ticker_test.go new file mode 100644 index 00000000..16b9695f --- /dev/null +++ b/api/internal/billing/ticker_test.go @@ 
-0,0 +1,415 @@ +package billing + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/aspectrr/fluid.sh/api/internal/config" + "github.com/aspectrr/fluid.sh/api/internal/registry" + "github.com/aspectrr/fluid.sh/api/internal/store" + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" +) + +// --------------------------------------------------------------------------- +// tickerMockStore - implements store.DataStore with tracking and overrides +// --------------------------------------------------------------------------- + +type tickerMockStore struct { + mu sync.Mutex + + // Override functions for specific methods + getOrgFn func(ctx context.Context, id string) (*store.Organization, error) + createUsageRecordFn func(ctx context.Context, rec *store.UsageRecord) error + + // Tracking + usageRecords []*store.UsageRecord + methodCalls map[string]int +} + +func newTickerMockStore() *tickerMockStore { + return &tickerMockStore{ + methodCalls: make(map[string]int), + } +} + +func (m *tickerMockStore) track(method string) { + m.mu.Lock() + defer m.mu.Unlock() + m.methodCalls[method]++ +} + +func (m *tickerMockStore) callCount(method string) int { + m.mu.Lock() + defer m.mu.Unlock() + return m.methodCalls[method] +} + +// --- Overrideable methods used by reportForOrg --- + +func (m *tickerMockStore) GetOrganization(ctx context.Context, id string) (*store.Organization, error) { + m.track("GetOrganization") + if m.getOrgFn != nil { + return m.getOrgFn(ctx, id) + } + return &store.Organization{ID: id}, nil +} + +func (m *tickerMockStore) ListSandboxesByOrg(context.Context, string) ([]store.Sandbox, error) { + return nil, nil +} + +func (m *tickerMockStore) ListSourceHostsByOrg(context.Context, string) ([]*store.SourceHost, error) { + return nil, nil +} + +func (m *tickerMockStore) CreateUsageRecord(ctx context.Context, rec *store.UsageRecord) error { + m.track("CreateUsageRecord") + m.mu.Lock() + m.usageRecords = append(m.usageRecords, rec) + 
m.mu.Unlock() + if m.createUsageRecordFn != nil { + return m.createUsageRecordFn(ctx, rec) + } + return nil +} + +// --- Stub implementations for the rest of store.DataStore --- + +func (m *tickerMockStore) CreateUser(context.Context, *store.User) error { return nil } +func (m *tickerMockStore) GetUser(context.Context, string) (*store.User, error) { return nil, nil } +func (m *tickerMockStore) GetUserByEmail(context.Context, string) (*store.User, error) { + return nil, nil +} +func (m *tickerMockStore) UpdateUser(context.Context, *store.User) error { return nil } + +func (m *tickerMockStore) CreateOAuthAccount(context.Context, *store.OAuthAccount) error { return nil } +func (m *tickerMockStore) GetOAuthAccount(context.Context, string, string) (*store.OAuthAccount, error) { + return nil, nil +} +func (m *tickerMockStore) GetOAuthAccountsByUser(context.Context, string) ([]*store.OAuthAccount, error) { + return nil, nil +} + +func (m *tickerMockStore) CreateSession(context.Context, *store.Session) error { return nil } +func (m *tickerMockStore) GetSession(context.Context, string) (*store.Session, error) { + return nil, nil +} +func (m *tickerMockStore) DeleteSession(context.Context, string) error { return nil } +func (m *tickerMockStore) DeleteExpiredSessions(context.Context) error { return nil } + +func (m *tickerMockStore) CreateOrganization(context.Context, *store.Organization) error { return nil } +func (m *tickerMockStore) GetOrganizationBySlug(context.Context, string) (*store.Organization, error) { + return nil, nil +} +func (m *tickerMockStore) ListOrganizationsByUser(context.Context, string) ([]*store.Organization, error) { + return nil, nil +} +func (m *tickerMockStore) UpdateOrganization(context.Context, *store.Organization) error { return nil } +func (m *tickerMockStore) DeleteOrganization(context.Context, string) error { return nil } + +func (m *tickerMockStore) CreateOrgMember(context.Context, *store.OrgMember) error { return nil } +func (m 
*tickerMockStore) GetOrgMember(context.Context, string, string) (*store.OrgMember, error) { + return nil, nil +} +func (m *tickerMockStore) GetOrgMemberByID(context.Context, string, string) (*store.OrgMember, error) { + return nil, nil +} +func (m *tickerMockStore) ListOrgMembers(context.Context, string) ([]*store.OrgMember, error) { + return nil, nil +} +func (m *tickerMockStore) DeleteOrgMember(context.Context, string, string) error { return nil } + +func (m *tickerMockStore) CreateSubscription(context.Context, *store.Subscription) error { return nil } +func (m *tickerMockStore) GetSubscriptionByOrg(context.Context, string) (*store.Subscription, error) { + return nil, nil +} +func (m *tickerMockStore) UpdateSubscription(context.Context, *store.Subscription) error { return nil } + +func (m *tickerMockStore) ListUsageRecords(context.Context, string, time.Time, time.Time) ([]*store.UsageRecord, error) { + return nil, nil +} + +func (m *tickerMockStore) CreateHost(context.Context, *store.Host) error { return nil } +func (m *tickerMockStore) GetHost(context.Context, string) (*store.Host, error) { return nil, nil } +func (m *tickerMockStore) ListHosts(context.Context) ([]store.Host, error) { return nil, nil } +func (m *tickerMockStore) ListHostsByOrg(context.Context, string) ([]store.Host, error) { + return nil, nil +} +func (m *tickerMockStore) UpdateHost(context.Context, *store.Host) error { return nil } +func (m *tickerMockStore) UpdateHostHeartbeat(context.Context, string, int32, int64, int64) error { + return nil +} + +func (m *tickerMockStore) CreateSandbox(context.Context, *store.Sandbox) error { return nil } +func (m *tickerMockStore) GetSandbox(context.Context, string) (*store.Sandbox, error) { + return nil, nil +} +func (m *tickerMockStore) GetSandboxByOrg(context.Context, string, string) (*store.Sandbox, error) { + return nil, nil +} +func (m *tickerMockStore) ListSandboxes(context.Context) ([]store.Sandbox, error) { return nil, nil } +func (m 
*tickerMockStore) UpdateSandbox(context.Context, *store.Sandbox) error { return nil } +func (m *tickerMockStore) DeleteSandbox(context.Context, string) error { return nil } +func (m *tickerMockStore) GetSandboxesByHostID(context.Context, string) ([]store.Sandbox, error) { + return nil, nil +} +func (m *tickerMockStore) CountSandboxesByHostIDs(context.Context, []string) (map[string]int, error) { + return map[string]int{}, nil +} +func (m *tickerMockStore) ListExpiredSandboxes(context.Context, time.Duration) ([]store.Sandbox, error) { + return nil, nil +} + +func (m *tickerMockStore) CreateCommand(context.Context, *store.Command) error { return nil } +func (m *tickerMockStore) ListSandboxCommands(context.Context, string) ([]store.Command, error) { + return nil, nil +} + +func (m *tickerMockStore) CreateSourceHost(context.Context, *store.SourceHost) error { return nil } +func (m *tickerMockStore) GetSourceHost(context.Context, string) (*store.SourceHost, error) { + return nil, nil +} +func (m *tickerMockStore) DeleteSourceHost(context.Context, string) error { return nil } + +func (m *tickerMockStore) CreateHostToken(context.Context, *store.HostToken) error { return nil } +func (m *tickerMockStore) GetHostTokenByHash(context.Context, string) (*store.HostToken, error) { + return nil, nil +} +func (m *tickerMockStore) ListHostTokensByOrg(context.Context, string) ([]store.HostToken, error) { + return nil, nil +} +func (m *tickerMockStore) DeleteHostToken(context.Context, string, string) error { return nil } + +// Agent/playbook mock methods removed - interface methods commented out in store.go + +func (m *tickerMockStore) GetOrganizationByStripeCustomerID(context.Context, string) (*store.Organization, error) { + return nil, nil +} +func (m *tickerMockStore) GetModelMeter(context.Context, string) (*store.ModelMeter, error) { + return nil, store.ErrNotFound +} +func (m *tickerMockStore) CreateModelMeter(context.Context, *store.ModelMeter) error { return nil } +func (m 
*tickerMockStore) GetOrgModelSubscription(context.Context, string, string) (*store.OrgModelSubscription, error) { + return nil, store.ErrNotFound +} +func (m *tickerMockStore) CreateOrgModelSubscription(context.Context, *store.OrgModelSubscription) error { + return nil +} +func (m *tickerMockStore) SumTokenUsage(context.Context, string, time.Time, time.Time) (float64, error) { + return 0, nil +} +func (m *tickerMockStore) ListActiveSubscriptions(context.Context) ([]*store.Subscription, error) { + return nil, nil +} +func (m *tickerMockStore) GetSubscriptionByStripeID(context.Context, string) (*store.Subscription, error) { + return nil, nil +} +func (m *tickerMockStore) AcquireAdvisoryLock(context.Context, int64) error { return nil } +func (m *tickerMockStore) ReleaseAdvisoryLock(context.Context, int64) error { return nil } + +// --------------------------------------------------------------------------- +// mockHostStream - minimal HostStream for registry.Register +// --------------------------------------------------------------------------- + +type mockHostStream struct{} + +func (mockHostStream) Send(_ *fluidv1.ControlMessage) error { return nil } + +// --------------------------------------------------------------------------- +// Helper to build a ResourceTicker for tests +// --------------------------------------------------------------------------- + +func newTestTicker(st store.DataStore, freeTier config.FreeTierConfig) *ResourceTicker { + mm := NewMeterManager(st, "", 1.2, nil) + reg := registry.New() + cfg := config.BillingConfig{ + FreeTier: freeTier, + } + return NewResourceTicker(st, mm, reg, cfg, nil) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +func TestReportForOrg_NoStripeCustomer(t *testing.T) { + ms := newTickerMockStore() + ms.getOrgFn = func(_ context.Context, id string) (*store.Organization, error) { + return 
&store.Organization{ + ID: id, + StripeCustomerID: "", // no Stripe customer + }, nil + } + + rt := newTestTicker(ms, config.FreeTierConfig{ + MaxConcurrentSandboxes: 1, + MaxSourceVMs: 1, + MaxAgentHosts: 1, + }) + + rt.reportForOrg(context.Background(), "org-no-stripe") + + if ms.callCount("GetOrganization") != 1 { + t.Errorf("GetOrganization call count = %d, want 1", ms.callCount("GetOrganization")) + } + + // Should return early before creating any usage records. + if ms.callCount("CreateUsageRecord") != 0 { + t.Errorf("CreateUsageRecord should not be called, got %d calls", ms.callCount("CreateUsageRecord")) + } +} + +func TestReportForOrg_FreeTierSubtraction(t *testing.T) { + ms := newTickerMockStore() + ms.getOrgFn = func(_ context.Context, id string) (*store.Organization, error) { + return &store.Organization{ + ID: id, + StripeCustomerID: "cus_test123", + }, nil + } + + freeTier := config.FreeTierConfig{ + MaxConcurrentSandboxes: 1, + MaxSourceVMs: 1, + MaxAgentHosts: 1, + } + + // Build the ticker with a registry that has 2 connected daemons for this org. + mm := NewMeterManager(ms, "", 1.2, nil) + reg := registry.New() + cfg := config.BillingConfig{FreeTier: freeTier} + rt := NewResourceTicker(ms, mm, reg, cfg, nil) + + // Register 2 daemons and set heartbeat counts. + _ = reg.Register("host-1", "org-ft", "daemon-1", mockHostStream{}) + _ = reg.Register("host-2", "org-ft", "daemon-2", mockHostStream{}) + reg.UpdateHeartbeatCounts("host-1", 1, 2) // 1 sandbox, 2 source VMs + reg.UpdateHeartbeatCounts("host-2", 1, 1) // 1 sandbox, 1 source VM + + rt.reportForOrg(context.Background(), "org-ft") + + // 2 sandboxes, 3 source VMs, 2 daemons -> usage records for all three. + // Local usage records record the raw counts (not free-tier-subtracted). + if ms.callCount("CreateUsageRecord") != 3 { + t.Fatalf("CreateUsageRecord call count = %d, want 3", ms.callCount("CreateUsageRecord")) + } + + // Verify usage record contents. 
+ recordsByType := make(map[string]*store.UsageRecord) + for _, rec := range ms.usageRecords { + recordsByType[rec.ResourceType] = rec + } + + // max_concurrent_sandboxes: raw count = 2 (1+1 from heartbeats) + if rec, ok := recordsByType["max_concurrent_sandboxes"]; !ok { + t.Error("missing max_concurrent_sandboxes usage record") + } else if rec.Quantity != 2 { + t.Errorf("max_concurrent_sandboxes quantity = %v, want 2", rec.Quantity) + } + + // source_vm: raw count = 3 (2+1 from heartbeats) + if rec, ok := recordsByType["source_vm"]; !ok { + t.Error("missing source_vm usage record") + } else if rec.Quantity != 3 { + t.Errorf("source_vm quantity = %v, want 3", rec.Quantity) + } + + // agent_host: raw count = 2 daemons + if rec, ok := recordsByType["agent_host"]; !ok { + t.Error("missing agent_host usage record") + } else if rec.Quantity != 2 { + t.Errorf("agent_host quantity = %v, want 2", rec.Quantity) + } + + // Verify orgID on all records. + for _, rec := range ms.usageRecords { + if rec.OrgID != "org-ft" { + t.Errorf("usage record OrgID = %q, want %q", rec.OrgID, "org-ft") + } + } +} + +func TestReportForOrg_StoreError(t *testing.T) { + ms := newTickerMockStore() + ms.getOrgFn = func(_ context.Context, _ string) (*store.Organization, error) { + return nil, fmt.Errorf("database connection lost") + } + + rt := newTestTicker(ms, config.FreeTierConfig{ + MaxConcurrentSandboxes: 1, + MaxSourceVMs: 1, + MaxAgentHosts: 1, + }) + + // Should not panic. + rt.reportForOrg(context.Background(), "org-error") + + if ms.callCount("GetOrganization") != 1 { + t.Errorf("GetOrganization call count = %d, want 1", ms.callCount("GetOrganization")) + } + + // Should return early without creating usage records. 
+ if ms.callCount("CreateUsageRecord") != 0 { + t.Errorf("CreateUsageRecord should not be called after store error, got %d", ms.callCount("CreateUsageRecord")) + } +} + +func TestReportForOrg_RegistryZeroCounts(t *testing.T) { + ms := newTickerMockStore() + ms.getOrgFn = func(_ context.Context, id string) (*store.Organization, error) { + return &store.Organization{ + ID: id, + StripeCustomerID: "cus_test123", + }, nil + } + + // Registry has a daemon but heartbeat counts are zero. + mm := NewMeterManager(ms, "", 1.2, nil) + reg := registry.New() + cfg := config.BillingConfig{FreeTier: config.FreeTierConfig{ + MaxConcurrentSandboxes: 1, + MaxSourceVMs: 1, + MaxAgentHosts: 1, + }} + rt := NewResourceTicker(ms, mm, reg, cfg, nil) + + _ = reg.Register("host-1", "org-zero", "daemon-1", mockHostStream{}) + // No UpdateHeartbeatCounts called - counts remain 0. + + rt.reportForOrg(context.Background(), "org-zero") + + // 1 daemon > 0 so agent_host usage record created; sandboxes and source VMs are 0. + if ms.callCount("CreateUsageRecord") != 1 { + t.Errorf("CreateUsageRecord call count = %d, want 1 (only agent_host)", ms.callCount("CreateUsageRecord")) + } + if len(ms.usageRecords) == 1 && ms.usageRecords[0].ResourceType != "agent_host" { + t.Errorf("expected agent_host usage record, got %q", ms.usageRecords[0].ResourceType) + } +} + +func TestReportForOrg_NoConnectedDaemons(t *testing.T) { + ms := newTickerMockStore() + ms.getOrgFn = func(_ context.Context, id string) (*store.Organization, error) { + return &store.Organization{ + ID: id, + StripeCustomerID: "cus_test123", + }, nil + } + + // Empty registry - no daemons connected. + rt := newTestTicker(ms, config.FreeTierConfig{ + MaxConcurrentSandboxes: 1, + MaxSourceVMs: 1, + MaxAgentHosts: 1, + }) + + rt.reportForOrg(context.Background(), "org-empty") + + // All counts are 0, nothing to report. 
+ if ms.callCount("CreateUsageRecord") != 0 { + t.Errorf("CreateUsageRecord should not be called with no daemons, got %d", ms.callCount("CreateUsageRecord")) + } +} diff --git a/api/internal/config/config.go b/api/internal/config/config.go new file mode 100644 index 00000000..b0959aa5 --- /dev/null +++ b/api/internal/config/config.go @@ -0,0 +1,287 @@ +package config + +import ( + "fmt" + "log/slog" + "net/url" + "os" + "strconv" + "strings" + "time" +) + +type Config struct { + API APIConfig + GRPC GRPCConfig + Database DatabaseConfig + Auth AuthConfig + Frontend FrontendConfig + Billing BillingConfig + // Agent AgentConfig - commented out, not yet ready for integration + Orchestrator OrchestratorConfig + Logging LoggingConfig + PostHog PostHogConfig + EncryptionKey string +} + +type PostHogConfig struct { + APIKey string + Endpoint string +} + +type GRPCConfig struct { + Address string + TLSCertFile string + TLSKeyFile string + AllowInsecure bool +} + +type OrchestratorConfig struct { + HeartbeatTimeout time.Duration + DefaultTTL time.Duration +} + +type APIConfig struct { + Addr string + ReadTimeout time.Duration + WriteTimeout time.Duration + IdleTimeout time.Duration + ShutdownTimeout time.Duration + EnableDocs bool + TrustedProxies []string +} + +type DatabaseConfig struct { + URL string + MaxOpenConns int + MaxIdleConns int + ConnMaxLifetime time.Duration + AutoMigrate bool +} + +type AuthConfig struct { + SessionTTL time.Duration + SecureCookies bool + GitHub OAuthProviderConfig + Google OAuthProviderConfig +} + +type OAuthProviderConfig struct { + ClientID string + ClientSecret string + RedirectURL string +} + +type FrontendConfig struct { + URL string +} + +type BillingConfig struct { + StripeSecretKey string + StripeWebhookSecret string + StripePublishableKey string + StripePriceID string + Prices PriceConfig + FreeTier FreeTierConfig + BillingMarkup float64 +} + +type PriceConfig struct { + SandboxMonthlyCents int + SourceVMMonthly int + 
AgentHostMonthly int +} + +type FreeTierConfig struct { + MaxConcurrentSandboxes int + MaxSourceVMs int + MaxAgentHosts int +} + +// AgentConfig - commented out, not yet ready for integration. +/* +type AgentConfig struct { + OpenRouterAPIKey string + OpenRouterBaseURL string + DefaultModel string + MaxTokensPerRequest int + FreeTokensPerMonth int +} +*/ + +type LoggingConfig struct { + Level string + Format string +} + +// Validate checks that required configuration fields are set and valid. +func (c *Config) Validate() error { + if c.Database.URL == "" { + return fmt.Errorf("DATABASE_URL is required") + } + if c.Frontend.URL == "*" { + return fmt.Errorf("FRONTEND_URL must not be '*'") + } + if u, err := url.Parse(c.Frontend.URL); err != nil || u.Scheme == "" { + return fmt.Errorf("FRONTEND_URL must be a valid URL") + } + if c.GRPC.TLSCertFile == "" && c.GRPC.TLSKeyFile == "" && !c.GRPC.AllowInsecure { + return fmt.Errorf("gRPC TLS not configured; set GRPC_TLS_CERT_FILE/GRPC_TLS_KEY_FILE or GRPC_ALLOW_INSECURE=true") + } + if c.EncryptionKey == "" { + slog.Warn("ENCRYPTION_KEY not set: OAuth tokens and Proxmox secrets will be stored in plaintext") + } + return nil +} + +func Load() *Config { + return &Config{ + API: APIConfig{ + Addr: envOr("API_ADDR", ":8080"), + ReadTimeout: envDuration("API_READ_TIMEOUT", 60*time.Second), + WriteTimeout: envDuration("API_WRITE_TIMEOUT", 120*time.Second), + IdleTimeout: envDuration("API_IDLE_TIMEOUT", 120*time.Second), + ShutdownTimeout: envDuration("API_SHUTDOWN_TIMEOUT", 20*time.Second), + EnableDocs: envBool("API_ENABLE_DOCS", false), + TrustedProxies: envStringSlice("TRUSTED_PROXIES"), + }, + Database: DatabaseConfig{ + URL: os.Getenv("DATABASE_URL"), + MaxOpenConns: envInt("DATABASE_MAX_OPEN_CONNS", 16), + MaxIdleConns: envInt("DATABASE_MAX_IDLE_CONNS", 8), + ConnMaxLifetime: envDuration("DATABASE_CONN_MAX_LIFETIME", time.Hour), + AutoMigrate: envBool("DATABASE_AUTO_MIGRATE", false), + }, + Auth: AuthConfig{ + SessionTTL: 
envDuration("AUTH_SESSION_TTL", 168*time.Hour), + SecureCookies: envBool("AUTH_SECURE_COOKIES", true), + GitHub: OAuthProviderConfig{ + ClientID: os.Getenv("AUTH_GITHUB_CLIENT_ID"), + ClientSecret: os.Getenv("AUTH_GITHUB_CLIENT_SECRET"), + RedirectURL: envOr("AUTH_GITHUB_REDIRECT_URL", "http://localhost:5173/v1/auth/github/callback"), + }, + Google: OAuthProviderConfig{ + ClientID: os.Getenv("AUTH_GOOGLE_CLIENT_ID"), + ClientSecret: os.Getenv("AUTH_GOOGLE_CLIENT_SECRET"), + RedirectURL: envOr("AUTH_GOOGLE_REDIRECT_URL", "http://localhost:5173/v1/auth/google/callback"), + }, + }, + GRPC: GRPCConfig{ + Address: envOr("GRPC_ADDR", ":9090"), + TLSCertFile: os.Getenv("GRPC_TLS_CERT_FILE"), + TLSKeyFile: os.Getenv("GRPC_TLS_KEY_FILE"), + AllowInsecure: envBool("GRPC_ALLOW_INSECURE", false), + }, + Orchestrator: OrchestratorConfig{ + HeartbeatTimeout: envDuration("ORCHESTRATOR_HEARTBEAT_TIMEOUT", 90*time.Second), + DefaultTTL: envDuration("ORCHESTRATOR_DEFAULT_TTL", 24*time.Hour), + }, + Frontend: FrontendConfig{ + URL: envOr("FRONTEND_URL", "http://localhost:5173"), + }, + Billing: BillingConfig{ + StripeSecretKey: os.Getenv("STRIPE_SECRET_KEY"), + StripeWebhookSecret: os.Getenv("STRIPE_WEBHOOK_SECRET"), + StripePublishableKey: os.Getenv("STRIPE_PUBLISHABLE_KEY"), + StripePriceID: os.Getenv("STRIPE_PRICE_ID"), + Prices: PriceConfig{ + SandboxMonthlyCents: envInt("BILLING_SANDBOX_MONTHLY_CENTS", 5000), + SourceVMMonthly: envInt("BILLING_SOURCE_VM_MONTHLY_CENTS", 500), + AgentHostMonthly: envInt("BILLING_AGENT_HOST_MONTHLY_CENTS", 1000), + }, + FreeTier: FreeTierConfig{ + MaxConcurrentSandboxes: envInt("BILLING_FREE_TIER_MAX_SANDBOXES", 1), + MaxSourceVMs: envInt("BILLING_FREE_TIER_MAX_SOURCE_VMS", 3), + MaxAgentHosts: envInt("BILLING_FREE_TIER_MAX_AGENT_HOSTS", 1), + }, + BillingMarkup: envFloat("BILLING_MARKUP", 1.05), + }, + // Agent config - commented out, not yet ready for integration. 
+ // Agent: AgentConfig{ + // OpenRouterAPIKey: os.Getenv("OPENROUTER_API_KEY"), + // OpenRouterBaseURL: envOr("OPENROUTER_BASE_URL", "https://openrouter.ai/api/v1"), + // DefaultModel: envOr("AGENT_DEFAULT_MODEL", "anthropic/claude-sonnet-4"), + // MaxTokensPerRequest: envInt("AGENT_MAX_TOKENS_PER_REQUEST", 8192), + // FreeTokensPerMonth: envInt("AGENT_FREE_TOKENS_PER_MONTH", 100000), + // }, + Logging: LoggingConfig{ + Level: envOr("LOG_LEVEL", "info"), + Format: envOr("LOG_FORMAT", "text"), + }, + PostHog: PostHogConfig{ + APIKey: os.Getenv("POSTHOG_API_KEY"), + Endpoint: envOr("POSTHOG_ENDPOINT", "https://nautilus.fluid.sh"), + }, + EncryptionKey: os.Getenv("ENCRYPTION_KEY"), + } +} + +func envOr(key, fallback string) string { + if v := os.Getenv(key); v != "" { + return v + } + return fallback +} + +func envInt(key string, fallback int) int { + if v := os.Getenv(key); v != "" { + n, err := strconv.Atoi(v) + if err != nil { + slog.Warn("invalid integer for env var, using default", "key", key, "value", v, "default", fallback) + return fallback + } + return n + } + return fallback +} + +func envBool(key string, fallback bool) bool { + if v := os.Getenv(key); v != "" { + b, err := strconv.ParseBool(v) + if err != nil { + slog.Warn("invalid boolean for env var, using default", "key", key, "value", v, "default", fallback) + return fallback + } + return b + } + return fallback +} + +func envDuration(key string, fallback time.Duration) time.Duration { + if v := os.Getenv(key); v != "" { + d, err := time.ParseDuration(v) + if err != nil { + slog.Warn("invalid duration for env var, using default", "key", key, "value", v, "default", fallback) + return fallback + } + return d + } + return fallback +} + +func envStringSlice(key string) []string { + v := os.Getenv(key) + if v == "" { + return nil + } + parts := strings.Split(v, ",") + out := make([]string, 0, len(parts)) + for _, p := range parts { + if s := strings.TrimSpace(p); s != "" { + out = append(out, s) + } + } + 
return out +} + +func envFloat(key string, fallback float64) float64 { + if v := os.Getenv(key); v != "" { + f, err := strconv.ParseFloat(v, 64) + if err != nil { + slog.Warn("invalid float for env var, using default", "key", key, "value", v, "default", fallback) + return fallback + } + return f + } + return fallback +} diff --git a/api/internal/config/config_test.go b/api/internal/config/config_test.go new file mode 100644 index 00000000..8154d658 --- /dev/null +++ b/api/internal/config/config_test.go @@ -0,0 +1,206 @@ +package config + +import ( + "testing" + "time" +) + +func TestEnvOr_WithValue(t *testing.T) { + t.Setenv("TEST_ENV_OR_KEY", "custom_value") + + got := envOr("TEST_ENV_OR_KEY", "default_value") + if got != "custom_value" { + t.Errorf("expected 'custom_value', got %q", got) + } +} + +func TestEnvOr_Fallback(t *testing.T) { + // TEST_ENV_OR_MISSING is not set + got := envOr("TEST_ENV_OR_MISSING", "fallback") + if got != "fallback" { + t.Errorf("expected 'fallback', got %q", got) + } +} + +func TestEnvOr_EmptyString(t *testing.T) { + t.Setenv("TEST_ENV_OR_EMPTY", "") + + got := envOr("TEST_ENV_OR_EMPTY", "fallback") + if got != "fallback" { + t.Errorf("expected 'fallback' when env is empty string, got %q", got) + } +} + +func TestEnvInt_ValidInt(t *testing.T) { + t.Setenv("TEST_ENV_INT", "42") + + got := envInt("TEST_ENV_INT", 10) + if got != 42 { + t.Errorf("expected 42, got %d", got) + } +} + +func TestEnvInt_InvalidInt(t *testing.T) { + t.Setenv("TEST_ENV_INT_BAD", "notanumber") + + got := envInt("TEST_ENV_INT_BAD", 10) + if got != 10 { + t.Errorf("expected fallback 10, got %d", got) + } +} + +func TestEnvInt_Missing(t *testing.T) { + got := envInt("TEST_ENV_INT_MISSING", 99) + if got != 99 { + t.Errorf("expected fallback 99, got %d", got) + } +} + +func TestEnvBool_True(t *testing.T) { + t.Setenv("TEST_ENV_BOOL", "true") + + got := envBool("TEST_ENV_BOOL", false) + if got != true { + t.Error("expected true, got false") + } +} + +func 
TestEnvBool_False(t *testing.T) { + t.Setenv("TEST_ENV_BOOL_F", "false") + + got := envBool("TEST_ENV_BOOL_F", true) + if got != false { + t.Error("expected false, got true") + } +} + +func TestEnvBool_Invalid(t *testing.T) { + t.Setenv("TEST_ENV_BOOL_BAD", "notabool") + + got := envBool("TEST_ENV_BOOL_BAD", true) + if got != true { + t.Error("expected fallback true, got false") + } +} + +func TestEnvBool_Missing(t *testing.T) { + got := envBool("TEST_ENV_BOOL_MISSING", true) + if got != true { + t.Error("expected fallback true, got false") + } +} + +func TestEnvDuration_Valid(t *testing.T) { + t.Setenv("TEST_ENV_DUR", "5s") + + got := envDuration("TEST_ENV_DUR", time.Minute) + if got != 5*time.Second { + t.Errorf("expected 5s, got %v", got) + } +} + +func TestEnvDuration_Complex(t *testing.T) { + t.Setenv("TEST_ENV_DUR_COMPLEX", "2h30m") + + got := envDuration("TEST_ENV_DUR_COMPLEX", time.Minute) + expected := 2*time.Hour + 30*time.Minute + if got != expected { + t.Errorf("expected %v, got %v", expected, got) + } +} + +func TestEnvDuration_Invalid(t *testing.T) { + t.Setenv("TEST_ENV_DUR_BAD", "notaduration") + + got := envDuration("TEST_ENV_DUR_BAD", 30*time.Second) + if got != 30*time.Second { + t.Errorf("expected fallback 30s, got %v", got) + } +} + +func TestEnvDuration_Missing(t *testing.T) { + got := envDuration("TEST_ENV_DUR_MISSING", time.Hour) + if got != time.Hour { + t.Errorf("expected fallback 1h, got %v", got) + } +} + +func TestLoad_Defaults(t *testing.T) { + // Clear any env vars that might interfere with defaults + t.Setenv("API_ADDR", "") + t.Setenv("GRPC_ADDR", "") + t.Setenv("LOG_LEVEL", "") + t.Setenv("LOG_FORMAT", "") + t.Setenv("OPENROUTER_BASE_URL", "") + t.Setenv("FRONTEND_URL", "") + t.Setenv("DATABASE_AUTO_MIGRATE", "") + + cfg := Load() + + if cfg.API.Addr != ":8080" { + t.Errorf("expected API.Addr ':8080', got %q", cfg.API.Addr) + } + if cfg.API.ReadTimeout != 60*time.Second { + t.Errorf("expected API.ReadTimeout 60s, got %v", 
cfg.API.ReadTimeout) + } + if cfg.API.WriteTimeout != 120*time.Second { + t.Errorf("expected API.WriteTimeout 120s, got %v", cfg.API.WriteTimeout) + } + if cfg.GRPC.Address != ":9090" { + t.Errorf("expected GRPC.Address ':9090', got %q", cfg.GRPC.Address) + } + if cfg.Database.MaxOpenConns != 16 { + t.Errorf("expected Database.MaxOpenConns 16, got %d", cfg.Database.MaxOpenConns) + } + if cfg.Database.MaxIdleConns != 8 { + t.Errorf("expected Database.MaxIdleConns 8, got %d", cfg.Database.MaxIdleConns) + } + if cfg.Database.AutoMigrate != false { + t.Error("expected Database.AutoMigrate false, got true") + } + if cfg.Orchestrator.HeartbeatTimeout != 90*time.Second { + t.Errorf("expected Orchestrator.HeartbeatTimeout 90s, got %v", cfg.Orchestrator.HeartbeatTimeout) + } + if cfg.Orchestrator.DefaultTTL != 24*time.Hour { + t.Errorf("expected Orchestrator.DefaultTTL 24h, got %v", cfg.Orchestrator.DefaultTTL) + } + if cfg.Logging.Level != "info" { + t.Errorf("expected Logging.Level 'info', got %q", cfg.Logging.Level) + } + if cfg.Logging.Format != "text" { + t.Errorf("expected Logging.Format 'text', got %q", cfg.Logging.Format) + } + if cfg.Frontend.URL != "http://localhost:5173" { + t.Errorf("expected Frontend.URL 'http://localhost:5173', got %q", cfg.Frontend.URL) + } +} + +func TestLoad_EnvOverrides(t *testing.T) { + t.Setenv("API_ADDR", ":9999") + t.Setenv("GRPC_ADDR", ":7070") + t.Setenv("LOG_LEVEL", "debug") + t.Setenv("DATABASE_MAX_OPEN_CONNS", "32") + t.Setenv("DATABASE_AUTO_MIGRATE", "false") + t.Setenv("API_READ_TIMEOUT", "30s") + + cfg := Load() + + if cfg.API.Addr != ":9999" { + t.Errorf("expected API.Addr ':9999', got %q", cfg.API.Addr) + } + if cfg.GRPC.Address != ":7070" { + t.Errorf("expected GRPC.Address ':7070', got %q", cfg.GRPC.Address) + } + if cfg.Logging.Level != "debug" { + t.Errorf("expected Logging.Level 'debug', got %q", cfg.Logging.Level) + } + if cfg.Database.MaxOpenConns != 32 { + t.Errorf("expected Database.MaxOpenConns 32, got %d", 
cfg.Database.MaxOpenConns) + } + if cfg.Database.AutoMigrate != false { + t.Error("expected Database.AutoMigrate false, got true") + } + if cfg.API.ReadTimeout != 30*time.Second { + t.Errorf("expected API.ReadTimeout 30s, got %v", cfg.API.ReadTimeout) + } +} diff --git a/api/internal/crypto/crypto.go b/api/internal/crypto/crypto.go new file mode 100644 index 00000000..0cb5f11a --- /dev/null +++ b/api/internal/crypto/crypto.go @@ -0,0 +1,68 @@ +package crypto + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "fmt" + "io" +) + +// DeriveKey produces a 32-byte AES-256 key from the raw config value. +func DeriveKey(raw string) []byte { + h := sha256.Sum256([]byte(raw)) + return h[:] +} + +// Encrypt encrypts plaintext with AES-256-GCM using the given key. +// Returns a base64-encoded nonce+ciphertext string. +func Encrypt(key []byte, plaintext string) (string, error) { + if len(plaintext) == 0 { + return "", nil + } + block, err := aes.NewCipher(key) + if err != nil { + return "", fmt.Errorf("crypto: new cipher: %w", err) + } + gcm, err := cipher.NewGCM(block) + if err != nil { + return "", fmt.Errorf("crypto: new gcm: %w", err) + } + nonce := make([]byte, gcm.NonceSize()) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return "", fmt.Errorf("crypto: generate nonce: %w", err) + } + ciphertext := gcm.Seal(nonce, nonce, []byte(plaintext), nil) + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +// Decrypt decrypts a base64-encoded nonce+ciphertext produced by Encrypt. 
+func Decrypt(key []byte, encoded string) (string, error) { + if len(encoded) == 0 { + return "", nil + } + data, err := base64.StdEncoding.DecodeString(encoded) + if err != nil { + return "", fmt.Errorf("crypto: base64 decode: %w", err) + } + block, err := aes.NewCipher(key) + if err != nil { + return "", fmt.Errorf("crypto: new cipher: %w", err) + } + gcm, err := cipher.NewGCM(block) + if err != nil { + return "", fmt.Errorf("crypto: new gcm: %w", err) + } + nonceSize := gcm.NonceSize() + if len(data) < nonceSize { + return "", fmt.Errorf("crypto: ciphertext too short") + } + nonce, ciphertext := data[:nonceSize], data[nonceSize:] + plaintext, err := gcm.Open(nil, nonce, ciphertext, nil) + if err != nil { + return "", fmt.Errorf("crypto: decrypt: %w", err) + } + return string(plaintext), nil +} diff --git a/api/internal/error/responderror.go b/api/internal/error/responderror.go new file mode 100644 index 00000000..3484deea --- /dev/null +++ b/api/internal/error/responderror.go @@ -0,0 +1,40 @@ +package error + +import ( + "log/slog" + "net/http" + + serverJSON "github.com/aspectrr/fluid.sh/api/internal/json" +) + +type ErrorResponse struct { + Error string `json:"error"` + Code int `json:"code"` + Details string `json:"details,omitempty"` +} + +// RespondError logs the actual error and returns a generic message to the client. +func RespondError(w http.ResponseWriter, status int, err error) { + if err != nil { + slog.Warn("api error", "status", status, "error", err.Error()) + } + msg := http.StatusText(status) + if msg == "" { + msg = "unexpected error" + } + _ = serverJSON.RespondJSON(w, status, ErrorResponse{ + Error: msg, + Code: status, + }) +} + +// RespondErrorMsg logs the internal error and returns a specific user-facing message. 
+func RespondErrorMsg(w http.ResponseWriter, status int, userMsg string, internalErr error) { + if internalErr != nil { + slog.Warn("api error", "status", status, "error", internalErr.Error(), "user_msg", userMsg) + } + _ = serverJSON.RespondJSON(w, status, ErrorResponse{ + Error: userMsg, + Code: status, + }) +} diff --git a/api/internal/error/responderror_test.go b/api/internal/error/responderror_test.go new file mode 100644 index 00000000..6e50bce8 --- /dev/null +++ b/api/internal/error/responderror_test.go @@ -0,0 +1,130 @@ +package error + +import ( + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" +) + +func TestRespondError_StatusCode(t *testing.T) { + w := httptest.NewRecorder() + RespondError(w, http.StatusBadRequest, errors.New("bad input")) + + if w.Code != http.StatusBadRequest { + t.Errorf("expected status 400, got %d", w.Code) + } +} + +func TestRespondError_BodyContainsGenericMessage(t *testing.T) { + w := httptest.NewRecorder() + RespondError(w, http.StatusNotFound, errors.New("item not found")) + + var resp ErrorResponse + if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil { + t.Fatalf("response body is not valid JSON: %v", err) + } + + // Should return generic HTTP status text, not the internal error + if resp.Error != "Not Found" { + t.Errorf("expected error 'Not Found', got %q", resp.Error) + } + if resp.Code != http.StatusNotFound { + t.Errorf("expected code 404, got %d", resp.Code) + } +} + +func TestRespondError_ContentType(t *testing.T) { + w := httptest.NewRecorder() + RespondError(w, http.StatusInternalServerError, errors.New("internal")) + + ct := w.Header().Get("Content-Type") + if ct != "application/json; charset=utf-8" { + t.Errorf("expected Content-Type 'application/json; charset=utf-8', got %q", ct) + } +} + +func TestRespondError_DetailsOmittedWhenEmpty(t *testing.T) { + w := httptest.NewRecorder() + RespondError(w, http.StatusForbidden, errors.New("forbidden")) + + var raw map[string]any + if err 
:= json.Unmarshal(w.Body.Bytes(), &raw); err != nil { + t.Fatalf("response body is not valid JSON: %v", err) + } + + if _, ok := raw["details"]; ok { + t.Error("expected 'details' field to be omitted when empty") + } +} + +func TestRespondError_MultipleStatuses(t *testing.T) { + tests := []struct { + status int + internalMsg string + expectedMsg string + }{ + {http.StatusBadRequest, "bad request", "Bad Request"}, + {http.StatusUnauthorized, "unauthorized", "Unauthorized"}, + {http.StatusForbidden, "forbidden", "Forbidden"}, + {http.StatusNotFound, "not found", "Not Found"}, + {http.StatusInternalServerError, "server error", "Internal Server Error"}, + } + + for _, tt := range tests { + w := httptest.NewRecorder() + RespondError(w, tt.status, errors.New(tt.internalMsg)) + + if w.Code != tt.status { + t.Errorf("status %d: expected %d, got %d", tt.status, tt.status, w.Code) + } + + var resp ErrorResponse + if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil { + t.Fatalf("status %d: response body is not valid JSON: %v", tt.status, err) + } + // Should return generic HTTP status text, not the internal error + if resp.Error != tt.expectedMsg { + t.Errorf("status %d: expected error %q, got %q", tt.status, tt.expectedMsg, resp.Error) + } + if resp.Code != tt.status { + t.Errorf("status %d: expected code %d in body, got %d", tt.status, tt.status, resp.Code) + } + } +} + +func TestRespondError_NilError(t *testing.T) { + w := httptest.NewRecorder() + RespondError(w, http.StatusBadRequest, nil) + + if w.Code != http.StatusBadRequest { + t.Errorf("expected status 400, got %d", w.Code) + } + + var resp ErrorResponse + if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil { + t.Fatalf("response body is not valid JSON: %v", err) + } + if resp.Error != "Bad Request" { + t.Errorf("expected error 'Bad Request', got %q", resp.Error) + } +} + +func TestRespondErrorMsg(t *testing.T) { + w := httptest.NewRecorder() + RespondErrorMsg(w, http.StatusBadRequest, "email is 
required", errors.New("validation: email field empty")) + + if w.Code != http.StatusBadRequest { + t.Errorf("expected status 400, got %d", w.Code) + } + + var resp ErrorResponse + if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil { + t.Fatalf("response body is not valid JSON: %v", err) + } + // RespondErrorMsg returns the user-facing message, not the internal error + if resp.Error != "email is required" { + t.Errorf("expected error 'email is required', got %q", resp.Error) + } +} diff --git a/api/internal/grpc/server.go b/api/internal/grpc/server.go new file mode 100644 index 00000000..746fcf86 --- /dev/null +++ b/api/internal/grpc/server.go @@ -0,0 +1,81 @@ +// Package grpc provides the gRPC server that accepts bidirectional streams +// from sandbox hosts. +package grpc + +import ( + "fmt" + "log/slog" + "net" + "time" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" + + "github.com/aspectrr/fluid.sh/api/internal/registry" + "github.com/aspectrr/fluid.sh/api/internal/store" + + "google.golang.org/grpc" +) + +// Server wraps a gRPC server that sandbox hosts connect to. +type Server struct { + listener net.Listener + grpcServer *grpc.Server + handler *StreamHandler + registry *registry.Registry + store store.Store + logger *slog.Logger +} + +// NewServer creates a gRPC server listening on addr and registers the +// HostService stream handler. +func NewServer( + addr string, + reg *registry.Registry, + st store.Store, + logger *slog.Logger, + heartbeatTimeout time.Duration, + opts ...grpc.ServerOption, +) (*Server, error) { + if logger == nil { + logger = slog.Default() + } + + lis, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("listen %s: %w", addr, err) + } + + gs := grpc.NewServer(opts...) 
+ + handler := NewStreamHandler(reg, st, logger, heartbeatTimeout) + fluidv1.RegisterHostServiceServer(gs, handler) + + s := &Server{ + listener: lis, + grpcServer: gs, + handler: handler, + registry: reg, + store: st, + logger: logger.With("component", "grpc"), + } + + return s, nil +} + +// Handler returns the stream handler, allowing the orchestrator to call +// SendAndWait for dispatching commands to connected hosts. +func (s *Server) Handler() *StreamHandler { + return s.handler +} + +// Start begins accepting connections. Blocks until stopped. +func (s *Server) Start() error { + s.logger.Info("gRPC server starting", "addr", s.listener.Addr().String()) + return s.grpcServer.Serve(s.listener) +} + +// Stop performs a graceful shutdown of the gRPC server. +func (s *Server) Stop() { + s.logger.Info("gRPC server stopping") + s.grpcServer.GracefulStop() +} diff --git a/api/internal/grpc/stream.go b/api/internal/grpc/stream.go new file mode 100644 index 00000000..b9b66026 --- /dev/null +++ b/api/internal/grpc/stream.go @@ -0,0 +1,401 @@ +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "log/slog" + "sync" + "time" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" + + "github.com/aspectrr/fluid.sh/api/internal/auth" + "github.com/aspectrr/fluid.sh/api/internal/registry" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +// StreamHandler implements fluidv1.HostServiceServer. +type StreamHandler struct { + fluidv1.UnimplementedHostServiceServer + + registry *registry.Registry + store store.Store + logger *slog.Logger + heartbeatTimeout time.Duration + + // pendingRequests maps request_id -> response channel. + pendingRequests sync.Map // map[string]chan *fluidv1.HostMessage + + // streams maps host_id -> active server stream. + streams sync.Map // map[string]fluidv1.HostService_ConnectServer + + // streamMu holds a per-host mutex to serialize stream.Send calls. 
+ streamMu sync.Map // map[string]*sync.Mutex + + // cancelFns maps host_id -> context.CancelFunc for the active connection. + // Used to cancel old connections when a host reconnects. + cancelFns sync.Map // map[string]context.CancelFunc +} + +// NewStreamHandler creates a stream handler wired to the given dependencies. +func NewStreamHandler( + reg *registry.Registry, + st store.Store, + logger *slog.Logger, + heartbeatTimeout time.Duration, +) *StreamHandler { + if logger == nil { + logger = slog.Default() + } + return &StreamHandler{ + registry: reg, + store: st, + logger: logger.With("component", "stream-handler"), + heartbeatTimeout: heartbeatTimeout, + } +} + +// hostMu returns the per-host mutex, creating one if needed. +func (h *StreamHandler) hostMu(hostID string) *sync.Mutex { + v, _ := h.streamMu.LoadOrStore(hostID, &sync.Mutex{}) + mu, ok := v.(*sync.Mutex) + if !ok { + h.logger.Error("streamMu contains non-Mutex value", "host_id", hostID) + return &sync.Mutex{} + } + return mu +} + +// Connect handles a single bidirectional stream from a sandbox host. +func (h *StreamHandler) Connect(stream fluidv1.HostService_ConnectServer) error { + firstMsg, err := stream.Recv() + if err != nil { + return fmt.Errorf("recv registration: %w", err) + } + + reg := firstMsg.GetRegistration() + if reg == nil { + return fmt.Errorf("first message must be HostRegistration") + } + + hostID := reg.GetHostId() + hostname := reg.GetHostname() + orgID := auth.OrgIDFromContext(stream.Context()) + tokenID := auth.TokenIDFromContext(stream.Context()) + + if tokenID == "" { + return fmt.Errorf("missing token identity from auth context") + } + + // Override daemon-supplied hostID with server-assigned identity derived + // from the authenticated token so a daemon cannot impersonate another host. 
+ if hostID != tokenID { + h.logger.Warn("daemon-supplied host_id differs from token, overriding", + "daemon_host_id", hostID, "token_id", tokenID) + hostID = tokenID + } + + logger := h.logger.With("host_id", hostID, "hostname", hostname, "org_id", orgID) + logger.Info("host connecting", "version", reg.GetVersion()) + + // Send RegistrationAck. + ack := &fluidv1.ControlMessage{ + RequestId: firstMsg.GetRequestId(), + Payload: &fluidv1.ControlMessage_RegistrationAck{ + RegistrationAck: &fluidv1.RegistrationAck{ + Accepted: true, + AssignedHostId: hostID, + }, + }, + } + if err := stream.Send(ack); err != nil { + return fmt.Errorf("send registration ack: %w", err) + } + + // Cancel any existing connection for this host to avoid duplicate streams. + if oldCancel, loaded := h.cancelFns.LoadAndDelete(hostID); loaded { + if fn, ok := oldCancel.(context.CancelFunc); ok { + fn() + } else { + logger.Error("cancelFns contains non-CancelFunc value") + } + } + + // Store the stream before registering so it is available immediately + // when other goroutines observe the host in the registry. + h.streams.Store(hostID, stream) + if err := h.registry.Register(hostID, orgID, hostname, stream); err != nil { + h.streams.Delete(hostID) + return fmt.Errorf("register host: %w", err) + } + h.registry.SetRegistration(hostID, reg) + h.registry.UpdateHeartbeatCounts(hostID, 0, int32(len(reg.GetSourceVms()))) + + // Persist or update host in the database using a background context + // so the write completes even if the stream context is cancelled. + regCtx, regCancel := context.WithTimeout(context.Background(), 10*time.Second) + defer regCancel() + h.persistHostRegistration(regCtx, hostID, orgID, reg) + + logger.Info("host registered", + "total_cpus", reg.GetTotalCpus(), + "total_memory_mb", reg.GetTotalMemoryMb(), + "base_images", reg.GetBaseImages(), + ) + + // Spawn heartbeat monitor. 
+ ctx, cancel := context.WithCancel(stream.Context()) + defer cancel() + h.cancelFns.Store(hostID, cancel) + + go h.monitorHeartbeat(ctx, cancel, hostID, logger) + + // Cleanup on disconnect. + defer func() { + // Only clean up if we still own the stream. A reconnecting host + // stores its new stream before re-registering, so if CompareAndDelete + // fails our state has already been replaced and cleanup would clobber + // the new connection. + if h.streams.CompareAndDelete(hostID, stream) { + h.cancelFns.Delete(hostID) + h.registry.Unregister(hostID) + h.streamMu.Delete(hostID) + logger.Info("host disconnected") + } else { + logger.Info("connection replaced, skipping stale cleanup") + } + }() + + // Main recv loop. + for { + msg, err := stream.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + logger.Info("host stream closed by peer") + return nil + } + logger.Error("stream recv error", "error", err) + return err + } + + h.handleHostMessage(ctx, hostID, msg, logger) + } +} + +func (h *StreamHandler) handleHostMessage(ctx context.Context, hostID string, msg *fluidv1.HostMessage, logger *slog.Logger) { + switch msg.Payload.(type) { + case *fluidv1.HostMessage_Heartbeat: + hb := msg.GetHeartbeat() + h.registry.UpdateHeartbeat(hostID) + h.registry.UpdateHeartbeatCounts(hostID, hb.GetActiveSandboxes(), hb.GetSourceVmCount()) + if err := h.store.UpdateHostHeartbeat( + ctx, + hostID, + hb.GetAvailableCpus(), + hb.GetAvailableMemoryMb(), + hb.GetAvailableDiskMb(), + ); err != nil { + h.logger.Warn("failed to update heartbeat", "host_id", hostID, "error", err) + } + h.registry.UpdateResources(hostID, hb.GetAvailableCpus(), hb.GetAvailableMemoryMb()) + + case *fluidv1.HostMessage_ResourceReport: + h.registry.UpdateHeartbeat(hostID) + logger.Info("received resource report") + + case *fluidv1.HostMessage_ErrorReport: + er := msg.GetErrorReport() + logger.Error("host reported error", + "sandbox_id", er.GetSandboxId(), + "error", er.GetError(), + "context", 
er.GetContext(), + ) + + default: + reqID := msg.GetRequestId() + if reqID == "" { + logger.Warn("received message without request_id, dropping") + return + } + if ch, ok := h.pendingRequests.LoadAndDelete(reqID); ok { + respCh, ok := ch.(chan *fluidv1.HostMessage) + if !ok { + logger.Error("pendingRequests contains non-channel value", "request_id", reqID) + return + } + respCh <- msg + } else { + logger.Warn("no pending request for response", "request_id", reqID) + } + } +} + +// SendAndWait sends a ControlMessage to a specific host and blocks until the +// host responds with a matching request_id, the context is cancelled, or the +// timeout expires. +func (h *StreamHandler) SendAndWait(ctx context.Context, hostID string, msg *fluidv1.ControlMessage, timeout time.Duration) (*fluidv1.HostMessage, error) { + streamVal, ok := h.streams.Load(hostID) + if !ok { + return nil, fmt.Errorf("host %s is not connected", hostID) + } + stream, ok := streamVal.(fluidv1.HostService_ConnectServer) + if !ok { + return nil, fmt.Errorf("host %s: stream has unexpected type", hostID) + } + + reqID := msg.GetRequestId() + if reqID == "" { + return nil, fmt.Errorf("control message must have a request_id") + } + + respCh := make(chan *fluidv1.HostMessage, 1) + h.pendingRequests.Store(reqID, respCh) + defer h.pendingRequests.Delete(reqID) + + mu := h.hostMu(hostID) + mu.Lock() + err := stream.Send(msg) + mu.Unlock() + if err != nil { + return nil, fmt.Errorf("send to host %s: %w", hostID, err) + } + + timer := time.NewTimer(timeout) + defer timer.Stop() + + select { + case resp := <-respCh: + return resp, nil + case <-ctx.Done(): + return nil, fmt.Errorf("context cancelled waiting for response from host %s", hostID) + case <-timer.C: + return nil, fmt.Errorf("timeout waiting for response from host %s (request_id=%s)", hostID, reqID) + } +} + +// monitorHeartbeat checks for heartbeat timeouts on a connected host. 
+// +// Timing: the check interval is heartbeatTimeout/3 (default 90s/3 = 30s). +// A disconnect requires 3 consecutive misses, so the effective disconnect +// window is ~2-3 minutes after the last successful heartbeat. This +// intentional buffer tolerates transient network issues. For tighter SLAs, +// reduce ORCHESTRATOR_HEARTBEAT_TIMEOUT. +func (h *StreamHandler) monitorHeartbeat(ctx context.Context, cancel context.CancelFunc, hostID string, logger *slog.Logger) { + interval := h.heartbeatTimeout / 3 + if interval < 10*time.Second { + interval = 10 * time.Second + } + ticker := time.NewTicker(interval) + defer ticker.Stop() + + consecutiveMisses := 0 + const maxMisses = 3 + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + host, ok := h.registry.GetHost(hostID) + if !ok { + return + } + if time.Since(host.LastHeartbeat) > h.heartbeatTimeout { + consecutiveMisses++ + logger.Warn("host heartbeat overdue", + "last_heartbeat", host.LastHeartbeat, + "overdue_by", time.Since(host.LastHeartbeat)-h.heartbeatTimeout, + "consecutive_misses", consecutiveMisses, + ) + if consecutiveMisses >= maxMisses { + logger.Error("host heartbeat missed too many times, disconnecting", "consecutive_misses", consecutiveMisses) + cancel() + return + } + } else { + consecutiveMisses = 0 + } + } + } +} + +func (h *StreamHandler) persistHostRegistration(ctx context.Context, hostID, orgID string, reg *fluidv1.HostRegistration) { + existing, err := h.store.GetHost(ctx, hostID) + if err != nil { + if !errors.Is(err, store.ErrNotFound) { + h.logger.Error("failed to look up host in store", "host_id", hostID, "error", err) + return + } + host := hostFromRegistration(hostID, orgID, reg) + if createErr := h.store.CreateHost(ctx, host); createErr != nil { + h.logger.Error("failed to create host in store", "host_id", hostID, "error", createErr) + } + return + } + + existing.OrgID = orgID + existing.Hostname = reg.GetHostname() + existing.Version = reg.GetVersion() + 
existing.TotalCPUs = reg.GetTotalCpus() + existing.TotalMemoryMB = reg.GetTotalMemoryMb() + existing.TotalDiskMB = reg.GetTotalDiskMb() + existing.AvailableCPUs = reg.GetAvailableCpus() + existing.AvailableMemoryMB = reg.GetAvailableMemoryMb() + existing.AvailableDiskMB = reg.GetAvailableDiskMb() + existing.BaseImages = reg.GetBaseImages() + existing.Status = store.HostStatusOnline + existing.LastHeartbeat = time.Now() + + existing.SourceVMs = sourceVMsFromProto(reg.GetSourceVms()) + existing.Bridges = bridgesFromProto(reg.GetBridges()) + + if err := h.store.UpdateHost(ctx, existing); err != nil { + h.logger.Error("failed to update host in store", "host_id", hostID, "error", err) + } +} + +func sourceVMsFromProto(vms []*fluidv1.SourceVMInfo) store.SourceVMSlice { + result := make(store.SourceVMSlice, 0, len(vms)) + for _, vm := range vms { + result = append(result, store.SourceVMJSON{ + Name: vm.GetName(), + State: vm.GetState(), + IPAddress: vm.GetIpAddress(), + Prepared: vm.GetPrepared(), + }) + } + return result +} + +func bridgesFromProto(bridges []*fluidv1.BridgeInfo) store.BridgeSlice { + result := make(store.BridgeSlice, 0, len(bridges)) + for _, b := range bridges { + result = append(result, store.BridgeJSON{ + Name: b.GetName(), + Subnet: b.GetSubnet(), + }) + } + return result +} + +func hostFromRegistration(hostID, orgID string, reg *fluidv1.HostRegistration) *store.Host { + return &store.Host{ + ID: hostID, + OrgID: orgID, + Hostname: reg.GetHostname(), + Version: reg.GetVersion(), + TotalCPUs: reg.GetTotalCpus(), + TotalMemoryMB: reg.GetTotalMemoryMb(), + TotalDiskMB: reg.GetTotalDiskMb(), + AvailableCPUs: reg.GetAvailableCpus(), + AvailableMemoryMB: reg.GetAvailableMemoryMb(), + AvailableDiskMB: reg.GetAvailableDiskMb(), + BaseImages: reg.GetBaseImages(), + SourceVMs: sourceVMsFromProto(reg.GetSourceVms()), + Bridges: bridgesFromProto(reg.GetBridges()), + Status: store.HostStatusOnline, + LastHeartbeat: time.Now(), + } +} diff --git 
a/api/internal/grpc/stream_test.go b/api/internal/grpc/stream_test.go new file mode 100644 index 00000000..36b0514b --- /dev/null +++ b/api/internal/grpc/stream_test.go @@ -0,0 +1,873 @@ +package grpc + +import ( + "context" + "fmt" + "io" + "strings" + "testing" + "time" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" + + "github.com/aspectrr/fluid.sh/api/internal/auth" + "github.com/aspectrr/fluid.sh/api/internal/registry" + "github.com/aspectrr/fluid.sh/api/internal/store" + + "google.golang.org/grpc/metadata" +) + +// --------------------------------------------------------------------------- +// mockStore - minimal implementation of store.Store for grpc tests +// --------------------------------------------------------------------------- + +type mockStore struct{} + +func (m *mockStore) Config() store.Config { return store.Config{} } +func (m *mockStore) Ping(context.Context) error { return nil } +func (m *mockStore) Close() error { return nil } +func (m *mockStore) WithTx(ctx context.Context, fn func(tx store.DataStore) error) error { + return fn(m) +} + +func (m *mockStore) CreateUser(context.Context, *store.User) error { return nil } +func (m *mockStore) GetUser(context.Context, string) (*store.User, error) { return nil, nil } +func (m *mockStore) GetUserByEmail(context.Context, string) (*store.User, error) { + return nil, nil +} +func (m *mockStore) UpdateUser(context.Context, *store.User) error { return nil } + +func (m *mockStore) CreateOAuthAccount(context.Context, *store.OAuthAccount) error { return nil } +func (m *mockStore) GetOAuthAccount(context.Context, string, string) (*store.OAuthAccount, error) { + return nil, nil +} +func (m *mockStore) GetOAuthAccountsByUser(context.Context, string) ([]*store.OAuthAccount, error) { + return nil, nil +} + +func (m *mockStore) CreateSession(context.Context, *store.Session) error { return nil } +func (m *mockStore) GetSession(context.Context, string) (*store.Session, error) { return nil, nil } 
+func (m *mockStore) DeleteSession(context.Context, string) error { return nil } +func (m *mockStore) DeleteExpiredSessions(context.Context) error { return nil } + +func (m *mockStore) CreateOrganization(context.Context, *store.Organization) error { return nil } +func (m *mockStore) GetOrganization(context.Context, string) (*store.Organization, error) { + return nil, nil +} +func (m *mockStore) GetOrganizationBySlug(context.Context, string) (*store.Organization, error) { + return nil, nil +} +func (m *mockStore) ListOrganizationsByUser(context.Context, string) ([]*store.Organization, error) { + return nil, nil +} +func (m *mockStore) UpdateOrganization(context.Context, *store.Organization) error { return nil } +func (m *mockStore) DeleteOrganization(context.Context, string) error { return nil } + +func (m *mockStore) CreateOrgMember(context.Context, *store.OrgMember) error { return nil } +func (m *mockStore) GetOrgMember(context.Context, string, string) (*store.OrgMember, error) { + return nil, nil +} +func (m *mockStore) GetOrgMemberByID(context.Context, string, string) (*store.OrgMember, error) { + return nil, nil +} +func (m *mockStore) ListOrgMembers(context.Context, string) ([]*store.OrgMember, error) { + return nil, nil +} +func (m *mockStore) DeleteOrgMember(context.Context, string, string) error { return nil } + +func (m *mockStore) CreateSubscription(context.Context, *store.Subscription) error { return nil } +func (m *mockStore) GetSubscriptionByOrg(context.Context, string) (*store.Subscription, error) { + return nil, nil +} +func (m *mockStore) UpdateSubscription(context.Context, *store.Subscription) error { return nil } + +func (m *mockStore) CreateUsageRecord(context.Context, *store.UsageRecord) error { return nil } +func (m *mockStore) ListUsageRecords(context.Context, string, time.Time, time.Time) ([]*store.UsageRecord, error) { + return nil, nil +} + +func (m *mockStore) CreateHost(context.Context, *store.Host) error { return nil } +func (m 
*mockStore) GetHost(context.Context, string) (*store.Host, error) { return nil, nil } +func (m *mockStore) ListHosts(context.Context) ([]store.Host, error) { return nil, nil } +func (m *mockStore) ListHostsByOrg(context.Context, string) ([]store.Host, error) { return nil, nil } +func (m *mockStore) UpdateHost(context.Context, *store.Host) error { return nil } +func (m *mockStore) UpdateHostHeartbeat(context.Context, string, int32, int64, int64) error { + return nil +} + +func (m *mockStore) CreateSandbox(context.Context, *store.Sandbox) error { return nil } +func (m *mockStore) GetSandbox(context.Context, string) (*store.Sandbox, error) { return nil, nil } +func (m *mockStore) GetSandboxByOrg(context.Context, string, string) (*store.Sandbox, error) { + return nil, nil +} +func (m *mockStore) ListSandboxes(context.Context) ([]store.Sandbox, error) { return nil, nil } +func (m *mockStore) ListSandboxesByOrg(context.Context, string) ([]store.Sandbox, error) { + return nil, nil +} +func (m *mockStore) UpdateSandbox(context.Context, *store.Sandbox) error { return nil } +func (m *mockStore) DeleteSandbox(context.Context, string) error { return nil } +func (m *mockStore) GetSandboxesByHostID(context.Context, string) ([]store.Sandbox, error) { + return nil, nil +} +func (m *mockStore) CountSandboxesByHostIDs(context.Context, []string) (map[string]int, error) { + return map[string]int{}, nil +} +func (m *mockStore) ListExpiredSandboxes(context.Context, time.Duration) ([]store.Sandbox, error) { + return nil, nil +} + +func (m *mockStore) CreateCommand(context.Context, *store.Command) error { return nil } +func (m *mockStore) ListSandboxCommands(context.Context, string) ([]store.Command, error) { + return nil, nil +} + +func (m *mockStore) CreateSourceHost(context.Context, *store.SourceHost) error { return nil } +func (m *mockStore) GetSourceHost(context.Context, string) (*store.SourceHost, error) { + return nil, nil +} +func (m *mockStore) 
ListSourceHostsByOrg(context.Context, string) ([]*store.SourceHost, error) { + return nil, nil +} +func (m *mockStore) DeleteSourceHost(context.Context, string) error { return nil } + +func (m *mockStore) CreateHostToken(context.Context, *store.HostToken) error { return nil } +func (m *mockStore) GetHostTokenByHash(context.Context, string) (*store.HostToken, error) { + return nil, nil +} +func (m *mockStore) ListHostTokensByOrg(context.Context, string) ([]store.HostToken, error) { + return nil, nil +} +func (m *mockStore) DeleteHostToken(context.Context, string, string) error { return nil } + +// Agent/playbook mock methods removed - interface methods commented out in store.go + +func (m *mockStore) GetOrganizationByStripeCustomerID(context.Context, string) (*store.Organization, error) { + return nil, nil +} +func (m *mockStore) GetModelMeter(context.Context, string) (*store.ModelMeter, error) { + return nil, store.ErrNotFound +} +func (m *mockStore) CreateModelMeter(context.Context, *store.ModelMeter) error { return nil } +func (m *mockStore) GetOrgModelSubscription(context.Context, string, string) (*store.OrgModelSubscription, error) { + return nil, store.ErrNotFound +} +func (m *mockStore) CreateOrgModelSubscription(context.Context, *store.OrgModelSubscription) error { + return nil +} +func (m *mockStore) SumTokenUsage(context.Context, string, time.Time, time.Time) (float64, error) { + return 0, nil +} +func (m *mockStore) ListActiveSubscriptions(context.Context) ([]*store.Subscription, error) { + return nil, nil +} +func (m *mockStore) GetSubscriptionByStripeID(context.Context, string) (*store.Subscription, error) { + return nil, nil +} +func (m *mockStore) AcquireAdvisoryLock(context.Context, int64) error { return nil } +func (m *mockStore) ReleaseAdvisoryLock(context.Context, int64) error { return nil } + +// --------------------------------------------------------------------------- +// mockConnectServer implements fluidv1.HostService_ConnectServer +// 
(which is grpc.BidiStreamingServer[HostMessage, ControlMessage]) +// --------------------------------------------------------------------------- + +type mockConnectServer struct { + sentMessages []*fluidv1.ControlMessage + sendErr error + ctx context.Context +} + +func (m *mockConnectServer) Send(msg *fluidv1.ControlMessage) error { + if m.sendErr != nil { + return m.sendErr + } + m.sentMessages = append(m.sentMessages, msg) + return nil +} + +func (m *mockConnectServer) Recv() (*fluidv1.HostMessage, error) { + return nil, fmt.Errorf("not implemented in mock") +} + +func (m *mockConnectServer) SetHeader(metadata.MD) error { return nil } +func (m *mockConnectServer) SendHeader(metadata.MD) error { return nil } +func (m *mockConnectServer) SetTrailer(metadata.MD) {} +func (m *mockConnectServer) Context() context.Context { + if m.ctx != nil { + return m.ctx + } + return context.Background() +} +func (m *mockConnectServer) SendMsg(interface{}) error { return nil } +func (m *mockConnectServer) RecvMsg(interface{}) error { return nil } + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +func TestSendAndWait_HostNotConnected(t *testing.T) { + reg := registry.New() + handler := NewStreamHandler(reg, &mockStore{}, nil, 90*time.Second) + + msg := &fluidv1.ControlMessage{ + RequestId: "req-1", + } + + _, err := handler.SendAndWait(context.Background(), "nonexistent-host", msg, 5*time.Second) + if err == nil { + t.Fatal("SendAndWait: expected error for disconnected host") + } + if !strings.Contains(err.Error(), "not connected") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "not connected") + } +} + +func TestSendAndWait_MissingRequestID(t *testing.T) { + reg := registry.New() + handler := NewStreamHandler(reg, &mockStore{}, nil, 90*time.Second) + + // Store a mock stream so the host is "connected". 
+ mock := &mockConnectServer{} + handler.streams.Store("host-1", fluidv1.HostService_ConnectServer(mock)) + + msg := &fluidv1.ControlMessage{ + RequestId: "", // Empty request ID. + } + + _, err := handler.SendAndWait(context.Background(), "host-1", msg, 5*time.Second) + if err == nil { + t.Fatal("SendAndWait: expected error for empty request_id") + } + if !strings.Contains(err.Error(), "request_id") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "request_id") + } +} + +func TestSendAndWait_Success(t *testing.T) { + reg := registry.New() + handler := NewStreamHandler(reg, &mockStore{}, nil, 90*time.Second) + + mock := &mockConnectServer{} + handler.streams.Store("host-1", fluidv1.HostService_ConnectServer(mock)) + + msg := &fluidv1.ControlMessage{ + RequestId: "req-123", + Payload: &fluidv1.ControlMessage_DestroySandbox{ + DestroySandbox: &fluidv1.DestroySandboxCommand{ + SandboxId: "sbx-1", + }, + }, + } + + // Simulate the host responding asynchronously. + go func() { + // Wait briefly for SendAndWait to register the pending request. + time.Sleep(50 * time.Millisecond) + + response := &fluidv1.HostMessage{ + RequestId: "req-123", + Payload: &fluidv1.HostMessage_SandboxDestroyed{ + SandboxDestroyed: &fluidv1.SandboxDestroyed{ + SandboxId: "sbx-1", + }, + }, + } + + // Deliver response via the pendingRequests map. + if ch, ok := handler.pendingRequests.Load("req-123"); ok { + respCh := ch.(chan *fluidv1.HostMessage) + respCh <- response + } + }() + + resp, err := handler.SendAndWait(context.Background(), "host-1", msg, 5*time.Second) + if err != nil { + t.Fatalf("SendAndWait: unexpected error: %v", err) + } + + destroyed := resp.GetSandboxDestroyed() + if destroyed == nil { + t.Fatal("response: expected SandboxDestroyed payload") + } + if destroyed.GetSandboxId() != "sbx-1" { + t.Errorf("SandboxId = %q, want %q", destroyed.GetSandboxId(), "sbx-1") + } + + // Verify the message was actually sent to the mock stream. 
+ if len(mock.sentMessages) != 1 { + t.Fatalf("sentMessages: got %d, want 1", len(mock.sentMessages)) + } + if mock.sentMessages[0].GetRequestId() != "req-123" { + t.Errorf("sent RequestId = %q, want %q", mock.sentMessages[0].GetRequestId(), "req-123") + } +} + +func TestSendAndWait_Timeout(t *testing.T) { + reg := registry.New() + handler := NewStreamHandler(reg, &mockStore{}, nil, 90*time.Second) + + mock := &mockConnectServer{} + handler.streams.Store("host-1", fluidv1.HostService_ConnectServer(mock)) + + msg := &fluidv1.ControlMessage{ + RequestId: "req-timeout", + } + + // Use a very short timeout; no response will come. + _, err := handler.SendAndWait(context.Background(), "host-1", msg, 100*time.Millisecond) + if err == nil { + t.Fatal("SendAndWait: expected timeout error") + } + if !strings.Contains(err.Error(), "timeout") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "timeout") + } +} + +func TestSendAndWait_SendError(t *testing.T) { + reg := registry.New() + handler := NewStreamHandler(reg, &mockStore{}, nil, 90*time.Second) + + mock := &mockConnectServer{ + sendErr: fmt.Errorf("stream broken"), + } + handler.streams.Store("host-1", fluidv1.HostService_ConnectServer(mock)) + + msg := &fluidv1.ControlMessage{ + RequestId: "req-fail", + } + + _, err := handler.SendAndWait(context.Background(), "host-1", msg, 5*time.Second) + if err == nil { + t.Fatal("SendAndWait: expected error when stream Send fails") + } + if !strings.Contains(err.Error(), "send to host") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "send to host") + } +} + +func TestSendAndWait_CleansPendingOnTimeout(t *testing.T) { + reg := registry.New() + handler := NewStreamHandler(reg, &mockStore{}, nil, 90*time.Second) + + mock := &mockConnectServer{} + handler.streams.Store("host-1", fluidv1.HostService_ConnectServer(mock)) + + msg := &fluidv1.ControlMessage{ + RequestId: "req-cleanup", + } + + _, _ = handler.SendAndWait(context.Background(), "host-1", msg, 
50*time.Millisecond) + + // After timeout, the pending request should be cleaned up via defer. + _, exists := handler.pendingRequests.Load("req-cleanup") + if exists { + t.Error("pending request should be cleaned up after timeout") + } +} + +// --------------------------------------------------------------------------- +// connectTestStore - mockStore override for Connect tests. +// Embeds mockStore for all methods, overrides GetHost/CreateHost/UpdateHost/ +// UpdateHostHeartbeat so persistHostRegistration does not nil-deref. +// --------------------------------------------------------------------------- + +type connectTestStore struct { + mockStore + + hostCreated bool + hostUpdated bool + heartbeatCalled bool + heartbeatHostID string + heartbeatCPUs int32 + heartbeatMemoryMB int64 + heartbeatDiskMB int64 + getHostReturn *store.Host + getHostErr error +} + +func (s *connectTestStore) GetHost(_ context.Context, _ string) (*store.Host, error) { + if s.getHostErr != nil { + return nil, s.getHostErr + } + if s.getHostReturn != nil { + return s.getHostReturn, nil + } + // Default: return a valid host so the update path works. + return &store.Host{}, nil +} + +func (s *connectTestStore) CreateHost(_ context.Context, _ *store.Host) error { + s.hostCreated = true + return nil +} + +func (s *connectTestStore) UpdateHost(_ context.Context, _ *store.Host) error { + s.hostUpdated = true + return nil +} + +func (s *connectTestStore) UpdateHostHeartbeat(_ context.Context, hostID string, cpus int32, memMB int64, diskMB int64) error { + s.heartbeatCalled = true + s.heartbeatHostID = hostID + s.heartbeatCPUs = cpus + s.heartbeatMemoryMB = memMB + s.heartbeatDiskMB = diskMB + return nil +} + +// --------------------------------------------------------------------------- +// mockConnectServerQueued - mock stream that returns queued messages from Recv. +// After all messages are consumed, Recv returns io.EOF. 
+// --------------------------------------------------------------------------- + +type mockConnectServerQueued struct { + msgs []*fluidv1.HostMessage + idx int + sent []*fluidv1.ControlMessage + sendErr error + ctx context.Context +} + +func (m *mockConnectServerQueued) Recv() (*fluidv1.HostMessage, error) { + if m.idx >= len(m.msgs) { + return nil, io.EOF + } + msg := m.msgs[m.idx] + m.idx++ + return msg, nil +} + +func (m *mockConnectServerQueued) Send(msg *fluidv1.ControlMessage) error { + if m.sendErr != nil { + return m.sendErr + } + m.sent = append(m.sent, msg) + return nil +} + +func (m *mockConnectServerQueued) SetHeader(metadata.MD) error { return nil } +func (m *mockConnectServerQueued) SendHeader(metadata.MD) error { return nil } +func (m *mockConnectServerQueued) SetTrailer(metadata.MD) {} +func (m *mockConnectServerQueued) Context() context.Context { + if m.ctx != nil { + return m.ctx + } + return context.Background() +} +func (m *mockConnectServerQueued) SendMsg(interface{}) error { return nil } +func (m *mockConnectServerQueued) RecvMsg(interface{}) error { return nil } + +// --------------------------------------------------------------------------- +// Connect() tests +// --------------------------------------------------------------------------- + +func TestConnect_RecvError(t *testing.T) { + reg := registry.New() + st := &connectTestStore{} + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + mock := &mockConnectServerQueued{ + msgs: nil, // empty - Recv immediately returns EOF + } + + err := handler.Connect(mock) + if err == nil { + t.Fatal("Connect: expected error when first Recv fails") + } + if !strings.Contains(err.Error(), "recv registration") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "recv registration") + } +} + +func TestConnect_FirstMessageNotRegistration(t *testing.T) { + reg := registry.New() + st := &connectTestStore{} + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + mock := 
&mockConnectServerQueued{ + msgs: []*fluidv1.HostMessage{ + {Payload: &fluidv1.HostMessage_Heartbeat{ + Heartbeat: &fluidv1.Heartbeat{AvailableCpus: 4}, + }}, + }, + } + + err := handler.Connect(mock) + if err == nil { + t.Fatal("Connect: expected error when first message is not registration") + } + if !strings.Contains(err.Error(), "first message must be HostRegistration") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "first message must be HostRegistration") + } +} + +func TestConnect_SendAckError(t *testing.T) { + reg := registry.New() + st := &connectTestStore{} + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + mock := &mockConnectServerQueued{ + msgs: []*fluidv1.HostMessage{ + {Payload: &fluidv1.HostMessage_Registration{ + Registration: &fluidv1.HostRegistration{ + HostId: "host-1", + Hostname: "test-host", + }, + }}, + }, + sendErr: fmt.Errorf("broken pipe"), + ctx: auth.WithTokenID(auth.WithOrgID(context.Background(), "org-1"), "host-1"), + } + + err := handler.Connect(mock) + if err == nil { + t.Fatal("Connect: expected error when Send(ack) fails") + } + if !strings.Contains(err.Error(), "send registration ack") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "send registration ack") + } +} + +func TestConnect_SuccessfulRegistrationAndEOF(t *testing.T) { + reg := registry.New() + st := &connectTestStore{} + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + mock := &mockConnectServerQueued{ + msgs: []*fluidv1.HostMessage{ + {Payload: &fluidv1.HostMessage_Registration{ + Registration: &fluidv1.HostRegistration{ + HostId: "host-1", + Hostname: "test-host", + Version: "1.0.0", + }, + }}, + // No more messages - next Recv returns EOF. + }, + ctx: auth.WithTokenID(auth.WithOrgID(context.Background(), "org-1"), "host-1"), + } + + err := handler.Connect(mock) + if err != nil { + t.Fatalf("Connect: unexpected error: %v", err) + } + + // Verify RegistrationAck was sent. 
+ if len(mock.sent) != 1 { + t.Fatalf("sent messages: got %d, want 1", len(mock.sent)) + } + ack := mock.sent[0].GetRegistrationAck() + if ack == nil { + t.Fatal("expected RegistrationAck in first sent message") + } + if !ack.GetAccepted() { + t.Error("RegistrationAck.Accepted = false, want true") + } + if ack.GetAssignedHostId() != "host-1" { + t.Errorf("AssignedHostId = %q, want %q", ack.GetAssignedHostId(), "host-1") + } + + // Verify host was persisted (update path since GetHost returns &store.Host{}). + if !st.hostUpdated { + t.Error("expected store.UpdateHost to be called") + } + + // After EOF, the defer should have unregistered the host from the registry. + if _, ok := reg.GetHost("host-1"); ok { + t.Error("host should be unregistered from registry after disconnect") + } + + // Stream should also be cleaned up. + if _, ok := handler.streams.Load("host-1"); ok { + t.Error("stream should be deleted after disconnect") + } +} + +func TestConnect_PersistHostCreatesWhenNotFound(t *testing.T) { + reg := registry.New() + st := &connectTestStore{ + getHostErr: store.ErrNotFound, // GetHost returns error -> create path + } + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + mock := &mockConnectServerQueued{ + msgs: []*fluidv1.HostMessage{ + {Payload: &fluidv1.HostMessage_Registration{ + Registration: &fluidv1.HostRegistration{ + HostId: "host-new", + Hostname: "new-host", + }, + }}, + }, + ctx: auth.WithTokenID(auth.WithOrgID(context.Background(), "org-1"), "host-new"), + } + + err := handler.Connect(mock) + if err != nil { + t.Fatalf("Connect: unexpected error: %v", err) + } + + if !st.hostCreated { + t.Error("expected store.CreateHost to be called when GetHost returns error") + } + if st.hostUpdated { + t.Error("store.UpdateHost should not be called when GetHost returns error") + } +} + +func TestConnect_HeartbeatDispatch(t *testing.T) { + reg := registry.New() + st := &connectTestStore{} + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + 
mock := &mockConnectServerQueued{ + msgs: []*fluidv1.HostMessage{ + {Payload: &fluidv1.HostMessage_Registration{ + Registration: &fluidv1.HostRegistration{ + HostId: "host-hb", + Hostname: "heartbeat-host", + }, + }}, + {Payload: &fluidv1.HostMessage_Heartbeat{ + Heartbeat: &fluidv1.Heartbeat{ + AvailableCpus: 8, + AvailableMemoryMb: 16384, + AvailableDiskMb: 256000, + }, + }}, + // EOF after heartbeat. + }, + ctx: auth.WithTokenID(auth.WithOrgID(context.Background(), "org-1"), "host-hb"), + } + + err := handler.Connect(mock) + if err != nil { + t.Fatalf("Connect: unexpected error: %v", err) + } + + if !st.heartbeatCalled { + t.Fatal("expected UpdateHostHeartbeat to be called") + } + if st.heartbeatHostID != "host-hb" { + t.Errorf("heartbeat hostID = %q, want %q", st.heartbeatHostID, "host-hb") + } + if st.heartbeatCPUs != 8 { + t.Errorf("heartbeat CPUs = %d, want 8", st.heartbeatCPUs) + } + if st.heartbeatMemoryMB != 16384 { + t.Errorf("heartbeat MemoryMB = %d, want 16384", st.heartbeatMemoryMB) + } + if st.heartbeatDiskMB != 256000 { + t.Errorf("heartbeat DiskMB = %d, want 256000", st.heartbeatDiskMB) + } +} + +func TestConnect_ResponseDispatch(t *testing.T) { + reg := registry.New() + st := &connectTestStore{} + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + // Pre-populate a pending request channel. + respCh := make(chan *fluidv1.HostMessage, 1) + handler.pendingRequests.Store("req-test", respCh) + + mock := &mockConnectServerQueued{ + msgs: []*fluidv1.HostMessage{ + {Payload: &fluidv1.HostMessage_Registration{ + Registration: &fluidv1.HostRegistration{ + HostId: "host-resp", + Hostname: "resp-host", + }, + }}, + { + RequestId: "req-test", + Payload: &fluidv1.HostMessage_SandboxDestroyed{ + SandboxDestroyed: &fluidv1.SandboxDestroyed{ + SandboxId: "sbx-1", + }, + }, + }, + // EOF after response. 
+ }, + ctx: auth.WithTokenID(auth.WithOrgID(context.Background(), "org-1"), "host-resp"), + } + + err := handler.Connect(mock) + if err != nil { + t.Fatalf("Connect: unexpected error: %v", err) + } + + // Verify the response was delivered to the channel. + select { + case resp := <-respCh: + destroyed := resp.GetSandboxDestroyed() + if destroyed == nil { + t.Fatal("expected SandboxDestroyed payload in response") + } + if destroyed.GetSandboxId() != "sbx-1" { + t.Errorf("SandboxId = %q, want %q", destroyed.GetSandboxId(), "sbx-1") + } + default: + t.Fatal("expected response to be delivered to pending request channel") + } +} + +func TestConnect_ErrorReportDoesNotPanic(t *testing.T) { + reg := registry.New() + st := &connectTestStore{} + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + mock := &mockConnectServerQueued{ + msgs: []*fluidv1.HostMessage{ + {Payload: &fluidv1.HostMessage_Registration{ + Registration: &fluidv1.HostRegistration{ + HostId: "host-err", + Hostname: "err-host", + }, + }}, + {Payload: &fluidv1.HostMessage_ErrorReport{ + ErrorReport: &fluidv1.ErrorReport{ + Error: "disk full", + SandboxId: "sbx-42", + Context: "creating overlay", + }, + }}, + }, + ctx: auth.WithTokenID(auth.WithOrgID(context.Background(), "org-1"), "host-err"), + } + + // Should not panic and should return nil on EOF. 
+ err := handler.Connect(mock) + if err != nil { + t.Fatalf("Connect: unexpected error: %v", err) + } +} + +func TestConnect_ResourceReportUpdatesHeartbeat(t *testing.T) { + reg := registry.New() + st := &connectTestStore{} + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + mock := &mockConnectServerQueued{ + msgs: []*fluidv1.HostMessage{ + {Payload: &fluidv1.HostMessage_Registration{ + Registration: &fluidv1.HostRegistration{ + HostId: "host-rr", + Hostname: "resource-host", + }, + }}, + {Payload: &fluidv1.HostMessage_ResourceReport{ + ResourceReport: &fluidv1.ResourceReport{ + AvailableCpus: 16, + AvailableMemoryMb: 32768, + AvailableDiskMb: 512000, + }, + }}, + }, + ctx: auth.WithTokenID(auth.WithOrgID(context.Background(), "org-1"), "host-rr"), + } + + err := handler.Connect(mock) + if err != nil { + t.Fatalf("Connect: unexpected error: %v", err) + } + + // ResourceReport should update the heartbeat in the registry, but since + // the host is unregistered on disconnect (defer), we cannot check the + // registry here. The test verifies no panic or error occurs. +} + +func TestConnect_MessageWithoutRequestIDDropped(t *testing.T) { + reg := registry.New() + st := &connectTestStore{} + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + mock := &mockConnectServerQueued{ + msgs: []*fluidv1.HostMessage{ + {Payload: &fluidv1.HostMessage_Registration{ + Registration: &fluidv1.HostRegistration{ + HostId: "host-drop", + Hostname: "drop-host", + }, + }}, + // A response-type message with no request_id should be dropped. + { + RequestId: "", + Payload: &fluidv1.HostMessage_SandboxCreated{ + SandboxCreated: &fluidv1.SandboxCreated{ + SandboxId: "sbx-orphan", + }, + }, + }, + }, + ctx: auth.WithTokenID(auth.WithOrgID(context.Background(), "org-1"), "host-drop"), + } + + // Should not panic; the message is silently dropped. 
+ err := handler.Connect(mock) + if err != nil { + t.Fatalf("Connect: unexpected error: %v", err) + } +} + +func TestConnect_UnmatchedRequestIDDropped(t *testing.T) { + reg := registry.New() + st := &connectTestStore{} + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + // No pending requests pre-populated. + + mock := &mockConnectServerQueued{ + msgs: []*fluidv1.HostMessage{ + {Payload: &fluidv1.HostMessage_Registration{ + Registration: &fluidv1.HostRegistration{ + HostId: "host-unmatched", + Hostname: "unmatched-host", + }, + }}, + // A response with a request_id that has no pending listener. + { + RequestId: "req-nobody", + Payload: &fluidv1.HostMessage_SandboxDestroyed{ + SandboxDestroyed: &fluidv1.SandboxDestroyed{ + SandboxId: "sbx-gone", + }, + }, + }, + }, + ctx: auth.WithTokenID(auth.WithOrgID(context.Background(), "org-1"), "host-unmatched"), + } + + // Should not panic; the response is logged as orphan and dropped. + err := handler.Connect(mock) + if err != nil { + t.Fatalf("Connect: unexpected error: %v", err) + } +} + +func TestConnect_EmptyTokenID_Rejected(t *testing.T) { + reg := registry.New() + st := &connectTestStore{} + handler := NewStreamHandler(reg, st, nil, 90*time.Second) + + mock := &mockConnectServerQueued{ + msgs: []*fluidv1.HostMessage{ + {Payload: &fluidv1.HostMessage_Registration{ + Registration: &fluidv1.HostRegistration{ + HostId: "host-1", + Hostname: "test-host", + }, + }}, + }, + // No WithTokenID - context has empty token ID. 
+ ctx: context.Background(), + } + + err := handler.Connect(mock) + if err == nil { + t.Fatal("Connect: expected error when tokenID is empty") + } + if !strings.Contains(err.Error(), "missing token identity") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "missing token identity") + } +} diff --git a/api/internal/id/id.go b/api/internal/id/id.go new file mode 100644 index 00000000..cf93713d --- /dev/null +++ b/api/internal/id/id.go @@ -0,0 +1,15 @@ +package id + +import ( + "crypto/rand" + "encoding/hex" + "fmt" +) + +func Generate(prefix string) (string, error) { + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("crypto/rand failed: %w", err) + } + return prefix + hex.EncodeToString(b), nil +} diff --git a/api/internal/id/id_test.go b/api/internal/id/id_test.go new file mode 100644 index 00000000..53db3fe5 --- /dev/null +++ b/api/internal/id/id_test.go @@ -0,0 +1,48 @@ +package id + +import ( + "encoding/hex" + "strings" + "testing" +) + +func TestGenerate_PrefixAndLength(t *testing.T) { + prefix := "TST-" + got, err := Generate(prefix) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !strings.HasPrefix(got, prefix) { + t.Errorf("expected prefix %q, got %q", prefix, got) + } + // prefix + 16 hex chars + if len(got) != len(prefix)+16 { + t.Errorf("expected length %d, got %d (%q)", len(prefix)+16, len(got), got) + } +} + +func TestGenerate_ValidHex(t *testing.T) { + prefix := "X-" + got, err := Generate(prefix) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + hexPart := got[len(prefix):] + if _, err := hex.DecodeString(hexPart); err != nil { + t.Errorf("hex part %q is not valid hex: %v", hexPart, err) + } +} + +func TestGenerate_NoCollisions(t *testing.T) { + seen := make(map[string]struct{}, 10000) + for i := 0; i < 10000; i++ { + id, err := Generate("T-") + if err != nil { + t.Fatalf("unexpected error at iteration %d: %v", i, err) + } + if _, ok := seen[id]; ok { + 
t.Fatalf("collision at iteration %d: %s", i, id) + } + seen[id] = struct{}{} + } +} diff --git a/fluid/internal/json/decodejson.go b/api/internal/json/decodejson.go old mode 100755 new mode 100644 similarity index 86% rename from fluid/internal/json/decodejson.go rename to api/internal/json/decodejson.go index 95118d82..34fa86b7 --- a/fluid/internal/json/decodejson.go +++ b/api/internal/json/decodejson.go @@ -8,7 +8,7 @@ import ( "net/http" ) -// decodeJSON is a strict JSON decoder with basic size limit and context awareness. +// DecodeJSON is a strict JSON decoder with basic size limit and context awareness. func DecodeJSON(ctx context.Context, r *http.Request, v any) error { const maxBody = int64(1 << 20) // 1 MiB r.Body = http.MaxBytesReader(nil, r.Body, maxBody) @@ -18,7 +18,6 @@ func DecodeJSON(ctx context.Context, r *http.Request, v any) error { if err := dec.Decode(v); err != nil { return fmt.Errorf("invalid json: %w", err) } - // Disallow trailing data if dec.More() { var extra any if err := dec.Decode(&extra); err == nil { diff --git a/api/internal/json/decodejson_test.go b/api/internal/json/decodejson_test.go new file mode 100644 index 00000000..350b886b --- /dev/null +++ b/api/internal/json/decodejson_test.go @@ -0,0 +1,111 @@ +package json + +import ( + "bytes" + "context" + "net/http/httptest" + "strings" + "testing" +) + +func TestDecodeJSON_Success(t *testing.T) { + body := `{"name":"alice","age":30}` + r := httptest.NewRequest("POST", "/", bytes.NewBufferString(body)) + r.Header.Set("Content-Type", "application/json") + + var dst struct { + Name string `json:"name"` + Age int `json:"age"` + } + + err := DecodeJSON(context.Background(), r, &dst) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if dst.Name != "alice" { + t.Errorf("expected name alice, got %s", dst.Name) + } + if dst.Age != 30 { + t.Errorf("expected age 30, got %d", dst.Age) + } +} + +func TestDecodeJSON_InvalidJSON(t *testing.T) { + body := `{not valid json}` + r 
:= httptest.NewRequest("POST", "/", bytes.NewBufferString(body)) + + var dst struct { + Name string `json:"name"` + } + + err := DecodeJSON(context.Background(), r, &dst) + if err == nil { + t.Fatal("expected error for invalid JSON, got nil") + } + if !strings.Contains(err.Error(), "invalid json") { + t.Errorf("expected error to contain 'invalid json', got %v", err) + } +} + +func TestDecodeJSON_UnknownFields(t *testing.T) { + body := `{"name":"alice","unknown_field":"value"}` + r := httptest.NewRequest("POST", "/", bytes.NewBufferString(body)) + + var dst struct { + Name string `json:"name"` + } + + err := DecodeJSON(context.Background(), r, &dst) + if err == nil { + t.Fatal("expected error for unknown fields, got nil") + } + if !strings.Contains(err.Error(), "invalid json") { + t.Errorf("expected error to contain 'invalid json', got %v", err) + } +} + +func TestDecodeJSON_BodyTooLarge(t *testing.T) { + // 1 MiB = 1048576 bytes; create a body slightly larger + large := strings.Repeat("x", 1<<20+100) + body := `{"name":"` + large + `"}` + r := httptest.NewRequest("POST", "/", bytes.NewBufferString(body)) + + var dst struct { + Name string `json:"name"` + } + + err := DecodeJSON(context.Background(), r, &dst) + if err == nil { + t.Fatal("expected error for body too large, got nil") + } +} + +func TestDecodeJSON_TrailingData(t *testing.T) { + body := `{"name":"alice"}{"name":"bob"}` + r := httptest.NewRequest("POST", "/", bytes.NewBufferString(body)) + + var dst struct { + Name string `json:"name"` + } + + err := DecodeJSON(context.Background(), r, &dst) + if err == nil { + t.Fatal("expected error for trailing data, got nil") + } + if !strings.Contains(err.Error(), "trailing data") { + t.Errorf("expected error to contain 'trailing data', got %v", err) + } +} + +func TestDecodeJSON_EmptyBody(t *testing.T) { + r := httptest.NewRequest("POST", "/", bytes.NewBufferString("")) + + var dst struct { + Name string `json:"name"` + } + + err := DecodeJSON(context.Background(), 
r, &dst) + if err == nil { + t.Fatal("expected error for empty body, got nil") + } +} diff --git a/fluid-remote/internal/json/respondjson.go b/api/internal/json/respondjson.go old mode 100755 new mode 100644 similarity index 100% rename from fluid-remote/internal/json/respondjson.go rename to api/internal/json/respondjson.go diff --git a/api/internal/json/respondjson_test.go b/api/internal/json/respondjson_test.go new file mode 100644 index 00000000..567ca0ab --- /dev/null +++ b/api/internal/json/respondjson_test.go @@ -0,0 +1,98 @@ +package json + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestRespondJSON_ContentType(t *testing.T) { + w := httptest.NewRecorder() + err := RespondJSON(w, http.StatusOK, map[string]string{"key": "value"}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + ct := w.Header().Get("Content-Type") + if ct != "application/json; charset=utf-8" { + t.Errorf("expected Content-Type 'application/json; charset=utf-8', got %q", ct) + } +} + +func TestRespondJSON_StatusCode(t *testing.T) { + w := httptest.NewRecorder() + err := RespondJSON(w, http.StatusCreated, map[string]string{"id": "123"}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if w.Code != http.StatusCreated { + t.Errorf("expected status 201, got %d", w.Code) + } +} + +func TestRespondJSON_ValidBody(t *testing.T) { + data := map[string]any{ + "name": "test", + "count": float64(42), + } + + w := httptest.NewRecorder() + err := RespondJSON(w, http.StatusOK, data) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + var result map[string]any + if err := json.Unmarshal(w.Body.Bytes(), &result); err != nil { + t.Fatalf("response body is not valid JSON: %v", err) + } + if result["name"] != "test" { + t.Errorf("expected name 'test', got %v", result["name"]) + } + if result["count"] != float64(42) { + t.Errorf("expected count 42, got %v", result["count"]) + } +} + +func 
TestRespondJSON_HTMLCharsNotEscaped(t *testing.T) { + data := map[string]string{ + "html": "bold & \"quoted\"", + } + + w := httptest.NewRecorder() + err := RespondJSON(w, http.StatusOK, data) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + body := w.Body.String() + // With SetEscapeHTML(false), the raw chars should appear unescaped + if strings.Contains(body, `\u003c`) { + t.Errorf("expected HTML chars not to be escaped, but found \\u003c in body: %s", body) + } + if strings.Contains(body, `\u0026`) { + t.Errorf("expected HTML chars not to be escaped, but found \\u0026 in body: %s", body) + } + if !strings.Contains(body, "") { + t.Errorf("expected literal in body, got: %s", body) + } + if !strings.Contains(body, "&") { + t.Errorf("expected literal & in body, got: %s", body) + } +} + +func TestRespondJSON_XContentTypeOptions(t *testing.T) { + w := httptest.NewRecorder() + err := RespondJSON(w, http.StatusOK, "ok") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + xcto := w.Header().Get("X-Content-Type-Options") + if xcto != "nosniff" { + t.Errorf("expected X-Content-Type-Options 'nosniff', got %q", xcto) + } +} diff --git a/api/internal/orchestrator/orchestrator.go b/api/internal/orchestrator/orchestrator.go new file mode 100644 index 00000000..8f1ffe4d --- /dev/null +++ b/api/internal/orchestrator/orchestrator.go @@ -0,0 +1,845 @@ +// Package orchestrator implements sandbox lifecycle management. +// It coordinates between the host registry, the gRPC stream handler, +// and the persistent store to create, manage, and destroy sandboxes. 
+package orchestrator + +import ( + "context" + "fmt" + "log/slog" + "math/rand/v2" + "sync" + "time" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" + + "github.com/aspectrr/fluid.sh/api/internal/id" + "github.com/aspectrr/fluid.sh/api/internal/registry" + "github.com/aspectrr/fluid.sh/api/internal/store" + + "github.com/google/uuid" + "golang.org/x/sync/errgroup" +) + +const ( + timeoutCreateSandbox = 5 * time.Minute + timeoutDestroySandbox = 2 * time.Minute + timeoutStartStop = 2 * time.Minute + timeoutSnapshot = 5 * time.Minute + timeoutListSources = 30 * time.Second + timeoutValidateVM = 30 * time.Second + timeoutPrepareVM = 5 * time.Minute + timeoutDiscoverHosts = 2 * time.Minute + timeoutReadFile = 30 * time.Second + commandTimeoutBuffer = 30 * time.Second +) + +// HostSender abstracts the ability to send a ControlMessage to a specific host +// and wait for a correlated response. +type HostSender interface { + SendAndWait(ctx context.Context, hostID string, msg *fluidv1.ControlMessage, timeout time.Duration) (*fluidv1.HostMessage, error) +} + +// Orchestrator coordinates sandbox lifecycle operations across connected hosts. +type Orchestrator struct { + registry *registry.Registry + store store.Store + sender HostSender + logger *slog.Logger + defaultTTL time.Duration + heartbeatTimeout time.Duration +} + +// New creates an Orchestrator. 
// New creates an Orchestrator. A nil logger falls back to slog.Default();
// the logger is tagged with component=orchestrator.
func New(
	reg *registry.Registry,
	st store.Store,
	sender HostSender,
	logger *slog.Logger,
	defaultTTL time.Duration,
	heartbeatTimeout time.Duration,
) *Orchestrator {
	if logger == nil {
		logger = slog.Default()
	}
	return &Orchestrator{
		registry:         reg,
		store:            st,
		sender:           sender,
		logger:           logger.With("component", "orchestrator"),
		defaultTTL:       defaultTTL,
		heartbeatTimeout: heartbeatTimeout,
	}
}

// ---------------------------------------------------------------------------
// Sandbox lifecycle
// ---------------------------------------------------------------------------

// CreateSandbox selects a host, sends a CreateSandboxCommand over the gRPC
// stream, waits for the SandboxCreated response, and persists the sandbox.
//
// If DB persistence fails after the host has created the VM, a compensating
// destroy is issued so no orphan VM is left behind (best effort; a failure of
// the compensation itself is logged as an orphan).
func (o *Orchestrator) CreateSandbox(ctx context.Context, req CreateSandboxRequest) (*store.Sandbox, error) {
	sandboxID, err := id.Generate("SBX-")
	if err != nil {
		return nil, fmt.Errorf("generate sandbox ID: %w", err)
	}

	// Defaults when the request leaves sizing unset: 2 vCPUs, 2048 MB.
	vcpus := int32(req.VCPUs)
	if vcpus == 0 {
		vcpus = 2
	}
	memMB := int32(req.MemoryMB)
	if memMB == 0 {
		memMB = 2048
	}

	host, err := SelectHost(o.registry, req.SourceVM, req.OrgID, o.heartbeatTimeout, vcpus, memMB)
	if err != nil {
		// SourceVM is always set (validated in handler). Fall back to
		// source-VM-aware placement when base image matching fails.
		var fallbackErr error
		host, fallbackErr = SelectHostForSourceVM(o.registry, req.SourceVM, req.OrgID, o.heartbeatTimeout, vcpus, memMB)
		if fallbackErr != nil {
			return nil, fmt.Errorf("select host: image match: %v; source VM fallback: %w", err, fallbackErr)
		}
	}

	// Request TTL wins; otherwise fall back to the orchestrator default (if any).
	ttlSeconds := int32(req.TTLSeconds)
	if ttlSeconds == 0 && o.defaultTTL > 0 {
		ttlSeconds = int32(o.defaultTTL.Seconds())
	}

	name := req.Name
	if name == "" {
		// Derive a short name from the generated ID, skipping the "SBX-" prefix.
		// NOTE(review): assumes id.Generate returns at least 12 characters — TODO confirm.
		name = "sbx-" + sandboxID[4:12]
	}

	// Map live flag to snapshot mode. When !req.Live the mode stays at the
	// proto zero value (unspecified).
	var snapshotMode fluidv1.SnapshotMode
	if req.Live {
		snapshotMode = fluidv1.SnapshotMode_SNAPSHOT_MODE_FRESH
	}

	// Resolve source host connection if source_host_id is provided
	var sourceHostConn *fluidv1.SourceHostConnection
	if req.SourceHostID != "" {
		sh, err := o.store.GetSourceHost(ctx, req.SourceHostID)
		if err != nil {
			return nil, fmt.Errorf("get source host: %w", err)
		}
		sourceHostConn = &fluidv1.SourceHostConnection{
			Type:             sh.Type,
			SshHost:          sh.Hostname,
			SshPort:          int32(sh.SSHPort),
			SshUser:          sh.SSHUser,
			SshIdentityFile:  sh.SSHIdentityFile,
			ProxmoxHost:      sh.ProxmoxHost,
			ProxmoxTokenId:   sh.ProxmoxTokenID,
			ProxmoxSecret:    sh.ProxmoxSecret,
			ProxmoxNode:      sh.ProxmoxNode,
			ProxmoxVerifySsl: sh.ProxmoxVerifySSL,
		}
	}

	reqID := uuid.New().String()
	cmd := &fluidv1.ControlMessage{
		RequestId: reqID,
		Payload: &fluidv1.ControlMessage_CreateSandbox{
			CreateSandbox: &fluidv1.CreateSandboxCommand{
				SandboxId:            sandboxID,
				BaseImage:            req.SourceVM,
				Name:                 name,
				Vcpus:                vcpus,
				MemoryMb:             memMB,
				TtlSeconds:           ttlSeconds,
				AgentId:              req.AgentID,
				Network:              req.Network,
				SourceVm:             req.SourceVM,
				SnapshotMode:         snapshotMode,
				SourceHostConnection: sourceHostConn,
			},
		},
	}

	o.logger.Info("creating sandbox",
		"sandbox_id", sandboxID,
		"host_id", host.HostID,
		"org_id", req.OrgID,
		"source_vm", req.SourceVM,
		"live", req.Live,
	)

	resp, err := o.sender.SendAndWait(ctx, host.HostID, cmd, timeoutCreateSandbox)
	if err != nil {
		return nil, fmt.Errorf("create sandbox on host %s: %w", host.HostID, err)
	}

	created := resp.GetSandboxCreated()
	if created == nil {
		// Surface a host-reported error if present; otherwise the stream
		// returned something we don't understand.
		if errReport := resp.GetErrorReport(); errReport != nil {
			return nil, fmt.Errorf("host error: %s", errReport.GetError())
		}
		return nil, fmt.Errorf("unexpected response type from host")
	}

	sandbox := &store.Sandbox{
		ID:         sandboxID,
		OrgID:      req.OrgID,
		HostID:     host.HostID,
		Name:       created.GetName(),
		AgentID:    req.AgentID,
		BaseImage:  req.SourceVM,
		Bridge:     created.GetBridge(),
		MACAddress: created.GetMacAddress(),
		IPAddress:  created.GetIpAddress(),
		State:      store.SandboxState(created.GetState()),
		VCPUs:      vcpus,
		MemoryMB:   memMB,
		TTLSeconds: ttlSeconds,
		SourceVM:   req.SourceVM,
	}

	if err := o.store.CreateSandbox(ctx, sandbox); err != nil {
		// Compensating action: destroy the VM on the host to avoid orphan.
		// Uses context.Background() so this runs reliably even if the
		// caller's context is cancelled.
		o.logger.Warn("DB persist failed, issuing compensating destroy",
			"sandbox_id", sandboxID, "host_id", host.HostID, "error", err)
		compReqID := uuid.New().String()
		compCmd := &fluidv1.ControlMessage{
			RequestId: compReqID,
			Payload: &fluidv1.ControlMessage_DestroySandbox{
				DestroySandbox: &fluidv1.DestroySandboxCommand{
					SandboxId: sandboxID,
				},
			},
		}
		if _, compErr := o.sender.SendAndWait(context.Background(), host.HostID, compCmd, timeoutDestroySandbox); compErr != nil {
			o.logger.Error("compensating destroy failed - orphaned VM on host",
				"sandbox_id", sandboxID, "host_id", host.HostID, "error", compErr)
		}
		return nil, fmt.Errorf("persist sandbox: %w", err)
	}

	o.logger.Info("sandbox created",
		"sandbox_id", sandboxID,
		"host_id", host.HostID,
		"ip_address", created.GetIpAddress(),
	)

	return sandbox, nil
}

// GetSandbox retrieves a sandbox by ID, scoped to the given org.
+func (o *Orchestrator) GetSandbox(ctx context.Context, orgID, id string) (*store.Sandbox, error) { + return o.store.GetSandboxByOrg(ctx, orgID, id) +} + +// ListSandboxesByOrg returns all non-deleted sandboxes for an org. +func (o *Orchestrator) ListSandboxesByOrg(ctx context.Context, orgID string) ([]*store.Sandbox, error) { + sandboxes, err := o.store.ListSandboxesByOrg(ctx, orgID) + if err != nil { + return nil, err + } + result := make([]*store.Sandbox, len(sandboxes)) + for i := range sandboxes { + result[i] = &sandboxes[i] + } + return result, nil +} + +// DestroySandbox sends a destroy command to the host and marks the sandbox +// as destroyed in the store. The sandbox is looked up scoped to orgID for +// defense-in-depth authorization. +func (o *Orchestrator) DestroySandbox(ctx context.Context, orgID, sandboxID string) error { + sandbox, err := o.store.GetSandboxByOrg(ctx, orgID, sandboxID) + if err != nil { + return fmt.Errorf("get sandbox: %w", err) + } + + reqID := uuid.New().String() + cmd := &fluidv1.ControlMessage{ + RequestId: reqID, + Payload: &fluidv1.ControlMessage_DestroySandbox{ + DestroySandbox: &fluidv1.DestroySandboxCommand{ + SandboxId: sandboxID, + }, + }, + } + + resp, err := o.sender.SendAndWait(ctx, sandbox.HostID, cmd, timeoutDestroySandbox) + if err != nil { + return fmt.Errorf("destroy sandbox on host %s: %w", sandbox.HostID, err) + } + + if destroyed := resp.GetSandboxDestroyed(); destroyed == nil { + if errReport := resp.GetErrorReport(); errReport != nil { + return fmt.Errorf("host error: %s", errReport.GetError()) + } + return fmt.Errorf("unexpected response type from host") + } + + if err := o.store.DeleteSandbox(ctx, sandboxID); err != nil { + return fmt.Errorf("delete sandbox from store: %w", err) + } + + o.logger.Info("sandbox destroyed", "sandbox_id", sandboxID) + return nil +} + +// RunCommand sends a command to execute in a sandbox and persists the result. 
+func (o *Orchestrator) RunCommand(ctx context.Context, orgID, sandboxID, command string, timeoutSec int) (*store.Command, error) { + sandbox, err := o.store.GetSandboxByOrg(ctx, orgID, sandboxID) + if err != nil { + return nil, fmt.Errorf("get sandbox: %w", err) + } + + if timeoutSec == 0 { + timeoutSec = 300 + } + + reqID := uuid.New().String() + cmd := &fluidv1.ControlMessage{ + RequestId: reqID, + Payload: &fluidv1.ControlMessage_RunCommand{ + RunCommand: &fluidv1.RunCommandCommand{ + SandboxId: sandboxID, + Command: command, + TimeoutSeconds: int32(timeoutSec), + }, + }, + } + + startedAt := time.Now() + + resp, err := o.sender.SendAndWait(ctx, sandbox.HostID, cmd, time.Duration(timeoutSec)*time.Second+commandTimeoutBuffer) + if err != nil { + return nil, fmt.Errorf("run command on host %s: %w", sandbox.HostID, err) + } + + result := resp.GetCommandResult() + if result == nil { + if errReport := resp.GetErrorReport(); errReport != nil { + return nil, fmt.Errorf("host error: %s", errReport.GetError()) + } + return nil, fmt.Errorf("unexpected response type from host") + } + + cmdRecord := &store.Command{ + ID: uuid.New().String(), + SandboxID: sandboxID, + Command: command, + Stdout: result.GetStdout(), + Stderr: result.GetStderr(), + ExitCode: result.GetExitCode(), + DurationMS: result.GetDurationMs(), + StartedAt: startedAt, + EndedAt: time.Now(), + } + + if err := o.store.CreateCommand(ctx, cmdRecord); err != nil { + o.logger.Error("failed to persist command", "sandbox_id", sandboxID, "error", err) + } + + return cmdRecord, nil +} + +// StartSandbox sends a start command to the host. 
+func (o *Orchestrator) StartSandbox(ctx context.Context, orgID, sandboxID string) error { + sandbox, err := o.store.GetSandboxByOrg(ctx, orgID, sandboxID) + if err != nil { + return fmt.Errorf("get sandbox: %w", err) + } + + if sandbox.State == store.SandboxStateRunning { + return fmt.Errorf("sandbox is already running") + } + if sandbox.State == store.SandboxStateDestroyed { + return fmt.Errorf("sandbox is destroyed") + } + + reqID := uuid.New().String() + cmd := &fluidv1.ControlMessage{ + RequestId: reqID, + Payload: &fluidv1.ControlMessage_StartSandbox{ + StartSandbox: &fluidv1.StartSandboxCommand{ + SandboxId: sandboxID, + }, + }, + } + + resp, err := o.sender.SendAndWait(ctx, sandbox.HostID, cmd, timeoutStartStop) + if err != nil { + return fmt.Errorf("start sandbox on host %s: %w", sandbox.HostID, err) + } + + started := resp.GetSandboxStarted() + if started == nil { + if errReport := resp.GetErrorReport(); errReport != nil { + return fmt.Errorf("host error: %s", errReport.GetError()) + } + return fmt.Errorf("unexpected response type from host") + } + + sandbox.State = store.SandboxState(started.GetState()) + sandbox.IPAddress = started.GetIpAddress() + if err := o.store.UpdateSandbox(ctx, sandbox); err != nil { + return fmt.Errorf("host operation succeeded but failed to persist state: %w", err) + } + + return nil +} + +// StopSandbox sends a stop command to the host. 
+func (o *Orchestrator) StopSandbox(ctx context.Context, orgID, sandboxID string) error { + sandbox, err := o.store.GetSandboxByOrg(ctx, orgID, sandboxID) + if err != nil { + return fmt.Errorf("get sandbox: %w", err) + } + + if sandbox.State == store.SandboxStateStopped { + return fmt.Errorf("sandbox is already stopped") + } + if sandbox.State == store.SandboxStateDestroyed { + return fmt.Errorf("sandbox is destroyed") + } + + reqID := uuid.New().String() + cmd := &fluidv1.ControlMessage{ + RequestId: reqID, + Payload: &fluidv1.ControlMessage_StopSandbox{ + StopSandbox: &fluidv1.StopSandboxCommand{ + SandboxId: sandboxID, + }, + }, + } + + resp, err := o.sender.SendAndWait(ctx, sandbox.HostID, cmd, timeoutStartStop) + if err != nil { + return fmt.Errorf("stop sandbox on host %s: %w", sandbox.HostID, err) + } + + stopped := resp.GetSandboxStopped() + if stopped == nil { + if errReport := resp.GetErrorReport(); errReport != nil { + return fmt.Errorf("host error: %s", errReport.GetError()) + } + return fmt.Errorf("unexpected response type from host") + } + + sandbox.State = store.SandboxState(stopped.GetState()) + if err := o.store.UpdateSandbox(ctx, sandbox); err != nil { + return fmt.Errorf("host operation succeeded but failed to persist state: %w", err) + } + + return nil +} + +// CreateSnapshot sends a snapshot command to the host. 
+func (o *Orchestrator) CreateSnapshot(ctx context.Context, orgID, sandboxID, name string) (*SnapshotResponse, error) { + sandbox, err := o.store.GetSandboxByOrg(ctx, orgID, sandboxID) + if err != nil { + return nil, fmt.Errorf("get sandbox: %w", err) + } + + reqID := uuid.New().String() + cmd := &fluidv1.ControlMessage{ + RequestId: reqID, + Payload: &fluidv1.ControlMessage_CreateSnapshot{ + CreateSnapshot: &fluidv1.SnapshotCommand{ + SandboxId: sandboxID, + SnapshotName: name, + }, + }, + } + + resp, err := o.sender.SendAndWait(ctx, sandbox.HostID, cmd, timeoutSnapshot) + if err != nil { + return nil, fmt.Errorf("create snapshot on host %s: %w", sandbox.HostID, err) + } + + created := resp.GetSnapshotCreated() + if created == nil { + if errReport := resp.GetErrorReport(); errReport != nil { + return nil, fmt.Errorf("host error: %s", errReport.GetError()) + } + return nil, fmt.Errorf("unexpected response type from host") + } + + return &SnapshotResponse{ + SnapshotID: created.GetSnapshotId(), + SandboxID: sandboxID, + SnapshotName: created.GetSnapshotName(), + CreatedAt: time.Now(), + }, nil +} + +// ListCommands returns all commands for a given sandbox. +func (o *Orchestrator) ListCommands(ctx context.Context, orgID, sandboxID string) ([]*store.Command, error) { + // Verify sandbox belongs to org. + if _, err := o.store.GetSandboxByOrg(ctx, orgID, sandboxID); err != nil { + return nil, fmt.Errorf("get sandbox: %w", err) + } + + commands, err := o.store.ListSandboxCommands(ctx, sandboxID) + if err != nil { + return nil, err + } + result := make([]*store.Command, len(commands)) + for i := range commands { + result[i] = &commands[i] + } + return result, nil +} + +// --------------------------------------------------------------------------- +// Host operations +// --------------------------------------------------------------------------- + +// ListHosts returns info about all connected hosts. 
+func (o *Orchestrator) ListHosts(ctx context.Context, orgID string) ([]*HostInfo, error) { + connected := o.registry.ListConnectedByOrg(orgID) + result := make([]*HostInfo, 0, len(connected)) + + // Batch-fetch sandbox counts to avoid N+1 queries. + hostIDs := make([]string, len(connected)) + for i, h := range connected { + hostIDs[i] = h.HostID + } + counts, err := o.store.CountSandboxesByHostIDs(ctx, hostIDs) + if err != nil { + o.logger.Warn("failed to batch-count sandboxes by host", "error", err) + counts = map[string]int{} + } + + for _, h := range connected { + info := &HostInfo{ + HostID: h.HostID, + Hostname: h.Hostname, + Status: "ONLINE", + LastHeartbeat: h.LastHeartbeat.Format(time.RFC3339), + ActiveSandboxes: counts[h.HostID], + } + if h.Registration != nil { + info.AvailableCPUs = h.Registration.GetAvailableCpus() + info.AvailableMemMB = h.Registration.GetAvailableMemoryMb() + info.AvailableDiskMB = h.Registration.GetAvailableDiskMb() + info.BaseImages = h.Registration.GetBaseImages() + } + + result = append(result, info) + } + + return result, nil +} + +// GetHost returns info about a specific connected host. 
+func (o *Orchestrator) GetHost(ctx context.Context, id, orgID string) (*HostInfo, error) { + h, ok := o.registry.GetHost(id) + if !ok { + return nil, fmt.Errorf("host %s not found or not connected", id) + } + if h.OrgID != orgID { + return nil, fmt.Errorf("host %s not found or not connected", id) + } + + info := &HostInfo{ + HostID: h.HostID, + Hostname: h.Hostname, + Status: "ONLINE", + LastHeartbeat: h.LastHeartbeat.Format(time.RFC3339), + } + if h.Registration != nil { + info.AvailableCPUs = h.Registration.GetAvailableCpus() + info.AvailableMemMB = h.Registration.GetAvailableMemoryMb() + info.AvailableDiskMB = h.Registration.GetAvailableDiskMb() + info.BaseImages = h.Registration.GetBaseImages() + } + + counts, err := o.store.CountSandboxesByHostIDs(ctx, []string{h.HostID}) + if err != nil { + o.logger.Warn("failed to count sandboxes for host", "host_id", h.HostID, "error", err) + } else { + info.ActiveSandboxes = counts[h.HostID] + } + + return info, nil +} + +// --------------------------------------------------------------------------- +// Source VM operations +// --------------------------------------------------------------------------- + +// ListVMs aggregates source VMs from all connected hosts in parallel. 
// ListVMs aggregates source VMs from all connected hosts in parallel.
//
// Up to 10 hosts are queried concurrently. A host that fails to answer falls
// back to the cached source-VM list from its registration, so the method
// returns partial results rather than failing outright. Hosts with no
// registration are skipped entirely.
func (o *Orchestrator) ListVMs(ctx context.Context, orgID string) ([]*VMInfo, error) {
	connected := o.registry.ListConnectedByOrg(orgID)

	// mu guards result, which is appended to from multiple goroutines.
	var mu sync.Mutex
	var result []*VMInfo

	g, gCtx := errgroup.WithContext(ctx)
	g.SetLimit(10)
	// Loop variable h is captured per iteration (Go >= 1.22 semantics).
	for _, h := range connected {
		if h.Registration == nil {
			continue
		}

		g.Go(func() error {
			// Bail out early if the group context was already cancelled.
			select {
			case <-gCtx.Done():
				return gCtx.Err()
			default:
			}

			reqID := uuid.New().String()
			cmd := &fluidv1.ControlMessage{
				RequestId: reqID,
				Payload: &fluidv1.ControlMessage_ListSourceVms{
					ListSourceVms: &fluidv1.ListSourceVMsCommand{},
				},
			}

			var vms []*VMInfo
			resp, err := o.sender.SendAndWait(gCtx, h.HostID, cmd, timeoutListSources)
			if err != nil {
				// Live query failed: fall back to the VM list cached in the
				// host's registration message.
				o.logger.Warn("failed to list VMs from host", "host_id", h.HostID, "error", err)
				for _, vm := range h.Registration.GetSourceVms() {
					vms = append(vms, &VMInfo{
						Name:      vm.GetName(),
						State:     vm.GetState(),
						IPAddress: vm.GetIpAddress(),
						Prepared:  vm.GetPrepared(),
						HostID:    h.HostID,
					})
				}
			} else if vmList := resp.GetSourceVmsList(); vmList != nil {
				for _, vm := range vmList.GetVms() {
					vms = append(vms, &VMInfo{
						Name:      vm.GetName(),
						State:     vm.GetState(),
						IPAddress: vm.GetIpAddress(),
						Prepared:  vm.GetPrepared(),
						HostID:    h.HostID,
					})
				}
			}

			mu.Lock()
			result = append(result, vms...)
			mu.Unlock()
			return nil
		})
	}

	// g.Wait always returns nil because goroutines handle errors internally
	// (log warning + fall back to cached registration data). Partial results
	// are returned intentionally.
	_ = g.Wait()
	return result, nil
}

// PrepareSourceVM sends a prepare command to the host that owns the source VM.
+func (o *Orchestrator) PrepareSourceVM(ctx context.Context, orgID, vmName, sshUser, keyPath string) (*fluidv1.SourceVMPrepared, error) { + host, err := SelectHostForSourceVM(o.registry, vmName, orgID, o.heartbeatTimeout, 0, 0) + if err != nil { + return nil, err + } + + reqID := uuid.New().String() + cmd := &fluidv1.ControlMessage{ + RequestId: reqID, + Payload: &fluidv1.ControlMessage_PrepareSourceVm{ + PrepareSourceVm: &fluidv1.PrepareSourceVMCommand{ + SourceVm: vmName, + SshUser: sshUser, + SshKeyPath: keyPath, + }, + }, + } + + resp, err := o.sender.SendAndWait(ctx, host.HostID, cmd, timeoutPrepareVM) + if err != nil { + return nil, fmt.Errorf("prepare source VM on host %s: %w", host.HostID, err) + } + + prepared := resp.GetSourceVmPrepared() + if prepared == nil { + if errReport := resp.GetErrorReport(); errReport != nil { + return nil, fmt.Errorf("host error: %s", errReport.GetError()) + } + return nil, fmt.Errorf("unexpected response type from host") + } + + return prepared, nil +} + +// ValidateSourceVM sends a validate command to the host that owns the source VM. 
+func (o *Orchestrator) ValidateSourceVM(ctx context.Context, orgID, vmName string) (*fluidv1.SourceVMValidation, error) { + host, err := SelectHostForSourceVM(o.registry, vmName, orgID, o.heartbeatTimeout, 0, 0) + if err != nil { + return nil, err + } + + reqID := uuid.New().String() + cmd := &fluidv1.ControlMessage{ + RequestId: reqID, + Payload: &fluidv1.ControlMessage_ValidateSourceVm{ + ValidateSourceVm: &fluidv1.ValidateSourceVMCommand{ + SourceVm: vmName, + }, + }, + } + + resp, err := o.sender.SendAndWait(ctx, host.HostID, cmd, timeoutValidateVM) + if err != nil { + return nil, fmt.Errorf("validate source VM on host %s: %w", host.HostID, err) + } + + validation := resp.GetSourceVmValidation() + if validation == nil { + if errReport := resp.GetErrorReport(); errReport != nil { + return nil, fmt.Errorf("host error: %s", errReport.GetError()) + } + return nil, fmt.Errorf("unexpected response type from host") + } + + return validation, nil +} + +// RunSourceCommand executes a read-only command on a source VM via the host. 
+func (o *Orchestrator) RunSourceCommand(ctx context.Context, orgID, vmName, command string, timeoutSec int) (*SourceCommandResult, error) { + host, err := SelectHostForSourceVM(o.registry, vmName, orgID, o.heartbeatTimeout, 0, 0) + if err != nil { + return nil, err + } + + if timeoutSec == 0 { + timeoutSec = 30 + } + + reqID := uuid.New().String() + cmd := &fluidv1.ControlMessage{ + RequestId: reqID, + Payload: &fluidv1.ControlMessage_RunSourceCommand{ + RunSourceCommand: &fluidv1.RunSourceCommandCommand{ + SourceVm: vmName, + Command: command, + TimeoutSeconds: int32(timeoutSec), + }, + }, + } + + resp, err := o.sender.SendAndWait(ctx, host.HostID, cmd, time.Duration(timeoutSec)*time.Second+commandTimeoutBuffer) + if err != nil { + return nil, fmt.Errorf("run source command on host %s: %w", host.HostID, err) + } + + result := resp.GetSourceCommandResult() + if result == nil { + if errReport := resp.GetErrorReport(); errReport != nil { + return nil, fmt.Errorf("host error: %s", errReport.GetError()) + } + return nil, fmt.Errorf("unexpected response type from host") + } + + return &SourceCommandResult{ + SourceVM: vmName, + ExitCode: int(result.GetExitCode()), + Stdout: result.GetStdout(), + Stderr: result.GetStderr(), + }, nil +} + +// ReadSourceFile reads a file from a source VM via the host. 
+func (o *Orchestrator) ReadSourceFile(ctx context.Context, orgID, vmName, path string) (*SourceFileResult, error) { + host, err := SelectHostForSourceVM(o.registry, vmName, orgID, o.heartbeatTimeout, 0, 0) + if err != nil { + return nil, err + } + + reqID := uuid.New().String() + cmd := &fluidv1.ControlMessage{ + RequestId: reqID, + Payload: &fluidv1.ControlMessage_ReadSourceFile{ + ReadSourceFile: &fluidv1.ReadSourceFileCommand{ + SourceVm: vmName, + Path: path, + }, + }, + } + + resp, err := o.sender.SendAndWait(ctx, host.HostID, cmd, timeoutReadFile) + if err != nil { + return nil, fmt.Errorf("read source file on host %s: %w", host.HostID, err) + } + + result := resp.GetSourceFileResult() + if result == nil { + if errReport := resp.GetErrorReport(); errReport != nil { + return nil, fmt.Errorf("host error: %s", errReport.GetError()) + } + return nil, fmt.Errorf("unexpected response type from host") + } + + return &SourceFileResult{ + SourceVM: vmName, + Path: result.GetPath(), + Content: result.GetContent(), + }, nil +} + +// --------------------------------------------------------------------------- +// Source host operations +// --------------------------------------------------------------------------- + +// DiscoverSourceHosts sends SSH config to a connected daemon for parsing and probing. +func (o *Orchestrator) DiscoverSourceHosts(ctx context.Context, orgID, sshConfigContent string) ([]*DiscoveredHost, error) { + connected := o.registry.ListConnectedByOrg(orgID) + if len(connected) == 0 { + return nil, fmt.Errorf("no connected daemon hosts available for discovery") + } + + // Pick a random connected host to distribute probing load. 
+ host := connected[rand.IntN(len(connected))] + + reqID := uuid.New().String() + cmd := &fluidv1.ControlMessage{ + RequestId: reqID, + Payload: &fluidv1.ControlMessage_DiscoverHosts{ + DiscoverHosts: &fluidv1.DiscoverHostsCommand{ + SshConfigContent: sshConfigContent, + }, + }, + } + + resp, err := o.sender.SendAndWait(ctx, host.HostID, cmd, timeoutDiscoverHosts) + if err != nil { + return nil, fmt.Errorf("discover hosts via %s: %w", host.HostID, err) + } + + result := resp.GetDiscoverHostsResult() + if result == nil { + if errReport := resp.GetErrorReport(); errReport != nil { + return nil, fmt.Errorf("host error: %s", errReport.GetError()) + } + return nil, fmt.Errorf("unexpected response type from host") + } + + discovered := make([]*DiscoveredHost, 0, len(result.GetHosts())) + for _, h := range result.GetHosts() { + discovered = append(discovered, &DiscoveredHost{ + Name: h.GetName(), + Hostname: h.GetHostname(), + User: h.GetUser(), + Port: int(h.GetPort()), + IdentityFile: h.GetIdentityFile(), + Reachable: h.GetReachable(), + HasLibvirt: h.GetHasLibvirt(), + HasProxmox: h.GetHasProxmox(), + VMs: h.GetVms(), + Error: h.GetError(), + }) + } + + return discovered, nil +} diff --git a/api/internal/orchestrator/orchestrator_test.go b/api/internal/orchestrator/orchestrator_test.go new file mode 100644 index 00000000..31a29dcf --- /dev/null +++ b/api/internal/orchestrator/orchestrator_test.go @@ -0,0 +1,1380 @@ +package orchestrator + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" + + "github.com/aspectrr/fluid.sh/api/internal/registry" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +// --------------------------------------------------------------------------- +// mockStore implements store.Store with function-field delegation +// --------------------------------------------------------------------------- + +type mockStore struct { + CreateUserFn func(ctx context.Context, u 
*store.User) error + GetUserFn func(ctx context.Context, id string) (*store.User, error) + GetUserByEmailFn func(ctx context.Context, email string) (*store.User, error) + UpdateUserFn func(ctx context.Context, u *store.User) error + + CreateOAuthAccountFn func(ctx context.Context, oa *store.OAuthAccount) error + GetOAuthAccountFn func(ctx context.Context, provider, providerID string) (*store.OAuthAccount, error) + GetOAuthAccountsByUserFn func(ctx context.Context, userID string) ([]*store.OAuthAccount, error) + + CreateSessionFn func(ctx context.Context, s *store.Session) error + GetSessionFn func(ctx context.Context, id string) (*store.Session, error) + DeleteSessionFn func(ctx context.Context, id string) error + DeleteExpiredSessionsFn func(ctx context.Context) error + + CreateOrganizationFn func(ctx context.Context, org *store.Organization) error + GetOrganizationFn func(ctx context.Context, id string) (*store.Organization, error) + GetOrganizationBySlugFn func(ctx context.Context, slug string) (*store.Organization, error) + ListOrganizationsByUserFn func(ctx context.Context, userID string) ([]*store.Organization, error) + UpdateOrganizationFn func(ctx context.Context, org *store.Organization) error + DeleteOrganizationFn func(ctx context.Context, id string) error + + CreateOrgMemberFn func(ctx context.Context, m *store.OrgMember) error + GetOrgMemberFn func(ctx context.Context, orgID, userID string) (*store.OrgMember, error) + ListOrgMembersFn func(ctx context.Context, orgID string) ([]*store.OrgMember, error) + DeleteOrgMemberFn func(ctx context.Context, orgID, id string) error + + CreateSubscriptionFn func(ctx context.Context, sub *store.Subscription) error + GetSubscriptionByOrgFn func(ctx context.Context, orgID string) (*store.Subscription, error) + UpdateSubscriptionFn func(ctx context.Context, sub *store.Subscription) error + + CreateUsageRecordFn func(ctx context.Context, rec *store.UsageRecord) error + ListUsageRecordsFn func(ctx context.Context, orgID 
string, from, to time.Time) ([]*store.UsageRecord, error) + + CreateHostFn func(ctx context.Context, host *store.Host) error + GetHostFn func(ctx context.Context, hostID string) (*store.Host, error) + ListHostsFn func(ctx context.Context) ([]store.Host, error) + UpdateHostFn func(ctx context.Context, host *store.Host) error + UpdateHostHeartbeatFn func(ctx context.Context, hostID string, availCPUs int32, availMemMB int64, availDiskMB int64) error + + CreateSandboxFn func(ctx context.Context, sandbox *store.Sandbox) error + GetSandboxFn func(ctx context.Context, sandboxID string) (*store.Sandbox, error) + GetSandboxByOrgFn func(ctx context.Context, orgID, sandboxID string) (*store.Sandbox, error) + ListSandboxesFn func(ctx context.Context) ([]store.Sandbox, error) + ListSandboxesByOrgFn func(ctx context.Context, orgID string) ([]store.Sandbox, error) + UpdateSandboxFn func(ctx context.Context, sandbox *store.Sandbox) error + DeleteSandboxFn func(ctx context.Context, sandboxID string) error + GetSandboxesByHostIDFn func(ctx context.Context, hostID string) ([]store.Sandbox, error) + CountSandboxesByHostIDsFn func(ctx context.Context, hostIDs []string) (map[string]int, error) + ListExpiredSandboxesFn func(ctx context.Context, defaultTTL time.Duration) ([]store.Sandbox, error) + + CreateCommandFn func(ctx context.Context, cmd *store.Command) error + ListSandboxCommandsFn func(ctx context.Context, sandboxID string) ([]store.Command, error) + + CreateSourceHostFn func(ctx context.Context, sh *store.SourceHost) error + GetSourceHostFn func(ctx context.Context, id string) (*store.SourceHost, error) + ListSourceHostsByOrgFn func(ctx context.Context, orgID string) ([]*store.SourceHost, error) + DeleteSourceHostFn func(ctx context.Context, id string) error + + CreateHostTokenFn func(ctx context.Context, token *store.HostToken) error + GetHostTokenByHashFn func(ctx context.Context, hash string) (*store.HostToken, error) + ListHostTokensByOrgFn func(ctx context.Context, orgID 
string) ([]store.HostToken, error) + DeleteHostTokenFn func(ctx context.Context, orgID, id string) error + + // Agent/playbook fields removed - interface methods commented out in store.go + + GetOrganizationByStripeCustomerIDFn func(ctx context.Context, customerID string) (*store.Organization, error) + GetModelMeterFn func(ctx context.Context, modelID string) (*store.ModelMeter, error) + CreateModelMeterFn func(ctx context.Context, m *store.ModelMeter) error + GetOrgModelSubscriptionFn func(ctx context.Context, orgID, modelID string) (*store.OrgModelSubscription, error) + CreateOrgModelSubscriptionFn func(ctx context.Context, s *store.OrgModelSubscription) error + SumTokenUsageFn func(ctx context.Context, orgID string, from, to time.Time) (float64, error) + ListActiveSubscriptionsFn func(ctx context.Context) ([]*store.Subscription, error) + + WithTxFn func(ctx context.Context, fn func(tx store.DataStore) error) error +} + +func (m *mockStore) p(name string) { panic(fmt.Sprintf("mockStore.%s not configured", name)) } + +func (m *mockStore) Config() store.Config { return store.Config{} } +func (m *mockStore) Ping(context.Context) error { return nil } +func (m *mockStore) Close() error { return nil } + +func (m *mockStore) WithTx(ctx context.Context, fn func(tx store.DataStore) error) error { + if m.WithTxFn != nil { + return m.WithTxFn(ctx, fn) + } + return fn(m) +} + +func (m *mockStore) CreateUser(ctx context.Context, u *store.User) error { + if m.CreateUserFn != nil { + return m.CreateUserFn(ctx, u) + } + m.p("CreateUser") + return nil +} +func (m *mockStore) GetUser(ctx context.Context, id string) (*store.User, error) { + if m.GetUserFn != nil { + return m.GetUserFn(ctx, id) + } + m.p("GetUser") + return nil, nil +} +func (m *mockStore) GetUserByEmail(ctx context.Context, email string) (*store.User, error) { + if m.GetUserByEmailFn != nil { + return m.GetUserByEmailFn(ctx, email) + } + m.p("GetUserByEmail") + return nil, nil +} +func (m *mockStore) UpdateUser(ctx 
context.Context, u *store.User) error { + if m.UpdateUserFn != nil { + return m.UpdateUserFn(ctx, u) + } + m.p("UpdateUser") + return nil +} + +func (m *mockStore) CreateOAuthAccount(ctx context.Context, oa *store.OAuthAccount) error { + if m.CreateOAuthAccountFn != nil { + return m.CreateOAuthAccountFn(ctx, oa) + } + m.p("CreateOAuthAccount") + return nil +} +func (m *mockStore) GetOAuthAccount(ctx context.Context, provider, providerID string) (*store.OAuthAccount, error) { + if m.GetOAuthAccountFn != nil { + return m.GetOAuthAccountFn(ctx, provider, providerID) + } + m.p("GetOAuthAccount") + return nil, nil +} +func (m *mockStore) GetOAuthAccountsByUser(ctx context.Context, userID string) ([]*store.OAuthAccount, error) { + if m.GetOAuthAccountsByUserFn != nil { + return m.GetOAuthAccountsByUserFn(ctx, userID) + } + m.p("GetOAuthAccountsByUser") + return nil, nil +} + +func (m *mockStore) CreateSession(ctx context.Context, s *store.Session) error { + if m.CreateSessionFn != nil { + return m.CreateSessionFn(ctx, s) + } + m.p("CreateSession") + return nil +} +func (m *mockStore) GetSession(ctx context.Context, id string) (*store.Session, error) { + if m.GetSessionFn != nil { + return m.GetSessionFn(ctx, id) + } + m.p("GetSession") + return nil, nil +} +func (m *mockStore) DeleteSession(ctx context.Context, id string) error { + if m.DeleteSessionFn != nil { + return m.DeleteSessionFn(ctx, id) + } + m.p("DeleteSession") + return nil +} +func (m *mockStore) DeleteExpiredSessions(ctx context.Context) error { + if m.DeleteExpiredSessionsFn != nil { + return m.DeleteExpiredSessionsFn(ctx) + } + m.p("DeleteExpiredSessions") + return nil +} + +func (m *mockStore) CreateOrganization(ctx context.Context, org *store.Organization) error { + if m.CreateOrganizationFn != nil { + return m.CreateOrganizationFn(ctx, org) + } + m.p("CreateOrganization") + return nil +} +func (m *mockStore) GetOrganization(ctx context.Context, id string) (*store.Organization, error) { + if 
m.GetOrganizationFn != nil { + return m.GetOrganizationFn(ctx, id) + } + m.p("GetOrganization") + return nil, nil +} +func (m *mockStore) GetOrganizationBySlug(ctx context.Context, slug string) (*store.Organization, error) { + if m.GetOrganizationBySlugFn != nil { + return m.GetOrganizationBySlugFn(ctx, slug) + } + m.p("GetOrganizationBySlug") + return nil, nil +} +func (m *mockStore) ListOrganizationsByUser(ctx context.Context, userID string) ([]*store.Organization, error) { + if m.ListOrganizationsByUserFn != nil { + return m.ListOrganizationsByUserFn(ctx, userID) + } + m.p("ListOrganizationsByUser") + return nil, nil +} +func (m *mockStore) UpdateOrganization(ctx context.Context, org *store.Organization) error { + if m.UpdateOrganizationFn != nil { + return m.UpdateOrganizationFn(ctx, org) + } + m.p("UpdateOrganization") + return nil +} +func (m *mockStore) DeleteOrganization(ctx context.Context, id string) error { + if m.DeleteOrganizationFn != nil { + return m.DeleteOrganizationFn(ctx, id) + } + m.p("DeleteOrganization") + return nil +} + +func (m *mockStore) CreateOrgMember(ctx context.Context, mem *store.OrgMember) error { + if m.CreateOrgMemberFn != nil { + return m.CreateOrgMemberFn(ctx, mem) + } + m.p("CreateOrgMember") + return nil +} +func (m *mockStore) GetOrgMember(ctx context.Context, orgID, userID string) (*store.OrgMember, error) { + if m.GetOrgMemberFn != nil { + return m.GetOrgMemberFn(ctx, orgID, userID) + } + m.p("GetOrgMember") + return nil, nil +} +func (m *mockStore) GetOrgMemberByID(ctx context.Context, orgID, memberID string) (*store.OrgMember, error) { + m.p("GetOrgMemberByID") + return nil, nil +} +func (m *mockStore) ListOrgMembers(ctx context.Context, orgID string) ([]*store.OrgMember, error) { + if m.ListOrgMembersFn != nil { + return m.ListOrgMembersFn(ctx, orgID) + } + m.p("ListOrgMembers") + return nil, nil +} +func (m *mockStore) DeleteOrgMember(ctx context.Context, orgID, id string) error { + if m.DeleteOrgMemberFn != nil { + 
return m.DeleteOrgMemberFn(ctx, orgID, id) + } + m.p("DeleteOrgMember") + return nil +} + +func (m *mockStore) CreateSubscription(ctx context.Context, sub *store.Subscription) error { + if m.CreateSubscriptionFn != nil { + return m.CreateSubscriptionFn(ctx, sub) + } + m.p("CreateSubscription") + return nil +} +func (m *mockStore) GetSubscriptionByOrg(ctx context.Context, orgID string) (*store.Subscription, error) { + if m.GetSubscriptionByOrgFn != nil { + return m.GetSubscriptionByOrgFn(ctx, orgID) + } + m.p("GetSubscriptionByOrg") + return nil, nil +} +func (m *mockStore) UpdateSubscription(ctx context.Context, sub *store.Subscription) error { + if m.UpdateSubscriptionFn != nil { + return m.UpdateSubscriptionFn(ctx, sub) + } + m.p("UpdateSubscription") + return nil +} + +func (m *mockStore) CreateUsageRecord(ctx context.Context, rec *store.UsageRecord) error { + if m.CreateUsageRecordFn != nil { + return m.CreateUsageRecordFn(ctx, rec) + } + m.p("CreateUsageRecord") + return nil +} +func (m *mockStore) ListUsageRecords(ctx context.Context, orgID string, from, to time.Time) ([]*store.UsageRecord, error) { + if m.ListUsageRecordsFn != nil { + return m.ListUsageRecordsFn(ctx, orgID, from, to) + } + m.p("ListUsageRecords") + return nil, nil +} + +func (m *mockStore) CreateHost(ctx context.Context, host *store.Host) error { + if m.CreateHostFn != nil { + return m.CreateHostFn(ctx, host) + } + m.p("CreateHost") + return nil +} +func (m *mockStore) GetHost(ctx context.Context, hostID string) (*store.Host, error) { + if m.GetHostFn != nil { + return m.GetHostFn(ctx, hostID) + } + m.p("GetHost") + return nil, nil +} +func (m *mockStore) ListHosts(ctx context.Context) ([]store.Host, error) { + if m.ListHostsFn != nil { + return m.ListHostsFn(ctx) + } + m.p("ListHosts") + return nil, nil +} +func (m *mockStore) ListHostsByOrg(_ context.Context, _ string) ([]store.Host, error) { + return nil, nil +} +func (m *mockStore) UpdateHost(ctx context.Context, host *store.Host) error 
{ + if m.UpdateHostFn != nil { + return m.UpdateHostFn(ctx, host) + } + m.p("UpdateHost") + return nil +} +func (m *mockStore) UpdateHostHeartbeat(ctx context.Context, hostID string, availCPUs int32, availMemMB int64, availDiskMB int64) error { + if m.UpdateHostHeartbeatFn != nil { + return m.UpdateHostHeartbeatFn(ctx, hostID, availCPUs, availMemMB, availDiskMB) + } + m.p("UpdateHostHeartbeat") + return nil +} + +func (m *mockStore) CreateSandbox(ctx context.Context, sandbox *store.Sandbox) error { + if m.CreateSandboxFn != nil { + return m.CreateSandboxFn(ctx, sandbox) + } + m.p("CreateSandbox") + return nil +} +func (m *mockStore) GetSandbox(ctx context.Context, sandboxID string) (*store.Sandbox, error) { + if m.GetSandboxFn != nil { + return m.GetSandboxFn(ctx, sandboxID) + } + m.p("GetSandbox") + return nil, nil +} +func (m *mockStore) GetSandboxByOrg(ctx context.Context, orgID, sandboxID string) (*store.Sandbox, error) { + if m.GetSandboxByOrgFn != nil { + return m.GetSandboxByOrgFn(ctx, orgID, sandboxID) + } + // Fall back to GetSandboxFn for backward compat in tests. 
+ if m.GetSandboxFn != nil { + return m.GetSandboxFn(ctx, sandboxID) + } + m.p("GetSandboxByOrg") + return nil, nil +} +func (m *mockStore) ListSandboxes(ctx context.Context) ([]store.Sandbox, error) { + if m.ListSandboxesFn != nil { + return m.ListSandboxesFn(ctx) + } + m.p("ListSandboxes") + return nil, nil +} +func (m *mockStore) ListSandboxesByOrg(ctx context.Context, orgID string) ([]store.Sandbox, error) { + if m.ListSandboxesByOrgFn != nil { + return m.ListSandboxesByOrgFn(ctx, orgID) + } + m.p("ListSandboxesByOrg") + return nil, nil +} +func (m *mockStore) UpdateSandbox(ctx context.Context, sandbox *store.Sandbox) error { + if m.UpdateSandboxFn != nil { + return m.UpdateSandboxFn(ctx, sandbox) + } + m.p("UpdateSandbox") + return nil +} +func (m *mockStore) DeleteSandbox(ctx context.Context, sandboxID string) error { + if m.DeleteSandboxFn != nil { + return m.DeleteSandboxFn(ctx, sandboxID) + } + m.p("DeleteSandbox") + return nil +} +func (m *mockStore) GetSandboxesByHostID(ctx context.Context, hostID string) ([]store.Sandbox, error) { + if m.GetSandboxesByHostIDFn != nil { + return m.GetSandboxesByHostIDFn(ctx, hostID) + } + m.p("GetSandboxesByHostID") + return nil, nil +} +func (m *mockStore) CountSandboxesByHostIDs(ctx context.Context, hostIDs []string) (map[string]int, error) { + if m.CountSandboxesByHostIDsFn != nil { + return m.CountSandboxesByHostIDsFn(ctx, hostIDs) + } + return map[string]int{}, nil +} +func (m *mockStore) ListExpiredSandboxes(ctx context.Context, defaultTTL time.Duration) ([]store.Sandbox, error) { + if m.ListExpiredSandboxesFn != nil { + return m.ListExpiredSandboxesFn(ctx, defaultTTL) + } + m.p("ListExpiredSandboxes") + return nil, nil +} + +func (m *mockStore) CreateCommand(ctx context.Context, cmd *store.Command) error { + if m.CreateCommandFn != nil { + return m.CreateCommandFn(ctx, cmd) + } + m.p("CreateCommand") + return nil +} +func (m *mockStore) ListSandboxCommands(ctx context.Context, sandboxID string) ([]store.Command, 
error) { + if m.ListSandboxCommandsFn != nil { + return m.ListSandboxCommandsFn(ctx, sandboxID) + } + m.p("ListSandboxCommands") + return nil, nil +} + +func (m *mockStore) CreateSourceHost(ctx context.Context, sh *store.SourceHost) error { + if m.CreateSourceHostFn != nil { + return m.CreateSourceHostFn(ctx, sh) + } + m.p("CreateSourceHost") + return nil +} +func (m *mockStore) GetSourceHost(ctx context.Context, id string) (*store.SourceHost, error) { + if m.GetSourceHostFn != nil { + return m.GetSourceHostFn(ctx, id) + } + m.p("GetSourceHost") + return nil, nil +} +func (m *mockStore) ListSourceHostsByOrg(ctx context.Context, orgID string) ([]*store.SourceHost, error) { + if m.ListSourceHostsByOrgFn != nil { + return m.ListSourceHostsByOrgFn(ctx, orgID) + } + m.p("ListSourceHostsByOrg") + return nil, nil +} +func (m *mockStore) DeleteSourceHost(ctx context.Context, id string) error { + if m.DeleteSourceHostFn != nil { + return m.DeleteSourceHostFn(ctx, id) + } + m.p("DeleteSourceHost") + return nil +} + +func (m *mockStore) CreateHostToken(ctx context.Context, token *store.HostToken) error { + if m.CreateHostTokenFn != nil { + return m.CreateHostTokenFn(ctx, token) + } + m.p("CreateHostToken") + return nil +} +func (m *mockStore) GetHostTokenByHash(ctx context.Context, hash string) (*store.HostToken, error) { + if m.GetHostTokenByHashFn != nil { + return m.GetHostTokenByHashFn(ctx, hash) + } + m.p("GetHostTokenByHash") + return nil, nil +} +func (m *mockStore) ListHostTokensByOrg(ctx context.Context, orgID string) ([]store.HostToken, error) { + if m.ListHostTokensByOrgFn != nil { + return m.ListHostTokensByOrgFn(ctx, orgID) + } + m.p("ListHostTokensByOrg") + return nil, nil +} +func (m *mockStore) DeleteHostToken(ctx context.Context, orgID, id string) error { + if m.DeleteHostTokenFn != nil { + return m.DeleteHostTokenFn(ctx, orgID, id) + } + m.p("DeleteHostToken") + return nil +} + +// Agent/playbook mock methods removed - interface methods commented out in 
store.go + +func (m *mockStore) GetOrganizationByStripeCustomerID(ctx context.Context, customerID string) (*store.Organization, error) { + if m.GetOrganizationByStripeCustomerIDFn != nil { + return m.GetOrganizationByStripeCustomerIDFn(ctx, customerID) + } + m.p("GetOrganizationByStripeCustomerID") + return nil, nil +} +func (m *mockStore) GetModelMeter(ctx context.Context, modelID string) (*store.ModelMeter, error) { + if m.GetModelMeterFn != nil { + return m.GetModelMeterFn(ctx, modelID) + } + return nil, store.ErrNotFound +} +func (m *mockStore) CreateModelMeter(ctx context.Context, mm *store.ModelMeter) error { + if m.CreateModelMeterFn != nil { + return m.CreateModelMeterFn(ctx, mm) + } + return nil +} +func (m *mockStore) GetOrgModelSubscription(ctx context.Context, orgID, modelID string) (*store.OrgModelSubscription, error) { + if m.GetOrgModelSubscriptionFn != nil { + return m.GetOrgModelSubscriptionFn(ctx, orgID, modelID) + } + return nil, store.ErrNotFound +} +func (m *mockStore) CreateOrgModelSubscription(ctx context.Context, s *store.OrgModelSubscription) error { + if m.CreateOrgModelSubscriptionFn != nil { + return m.CreateOrgModelSubscriptionFn(ctx, s) + } + return nil +} +func (m *mockStore) SumTokenUsage(ctx context.Context, orgID string, from, to time.Time) (float64, error) { + if m.SumTokenUsageFn != nil { + return m.SumTokenUsageFn(ctx, orgID, from, to) + } + return 0, nil +} +func (m *mockStore) ListActiveSubscriptions(ctx context.Context) ([]*store.Subscription, error) { + if m.ListActiveSubscriptionsFn != nil { + return m.ListActiveSubscriptionsFn(ctx) + } + return nil, nil +} +func (m *mockStore) GetSubscriptionByStripeID(_ context.Context, _ string) (*store.Subscription, error) { + return nil, nil +} +func (m *mockStore) AcquireAdvisoryLock(_ context.Context, _ int64) error { return nil } +func (m *mockStore) ReleaseAdvisoryLock(_ context.Context, _ int64) error { return nil } + +// 
--------------------------------------------------------------------------- +// mockSender implements HostSender +// --------------------------------------------------------------------------- + +type mockSender struct { + SendAndWaitFn func(ctx context.Context, hostID string, msg *fluidv1.ControlMessage, timeout time.Duration) (*fluidv1.HostMessage, error) +} + +func (m *mockSender) SendAndWait(ctx context.Context, hostID string, msg *fluidv1.ControlMessage, timeout time.Duration) (*fluidv1.HostMessage, error) { + if m.SendAndWaitFn != nil { + return m.SendAndWaitFn(ctx, hostID, msg, timeout) + } + return nil, fmt.Errorf("mockSender.SendAndWait not configured") +} + +// --------------------------------------------------------------------------- +// Helper +// --------------------------------------------------------------------------- + +func newTestOrchestrator(ms *mockStore, sender *mockSender) *Orchestrator { + reg := registry.New() + return New(reg, ms, sender, nil, 24*time.Hour, 90*time.Second) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +func TestListSandboxesByOrg_Success(t *testing.T) { + ms := &mockStore{ + ListSandboxesByOrgFn: func(_ context.Context, orgID string) ([]store.Sandbox, error) { + if orgID != "org-1" { + t.Errorf("orgID = %q, want %q", orgID, "org-1") + } + return []store.Sandbox{ + {ID: "sbx-1", OrgID: "org-1", Name: "sandbox-1", State: store.SandboxStateRunning}, + {ID: "sbx-2", OrgID: "org-1", Name: "sandbox-2", State: store.SandboxStateStopped}, + }, nil + }, + } + + orch := newTestOrchestrator(ms, &mockSender{}) + result, err := orch.ListSandboxesByOrg(context.Background(), "org-1") + if err != nil { + t.Fatalf("ListSandboxesByOrg: unexpected error: %v", err) + } + if len(result) != 2 { + t.Fatalf("ListSandboxesByOrg: got %d sandboxes, want 2", len(result)) + } + if result[0].ID != "sbx-1" { + 
t.Errorf("result[0].ID = %q, want %q", result[0].ID, "sbx-1") + } + if result[1].ID != "sbx-2" { + t.Errorf("result[1].ID = %q, want %q", result[1].ID, "sbx-2") + } +} + +func TestListSandboxesByOrg_Empty(t *testing.T) { + ms := &mockStore{ + ListSandboxesByOrgFn: func(_ context.Context, _ string) ([]store.Sandbox, error) { + return []store.Sandbox{}, nil + }, + } + + orch := newTestOrchestrator(ms, &mockSender{}) + result, err := orch.ListSandboxesByOrg(context.Background(), "org-empty") + if err != nil { + t.Fatalf("ListSandboxesByOrg: unexpected error: %v", err) + } + if len(result) != 0 { + t.Fatalf("ListSandboxesByOrg: got %d sandboxes, want 0", len(result)) + } +} + +func TestListSandboxesByOrg_StoreError(t *testing.T) { + ms := &mockStore{ + ListSandboxesByOrgFn: func(_ context.Context, _ string) ([]store.Sandbox, error) { + return nil, fmt.Errorf("db error") + }, + } + + orch := newTestOrchestrator(ms, &mockSender{}) + _, err := orch.ListSandboxesByOrg(context.Background(), "org-1") + if err == nil { + t.Fatal("ListSandboxesByOrg: expected error from store") + } +} + +func TestGetSandbox_Success(t *testing.T) { + expected := &store.Sandbox{ + ID: "sbx-1", + OrgID: "org-1", + Name: "my-sandbox", + State: store.SandboxStateRunning, + } + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + if id == "sbx-1" { + return expected, nil + } + return nil, store.ErrNotFound + }, + } + + orch := newTestOrchestrator(ms, &mockSender{}) + result, err := orch.GetSandbox(context.Background(), "org-1", "sbx-1") + if err != nil { + t.Fatalf("GetSandbox: unexpected error: %v", err) + } + if result.ID != expected.ID { + t.Errorf("ID = %q, want %q", result.ID, expected.ID) + } + if result.Name != expected.Name { + t.Errorf("Name = %q, want %q", result.Name, expected.Name) + } +} + +func TestGetSandbox_NotFound(t *testing.T) { + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, _ string) (*store.Sandbox, error) { + return nil, 
store.ErrNotFound + }, + } + + orch := newTestOrchestrator(ms, &mockSender{}) + _, err := orch.GetSandbox(context.Background(), "org-1", "nonexistent") + if err == nil { + t.Fatal("GetSandbox: expected error for nonexistent sandbox") + } +} + +func TestListCommands_Success(t *testing.T) { + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + return &store.Sandbox{ID: id, OrgID: "org-1"}, nil + }, + ListSandboxCommandsFn: func(_ context.Context, sandboxID string) ([]store.Command, error) { + if sandboxID != "sbx-1" { + t.Errorf("sandboxID = %q, want %q", sandboxID, "sbx-1") + } + return []store.Command{ + {ID: "cmd-1", SandboxID: "sbx-1", Command: "ls -la", ExitCode: 0}, + {ID: "cmd-2", SandboxID: "sbx-1", Command: "pwd", ExitCode: 0}, + }, nil + }, + } + + orch := newTestOrchestrator(ms, &mockSender{}) + result, err := orch.ListCommands(context.Background(), "org-1", "sbx-1") + if err != nil { + t.Fatalf("ListCommands: unexpected error: %v", err) + } + if len(result) != 2 { + t.Fatalf("ListCommands: got %d commands, want 2", len(result)) + } + if result[0].Command != "ls -la" { + t.Errorf("result[0].Command = %q, want %q", result[0].Command, "ls -la") + } +} + +func TestListCommands_StoreError(t *testing.T) { + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + return &store.Sandbox{ID: id, OrgID: "org-1"}, nil + }, + ListSandboxCommandsFn: func(_ context.Context, _ string) ([]store.Command, error) { + return nil, fmt.Errorf("db error") + }, + } + + orch := newTestOrchestrator(ms, &mockSender{}) + _, err := orch.ListCommands(context.Background(), "org-1", "sbx-1") + if err == nil { + t.Fatal("ListCommands: expected error from store") + } +} + +func TestListHosts_Success(t *testing.T) { + reg := registry.New() + _ = reg.Register("host-1", "org-1", "production-1", &mockStream{}) + reg.SetRegistration("host-1", &fluidv1.HostRegistration{ + AvailableCpus: 16, + AvailableMemoryMb: 
32768, + AvailableDiskMb: 512000, + BaseImages: []string{"ubuntu-22.04"}, + }) + + ms := &mockStore{ + CountSandboxesByHostIDsFn: func(_ context.Context, hostIDs []string) (map[string]int, error) { + counts := map[string]int{} + for _, id := range hostIDs { + if id == "host-1" { + counts[id] = 2 + } + } + return counts, nil + }, + } + + sender := &mockSender{} + orch := New(reg, ms, sender, nil, 24*time.Hour, 90*time.Second) + + result, err := orch.ListHosts(context.Background(), "org-1") + if err != nil { + t.Fatalf("ListHosts: unexpected error: %v", err) + } + if len(result) != 1 { + t.Fatalf("ListHosts: got %d hosts, want 1", len(result)) + } + + h := result[0] + if h.HostID != "host-1" { + t.Errorf("HostID = %q, want %q", h.HostID, "host-1") + } + if h.Hostname != "production-1" { + t.Errorf("Hostname = %q, want %q", h.Hostname, "production-1") + } + if h.Status != "ONLINE" { + t.Errorf("Status = %q, want %q", h.Status, "ONLINE") + } + if h.AvailableCPUs != 16 { + t.Errorf("AvailableCPUs = %d, want 16", h.AvailableCPUs) + } + if h.AvailableMemMB != 32768 { + t.Errorf("AvailableMemMB = %d, want 32768", h.AvailableMemMB) + } + if h.ActiveSandboxes != 2 { + t.Errorf("ActiveSandboxes = %d, want 2", h.ActiveSandboxes) + } + if len(h.BaseImages) != 1 || h.BaseImages[0] != "ubuntu-22.04" { + t.Errorf("BaseImages = %v, want [ubuntu-22.04]", h.BaseImages) + } +} + +func TestListHosts_NoHosts(t *testing.T) { + reg := registry.New() + ms := &mockStore{} + sender := &mockSender{} + orch := New(reg, ms, sender, nil, 24*time.Hour, 90*time.Second) + + result, err := orch.ListHosts(context.Background(), "org-1") + if err != nil { + t.Fatalf("ListHosts: unexpected error: %v", err) + } + if len(result) != 0 { + t.Fatalf("ListHosts: got %d hosts, want 0", len(result)) + } +} + +func TestListHosts_FiltersByOrg(t *testing.T) { + reg := registry.New() + _ = reg.Register("host-1", "org-1", "h1", &mockStream{}) + reg.SetRegistration("host-1", &fluidv1.HostRegistration{}) + _ = 
reg.Register("host-2", "org-2", "h2", &mockStream{}) + reg.SetRegistration("host-2", &fluidv1.HostRegistration{}) + + ms := &mockStore{} + + sender := &mockSender{} + orch := New(reg, ms, sender, nil, 24*time.Hour, 90*time.Second) + + result, err := orch.ListHosts(context.Background(), "org-1") + if err != nil { + t.Fatalf("ListHosts: unexpected error: %v", err) + } + if len(result) != 1 { + t.Fatalf("ListHosts: got %d hosts, want 1", len(result)) + } + if result[0].HostID != "host-1" { + t.Errorf("HostID = %q, want %q", result[0].HostID, "host-1") + } +} + +// --------------------------------------------------------------------------- +// Write operation tests +// --------------------------------------------------------------------------- + +func TestCreateSandbox_Success(t *testing.T) { + reg := newRegistryWithHost(t, "host-1", "org-1", &fluidv1.HostRegistration{ + AvailableCpus: 16, + AvailableMemoryMb: 32768, + BaseImages: []string{"ubuntu-22.04"}, + }) + + var storedSandbox *store.Sandbox + ms := &mockStore{ + CreateSandboxFn: func(_ context.Context, s *store.Sandbox) error { + storedSandbox = s + return nil + }, + } + + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, hostID string, msg *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + if hostID != "host-1" { + t.Errorf("hostID = %q, want %q", hostID, "host-1") + } + return &fluidv1.HostMessage{ + RequestId: msg.GetRequestId(), + Payload: &fluidv1.HostMessage_SandboxCreated{ + SandboxCreated: &fluidv1.SandboxCreated{ + SandboxId: msg.GetCreateSandbox().GetSandboxId(), + Name: "my-sandbox", + State: "RUNNING", + IpAddress: "10.0.0.5", + Bridge: "br0", + MacAddress: "aa:bb:cc:dd:ee:ff", + }, + }, + }, nil + }, + } + + orch := New(reg, ms, sender, nil, 24*time.Hour, 90*time.Second) + result, err := orch.CreateSandbox(context.Background(), CreateSandboxRequest{ + OrgID: "org-1", + SourceVM: "ubuntu-22.04", + Name: "my-sandbox", + VCPUs: 4, + MemoryMB: 4096, + }) + if err 
!= nil { + t.Fatalf("CreateSandbox: unexpected error: %v", err) + } + if result.OrgID != "org-1" { + t.Errorf("OrgID = %q, want %q", result.OrgID, "org-1") + } + if result.HostID != "host-1" { + t.Errorf("HostID = %q, want %q", result.HostID, "host-1") + } + if result.Name != "my-sandbox" { + t.Errorf("Name = %q, want %q", result.Name, "my-sandbox") + } + if result.State != store.SandboxStateRunning { + t.Errorf("State = %q, want %q", result.State, store.SandboxStateRunning) + } + if result.IPAddress != "10.0.0.5" { + t.Errorf("IPAddress = %q, want %q", result.IPAddress, "10.0.0.5") + } + if result.Bridge != "br0" { + t.Errorf("Bridge = %q, want %q", result.Bridge, "br0") + } + if result.MACAddress != "aa:bb:cc:dd:ee:ff" { + t.Errorf("MACAddress = %q, want %q", result.MACAddress, "aa:bb:cc:dd:ee:ff") + } + if result.VCPUs != 4 { + t.Errorf("VCPUs = %d, want 4", result.VCPUs) + } + if result.MemoryMB != 4096 { + t.Errorf("MemoryMB = %d, want 4096", result.MemoryMB) + } + if storedSandbox == nil { + t.Fatal("CreateSandboxFn was not called") + } + if storedSandbox.ID != result.ID { + t.Errorf("stored ID = %q, want %q", storedSandbox.ID, result.ID) + } +} + +func TestCreateSandbox_NoHost(t *testing.T) { + ms := &mockStore{} + sender := &mockSender{} + orch := newTestOrchestrator(ms, sender) + + _, err := orch.CreateSandbox(context.Background(), CreateSandboxRequest{ + OrgID: "org-1", + SourceVM: "ubuntu-22.04", + }) + if err == nil { + t.Fatal("CreateSandbox: expected error when no hosts available") + } + if !strings.Contains(err.Error(), "no connected hosts") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "no connected hosts") + } +} + +func TestCreateSandbox_SenderError(t *testing.T) { + reg := newRegistryWithHost(t, "host-1", "org-1", &fluidv1.HostRegistration{ + AvailableCpus: 16, + AvailableMemoryMb: 32768, + BaseImages: []string{"ubuntu-22.04"}, + }) + + ms := &mockStore{} + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, _ string, 
_ *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + return nil, fmt.Errorf("connection lost") + }, + } + + orch := New(reg, ms, sender, nil, 24*time.Hour, 90*time.Second) + _, err := orch.CreateSandbox(context.Background(), CreateSandboxRequest{ + OrgID: "org-1", + SourceVM: "ubuntu-22.04", + }) + if err == nil { + t.Fatal("CreateSandbox: expected error from sender") + } + if !strings.Contains(err.Error(), "connection lost") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "connection lost") + } +} + +func TestCreateSandbox_HostError(t *testing.T) { + reg := newRegistryWithHost(t, "host-1", "org-1", &fluidv1.HostRegistration{ + AvailableCpus: 16, + AvailableMemoryMb: 32768, + BaseImages: []string{"ubuntu-22.04"}, + }) + + ms := &mockStore{} + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, _ string, msg *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + return &fluidv1.HostMessage{ + RequestId: msg.GetRequestId(), + Payload: &fluidv1.HostMessage_ErrorReport{ + ErrorReport: &fluidv1.ErrorReport{Error: "disk full"}, + }, + }, nil + }, + } + + orch := New(reg, ms, sender, nil, 24*time.Hour, 90*time.Second) + _, err := orch.CreateSandbox(context.Background(), CreateSandboxRequest{ + OrgID: "org-1", + SourceVM: "ubuntu-22.04", + }) + if err == nil { + t.Fatal("CreateSandbox: expected error from host error report") + } + if !strings.Contains(err.Error(), "host error:") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "host error:") + } + if !strings.Contains(err.Error(), "disk full") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "disk full") + } +} + +func TestCreateSandbox_Defaults(t *testing.T) { + reg := newRegistryWithHost(t, "host-1", "org-1", &fluidv1.HostRegistration{ + AvailableCpus: 16, + AvailableMemoryMb: 32768, + BaseImages: []string{"ubuntu-22.04"}, + }) + + var capturedCmd *fluidv1.CreateSandboxCommand + ms := &mockStore{ + CreateSandboxFn: func(_ 
context.Context, _ *store.Sandbox) error { + return nil + }, + } + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, _ string, msg *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + capturedCmd = msg.GetCreateSandbox() + return &fluidv1.HostMessage{ + RequestId: msg.GetRequestId(), + Payload: &fluidv1.HostMessage_SandboxCreated{ + SandboxCreated: &fluidv1.SandboxCreated{ + SandboxId: capturedCmd.GetSandboxId(), + Name: capturedCmd.GetName(), + State: "RUNNING", + }, + }, + }, nil + }, + } + + orch := New(reg, ms, sender, nil, 24*time.Hour, 90*time.Second) + result, err := orch.CreateSandbox(context.Background(), CreateSandboxRequest{ + OrgID: "org-1", + SourceVM: "ubuntu-22.04", + VCPUs: 0, + MemoryMB: 0, + }) + if err != nil { + t.Fatalf("CreateSandbox: unexpected error: %v", err) + } + if capturedCmd == nil { + t.Fatal("SendAndWait was not called") + } + if capturedCmd.GetVcpus() != 2 { + t.Errorf("default VCPUs = %d, want 2", capturedCmd.GetVcpus()) + } + if capturedCmd.GetMemoryMb() != 2048 { + t.Errorf("default MemoryMB = %d, want 2048", capturedCmd.GetMemoryMb()) + } + if result.VCPUs != 2 { + t.Errorf("result.VCPUs = %d, want 2", result.VCPUs) + } + if result.MemoryMB != 2048 { + t.Errorf("result.MemoryMB = %d, want 2048", result.MemoryMB) + } +} + +func TestDestroySandbox_Success(t *testing.T) { + var deletedID string + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + return &store.Sandbox{ID: id, OrgID: "org-1", HostID: "host-1", State: store.SandboxStateRunning}, nil + }, + DeleteSandboxFn: func(_ context.Context, id string) error { + deletedID = id + return nil + }, + } + + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, hostID string, msg *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + if hostID != "host-1" { + t.Errorf("hostID = %q, want %q", hostID, "host-1") + } + return &fluidv1.HostMessage{ + RequestId: 
msg.GetRequestId(), + Payload: &fluidv1.HostMessage_SandboxDestroyed{ + SandboxDestroyed: &fluidv1.SandboxDestroyed{ + SandboxId: msg.GetDestroySandbox().GetSandboxId(), + }, + }, + }, nil + }, + } + + orch := newTestOrchestrator(ms, sender) + err := orch.DestroySandbox(context.Background(), "org-1", "sbx-1") + if err != nil { + t.Fatalf("DestroySandbox: unexpected error: %v", err) + } + if deletedID != "sbx-1" { + t.Errorf("deleted sandbox ID = %q, want %q", deletedID, "sbx-1") + } +} + +func TestDestroySandbox_NotFound(t *testing.T) { + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, _ string) (*store.Sandbox, error) { + return nil, store.ErrNotFound + }, + } + + orch := newTestOrchestrator(ms, &mockSender{}) + err := orch.DestroySandbox(context.Background(), "org-1", "nonexistent") + if err == nil { + t.Fatal("DestroySandbox: expected error for nonexistent sandbox") + } +} + +func TestDestroySandbox_SenderError(t *testing.T) { + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + return &store.Sandbox{ID: id, HostID: "host-1"}, nil + }, + } + + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, _ string, _ *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + return nil, fmt.Errorf("timeout waiting for response") + }, + } + + orch := newTestOrchestrator(ms, sender) + err := orch.DestroySandbox(context.Background(), "org-1", "sbx-1") + if err == nil { + t.Fatal("DestroySandbox: expected error from sender") + } + if !strings.Contains(err.Error(), "timeout waiting for response") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "timeout waiting for response") + } +} + +func TestRunCommand_Success(t *testing.T) { + var storedCmd *store.Command + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + return &store.Sandbox{ID: id, HostID: "host-1"}, nil + }, + CreateCommandFn: func(_ context.Context, cmd *store.Command) error 
{ + storedCmd = cmd + return nil + }, + } + + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, hostID string, msg *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + if hostID != "host-1" { + t.Errorf("hostID = %q, want %q", hostID, "host-1") + } + return &fluidv1.HostMessage{ + RequestId: msg.GetRequestId(), + Payload: &fluidv1.HostMessage_CommandResult{ + CommandResult: &fluidv1.CommandResult{ + Stdout: "hello world\n", + Stderr: "", + ExitCode: 0, + DurationMs: 42, + }, + }, + }, nil + }, + } + + orch := newTestOrchestrator(ms, sender) + result, err := orch.RunCommand(context.Background(), "org-1", "sbx-1", "echo hello world", 30) + if err != nil { + t.Fatalf("RunCommand: unexpected error: %v", err) + } + if result.Stdout != "hello world\n" { + t.Errorf("Stdout = %q, want %q", result.Stdout, "hello world\n") + } + if result.ExitCode != 0 { + t.Errorf("ExitCode = %d, want 0", result.ExitCode) + } + if result.DurationMS != 42 { + t.Errorf("DurationMS = %d, want 42", result.DurationMS) + } + if result.Command != "echo hello world" { + t.Errorf("Command = %q, want %q", result.Command, "echo hello world") + } + if result.SandboxID != "sbx-1" { + t.Errorf("SandboxID = %q, want %q", result.SandboxID, "sbx-1") + } + if storedCmd == nil { + t.Fatal("CreateCommandFn was not called") + } + if storedCmd.Stdout != "hello world\n" { + t.Errorf("stored Stdout = %q, want %q", storedCmd.Stdout, "hello world\n") + } +} + +func TestRunCommand_SenderError(t *testing.T) { + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + return &store.Sandbox{ID: id, HostID: "host-1"}, nil + }, + } + + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, _ string, _ *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + return nil, fmt.Errorf("host unreachable") + }, + } + + orch := newTestOrchestrator(ms, sender) + _, err := orch.RunCommand(context.Background(), "org-1", "sbx-1", 
"ls", 30) + if err == nil { + t.Fatal("RunCommand: expected error from sender") + } + if !strings.Contains(err.Error(), "host unreachable") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "host unreachable") + } +} + +func TestStartSandbox_Success(t *testing.T) { + var updatedSandbox *store.Sandbox + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + return &store.Sandbox{ID: id, HostID: "host-1", State: store.SandboxStateStopped}, nil + }, + UpdateSandboxFn: func(_ context.Context, s *store.Sandbox) error { + updatedSandbox = s + return nil + }, + } + + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, hostID string, msg *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + if hostID != "host-1" { + t.Errorf("hostID = %q, want %q", hostID, "host-1") + } + return &fluidv1.HostMessage{ + RequestId: msg.GetRequestId(), + Payload: &fluidv1.HostMessage_SandboxStarted{ + SandboxStarted: &fluidv1.SandboxStarted{ + SandboxId: msg.GetStartSandbox().GetSandboxId(), + State: "RUNNING", + IpAddress: "10.0.0.10", + }, + }, + }, nil + }, + } + + orch := newTestOrchestrator(ms, sender) + err := orch.StartSandbox(context.Background(), "org-1", "sbx-1") + if err != nil { + t.Fatalf("StartSandbox: unexpected error: %v", err) + } + if updatedSandbox == nil { + t.Fatal("UpdateSandboxFn was not called") + } + if updatedSandbox.State != store.SandboxStateRunning { + t.Errorf("State = %q, want %q", updatedSandbox.State, store.SandboxStateRunning) + } + if updatedSandbox.IPAddress != "10.0.0.10" { + t.Errorf("IPAddress = %q, want %q", updatedSandbox.IPAddress, "10.0.0.10") + } +} + +func TestStartSandbox_NotFound(t *testing.T) { + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, _ string) (*store.Sandbox, error) { + return nil, store.ErrNotFound + }, + } + + orch := newTestOrchestrator(ms, &mockSender{}) + err := orch.StartSandbox(context.Background(), "org-1", "nonexistent") + if 
err == nil { + t.Fatal("StartSandbox: expected error for nonexistent sandbox") + } +} + +func TestStopSandbox_Success(t *testing.T) { + var updatedSandbox *store.Sandbox + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + return &store.Sandbox{ID: id, HostID: "host-1", State: store.SandboxStateRunning}, nil + }, + UpdateSandboxFn: func(_ context.Context, s *store.Sandbox) error { + updatedSandbox = s + return nil + }, + } + + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, hostID string, msg *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + if hostID != "host-1" { + t.Errorf("hostID = %q, want %q", hostID, "host-1") + } + return &fluidv1.HostMessage{ + RequestId: msg.GetRequestId(), + Payload: &fluidv1.HostMessage_SandboxStopped{ + SandboxStopped: &fluidv1.SandboxStopped{ + SandboxId: msg.GetStopSandbox().GetSandboxId(), + State: "STOPPED", + }, + }, + }, nil + }, + } + + orch := newTestOrchestrator(ms, sender) + err := orch.StopSandbox(context.Background(), "org-1", "sbx-1") + if err != nil { + t.Fatalf("StopSandbox: unexpected error: %v", err) + } + if updatedSandbox == nil { + t.Fatal("UpdateSandboxFn was not called") + } + if updatedSandbox.State != store.SandboxStateStopped { + t.Errorf("State = %q, want %q", updatedSandbox.State, store.SandboxStateStopped) + } +} + +func TestStopSandbox_SenderError(t *testing.T) { + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + return &store.Sandbox{ID: id, HostID: "host-1"}, nil + }, + } + + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, _ string, _ *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + return nil, fmt.Errorf("stream closed") + }, + } + + orch := newTestOrchestrator(ms, sender) + err := orch.StopSandbox(context.Background(), "org-1", "sbx-1") + if err == nil { + t.Fatal("StopSandbox: expected error from sender") + } + if 
!strings.Contains(err.Error(), "stream closed") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "stream closed") + } +} + +func TestCreateSnapshot_Success(t *testing.T) { + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + return &store.Sandbox{ID: id, HostID: "host-1", State: store.SandboxStateRunning}, nil + }, + } + + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, hostID string, msg *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + if hostID != "host-1" { + t.Errorf("hostID = %q, want %q", hostID, "host-1") + } + return &fluidv1.HostMessage{ + RequestId: msg.GetRequestId(), + Payload: &fluidv1.HostMessage_SnapshotCreated{ + SnapshotCreated: &fluidv1.SnapshotCreated{ + SnapshotId: "snap-abc123", + SnapshotName: "before-deploy", + }, + }, + }, nil + }, + } + + orch := newTestOrchestrator(ms, sender) + result, err := orch.CreateSnapshot(context.Background(), "org-1", "sbx-1", "before-deploy") + if err != nil { + t.Fatalf("CreateSnapshot: unexpected error: %v", err) + } + if result.SnapshotID != "snap-abc123" { + t.Errorf("SnapshotID = %q, want %q", result.SnapshotID, "snap-abc123") + } + if result.SnapshotName != "before-deploy" { + t.Errorf("SnapshotName = %q, want %q", result.SnapshotName, "before-deploy") + } + if result.SandboxID != "sbx-1" { + t.Errorf("SandboxID = %q, want %q", result.SandboxID, "sbx-1") + } + if result.CreatedAt.IsZero() { + t.Error("CreatedAt should not be zero") + } +} + +func TestCreateSnapshot_SenderError(t *testing.T) { + ms := &mockStore{ + GetSandboxFn: func(_ context.Context, id string) (*store.Sandbox, error) { + return &store.Sandbox{ID: id, HostID: "host-1"}, nil + }, + } + + sender := &mockSender{ + SendAndWaitFn: func(_ context.Context, _ string, _ *fluidv1.ControlMessage, _ time.Duration) (*fluidv1.HostMessage, error) { + return nil, fmt.Errorf("snapshot failed") + }, + } + + orch := newTestOrchestrator(ms, sender) + _, err 
:= orch.CreateSnapshot(context.Background(), "org-1", "sbx-1", "my-snap") + if err == nil { + t.Fatal("CreateSnapshot: expected error from sender") + } + if !strings.Contains(err.Error(), "snapshot failed") { + t.Errorf("error = %q, want it to contain %q", err.Error(), "snapshot failed") + } +} diff --git a/api/internal/orchestrator/placement.go b/api/internal/orchestrator/placement.go new file mode 100644 index 00000000..f0191fb2 --- /dev/null +++ b/api/internal/orchestrator/placement.go @@ -0,0 +1,117 @@ +package orchestrator + +import ( + "fmt" + "time" + + "github.com/aspectrr/fluid.sh/api/internal/registry" +) + +// SelectHost picks the best connected host for a sandbox that needs the given +// base image. Filters by image availability, resources, and health. +func SelectHost(reg *registry.Registry, baseImage, orgID string, heartbeatTimeout time.Duration, requiredCPUs int32, requiredMemoryMB int32) (registry.ConnectedHost, error) { + hosts := reg.ListConnectedByOrg(orgID) + if len(hosts) == 0 { + return registry.ConnectedHost{}, fmt.Errorf("no connected hosts") + } + + now := time.Now() + var best *registry.ConnectedHost + + for i := range hosts { + h := &hosts[i] + if h.Registration == nil { + continue + } + + if !hostHasImage(*h, baseImage) { + continue + } + + if h.Registration.GetAvailableCpus() < int32(requiredCPUs) { + continue + } + if h.Registration.GetAvailableMemoryMb() < int64(requiredMemoryMB) { + continue + } + + if now.Sub(h.LastHeartbeat) > heartbeatTimeout { + continue + } + + if best == nil || hostScore(*h) > hostScore(*best) { + best = h + } + } + + if best == nil { + return registry.ConnectedHost{}, fmt.Errorf("no healthy host with image %q and sufficient resources", baseImage) + } + + return *best, nil +} + +// SelectHostForSourceVM picks a connected host that has the given source VM. +// When requiredCPUs or requiredMemoryMB are non-zero, hosts without sufficient +// resources are skipped (used as CreateSandbox fallback). 
+func SelectHostForSourceVM(reg *registry.Registry, vmName, orgID string, heartbeatTimeout time.Duration, requiredCPUs int32, requiredMemoryMB int32) (registry.ConnectedHost, error) { + hosts := reg.ListConnectedByOrg(orgID) + if len(hosts) == 0 { + return registry.ConnectedHost{}, fmt.Errorf("no connected hosts") + } + + now := time.Now() + var best *registry.ConnectedHost + + for i := range hosts { + h := &hosts[i] + if h.Registration == nil { + continue + } + + if now.Sub(h.LastHeartbeat) > heartbeatTimeout { + continue + } + + if requiredCPUs > 0 && h.Registration.GetAvailableCpus() < requiredCPUs { + continue + } + if requiredMemoryMB > 0 && h.Registration.GetAvailableMemoryMb() < int64(requiredMemoryMB) { + continue + } + + hasVM := false + for _, vm := range h.Registration.GetSourceVms() { + if vm.GetName() == vmName { + hasVM = true + break + } + } + if !hasVM { + continue + } + + if best == nil || hostScore(*h) > hostScore(*best) { + best = h + } + } + + if best == nil { + return registry.ConnectedHost{}, fmt.Errorf("no connected host has source VM %q", vmName) + } + return *best, nil +} + +// hostScore computes a placement score considering both memory and CPU. 
+func hostScore(h registry.ConnectedHost) float64 { + return float64(h.Registration.GetAvailableMemoryMb()) + float64(h.Registration.GetAvailableCpus())*1024 +} + +func hostHasImage(h registry.ConnectedHost, baseImage string) bool { + for _, img := range h.Registration.GetBaseImages() { + if img == baseImage { + return true + } + } + return false +} diff --git a/api/internal/orchestrator/placement_test.go b/api/internal/orchestrator/placement_test.go new file mode 100644 index 00000000..171aa356 --- /dev/null +++ b/api/internal/orchestrator/placement_test.go @@ -0,0 +1,370 @@ +package orchestrator + +import ( + "testing" + "time" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" + + "github.com/aspectrr/fluid.sh/api/internal/registry" +) + +// mockStream implements registry.HostStream for testing. +type mockStream struct{} + +func (m *mockStream) Send(_ *fluidv1.ControlMessage) error { return nil } + +func newRegistryWithHost(t *testing.T, hostID, orgID string, reg *fluidv1.HostRegistration) *registry.Registry { + t.Helper() + r := registry.New() + if err := r.Register(hostID, orgID, hostID+"-hostname", &mockStream{}); err != nil { + t.Fatalf("Register(%s): %v", hostID, err) + } + if reg != nil { + r.SetRegistration(hostID, reg) + } + return r +} + +func TestSelectHost_NoHosts(t *testing.T) { + r := registry.New() + _, err := SelectHost(r, "ubuntu-22.04", "org-1", 90*time.Second, 2, 2048) + if err == nil { + t.Fatal("SelectHost: expected error when no hosts are connected") + } +} + +func TestSelectHost_MatchingImage(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + BaseImages: []string{"ubuntu-22.04", "debian-12"}, + AvailableCpus: 4, + AvailableMemoryMb: 8192, + }) + + h, err := SelectHost(r, "ubuntu-22.04", "org-1", 90*time.Second, 2, 2048) + if err != nil { + t.Fatalf("SelectHost: unexpected error: %v", err) + } + if h.HostID != "host-1" { + 
t.Errorf("HostID = %q, want %q", h.HostID, "host-1") + } +} + +func TestSelectHost_NoMatchingImage(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + BaseImages: []string{"centos-9"}, + AvailableCpus: 4, + AvailableMemoryMb: 8192, + }) + + _, err := SelectHost(r, "ubuntu-22.04", "org-1", 90*time.Second, 2, 2048) + if err == nil { + t.Fatal("SelectHost: expected error when no host has the requested image") + } +} + +func TestSelectHost_InsufficientCPU(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + BaseImages: []string{"ubuntu-22.04"}, + AvailableCpus: 0, // No CPUs available. + AvailableMemoryMb: 8192, + }) + + _, err := SelectHost(r, "ubuntu-22.04", "org-1", 90*time.Second, 2, 2048) + if err == nil { + t.Fatal("SelectHost: expected error when host has insufficient CPUs") + } +} + +func TestSelectHost_InsufficientMemory(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + BaseImages: []string{"ubuntu-22.04"}, + AvailableCpus: 4, + AvailableMemoryMb: 256, // Below required 2048. + }) + + _, err := SelectHost(r, "ubuntu-22.04", "org-1", 90*time.Second, 2, 2048) + if err == nil { + t.Fatal("SelectHost: expected error when host has insufficient memory") + } +} + +func TestSelectHost_StaleHeartbeat(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + BaseImages: []string{"ubuntu-22.04"}, + AvailableCpus: 4, + AvailableMemoryMb: 8192, + }) + + // Use a heartbeat timeout of 1ms and sleep to ensure staleness. 
+ time.Sleep(5 * time.Millisecond) + + _, err := SelectHost(r, "ubuntu-22.04", "org-1", time.Millisecond, 2, 2048) + if err == nil { + t.Fatal("SelectHost: expected error when host has stale heartbeat") + } +} + +func TestSelectHost_NilRegistration(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + // No SetRegistration - Registration is nil. + + _, err := SelectHost(r, "ubuntu-22.04", "org-1", 90*time.Second, 2, 2048) + if err == nil { + t.Fatal("SelectHost: expected error when host has nil registration") + } +} + +func TestSelectHost_PicksHighestMemory(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + BaseImages: []string{"ubuntu-22.04"}, + AvailableCpus: 4, + AvailableMemoryMb: 4096, + }) + _ = r.Register("host-2", "org-1", "h2", &mockStream{}) + r.SetRegistration("host-2", &fluidv1.HostRegistration{ + BaseImages: []string{"ubuntu-22.04"}, + AvailableCpus: 4, + AvailableMemoryMb: 16384, + }) + + h, err := SelectHost(r, "ubuntu-22.04", "org-1", 90*time.Second, 2, 2048) + if err != nil { + t.Fatalf("SelectHost: unexpected error: %v", err) + } + if h.HostID != "host-2" { + t.Errorf("HostID = %q, want %q (host with more memory)", h.HostID, "host-2") + } +} + +func TestSelectHost_FiltersByOrg(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + BaseImages: []string{"ubuntu-22.04"}, + AvailableCpus: 4, + AvailableMemoryMb: 8192, + }) + + _, err := SelectHost(r, "ubuntu-22.04", "org-other", 90*time.Second, 2, 2048) + if err == nil { + t.Fatal("SelectHost: expected error when no hosts belong to the org") + } +} + +func TestSelectHostForSourceVM_Success(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + SourceVms: 
[]*fluidv1.SourceVMInfo{ + {Name: "web-server", State: "running"}, + {Name: "db-server", State: "stopped"}, + }, + AvailableCpus: 4, + AvailableMemoryMb: 8192, + }) + + h, err := SelectHostForSourceVM(r, "web-server", "org-1", 90*time.Second, 0, 0) + if err != nil { + t.Fatalf("SelectHostForSourceVM: unexpected error: %v", err) + } + if h.HostID != "host-1" { + t.Errorf("HostID = %q, want %q", h.HostID, "host-1") + } +} + +func TestSelectHostForSourceVM_NoMatch(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + SourceVms: []*fluidv1.SourceVMInfo{ + {Name: "web-server"}, + }, + }) + + _, err := SelectHostForSourceVM(r, "nonexistent-vm", "org-1", 90*time.Second, 0, 0) + if err == nil { + t.Fatal("SelectHostForSourceVM: expected error when no host has the source VM") + } +} + +func TestSelectHostForSourceVM_NoHosts(t *testing.T) { + r := registry.New() + _, err := SelectHostForSourceVM(r, "web-server", "org-1", 90*time.Second, 0, 0) + if err == nil { + t.Fatal("SelectHostForSourceVM: expected error when no hosts are connected") + } +} + +func TestSelectHostForSourceVM_StaleHeartbeat(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + SourceVms: []*fluidv1.SourceVMInfo{ + {Name: "web-server"}, + }, + }) + + // Use a heartbeat timeout of 1ms and sleep to ensure staleness. + time.Sleep(5 * time.Millisecond) + + _, err := SelectHostForSourceVM(r, "web-server", "org-1", time.Millisecond, 0, 0) + if err == nil { + t.Fatal("SelectHostForSourceVM: expected error when host heartbeat is stale") + } +} + +func TestSelectHostForSourceVM_NilRegistration(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + // No SetRegistration. 
+ + _, err := SelectHostForSourceVM(r, "web-server", "org-1", 90*time.Second, 0, 0) + if err == nil { + t.Fatal("SelectHostForSourceVM: expected error when host has nil registration") + } +} + +func TestSelectHostForSourceVM_FiltersByOrg(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + SourceVms: []*fluidv1.SourceVMInfo{ + {Name: "web-server"}, + }, + }) + + _, err := SelectHostForSourceVM(r, "web-server", "org-other", 90*time.Second, 0, 0) + if err == nil { + t.Fatal("SelectHostForSourceVM: expected error when no hosts belong to the org") + } +} + +// TestSelectHost_FallbackToSourceVM verifies the orchestrator-level fallback: +// SelectHost fails (no matching base image) but SelectHostForSourceVM succeeds +// (host has the source VM). +func TestSelectHost_FallbackToSourceVM(t *testing.T) { + r := registry.New() + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + BaseImages: []string{"centos-9"}, // Does NOT match "web-server" + AvailableCpus: 4, + AvailableMemoryMb: 8192, + SourceVms: []*fluidv1.SourceVMInfo{ + {Name: "web-server", State: "running"}, + }, + }) + + // SelectHost should fail because base image "web-server" is not in BaseImages + _, err := SelectHost(r, "web-server", "org-1", 90*time.Second, 2, 2048) + if err == nil { + t.Fatal("SelectHost: expected error when base image doesn't match") + } + + // But SelectHostForSourceVM should succeed because host has the source VM + h, err := SelectHostForSourceVM(r, "web-server", "org-1", 90*time.Second, 0, 0) + if err != nil { + t.Fatalf("SelectHostForSourceVM: unexpected error: %v", err) + } + if h.HostID != "host-1" { + t.Errorf("HostID = %q, want %q", h.HostID, "host-1") + } +} + +func TestSelectHostForSourceVM_PicksBestScore(t *testing.T) { + r := registry.New() + // Host 1: lower resources + _ = r.Register("host-1", "org-1", "h1", 
&mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + SourceVms: []*fluidv1.SourceVMInfo{ + {Name: "web-server", State: "running"}, + }, + AvailableCpus: 2, + AvailableMemoryMb: 4096, + }) + // Host 2: higher resources, same source VM + _ = r.Register("host-2", "org-1", "h2", &mockStream{}) + r.SetRegistration("host-2", &fluidv1.HostRegistration{ + SourceVms: []*fluidv1.SourceVMInfo{ + {Name: "web-server", State: "running"}, + }, + AvailableCpus: 8, + AvailableMemoryMb: 16384, + }) + + h, err := SelectHostForSourceVM(r, "web-server", "org-1", 90*time.Second, 0, 0) + if err != nil { + t.Fatalf("SelectHostForSourceVM: unexpected error: %v", err) + } + // host-1 score: 4096 + 2*1024 = 6144 + // host-2 score: 16384 + 8*1024 = 24576 + if h.HostID != "host-2" { + t.Errorf("HostID = %q, want %q (host with higher score)", h.HostID, "host-2") + } +} + +func TestSelectHost_ScorePrefersCPUAndMemory(t *testing.T) { + r := registry.New() + // Host 1: more memory but fewer CPUs + _ = r.Register("host-1", "org-1", "h1", &mockStream{}) + r.SetRegistration("host-1", &fluidv1.HostRegistration{ + BaseImages: []string{"ubuntu-22.04"}, + AvailableCpus: 2, + AvailableMemoryMb: 16384, + }) + // Host 2: fewer memory but more CPUs + _ = r.Register("host-2", "org-1", "h2", &mockStream{}) + r.SetRegistration("host-2", &fluidv1.HostRegistration{ + BaseImages: []string{"ubuntu-22.04"}, + AvailableCpus: 16, + AvailableMemoryMb: 4096, + }) + + // host-1 score: 16384 + 2*1024 = 18432 + // host-2 score: 4096 + 16*1024 = 20480 + h, err := SelectHost(r, "ubuntu-22.04", "org-1", 90*time.Second, 2, 2048) + if err != nil { + t.Fatalf("SelectHost: unexpected error: %v", err) + } + if h.HostID != "host-2" { + t.Errorf("HostID = %q, want %q (host with higher combined score)", h.HostID, "host-2") + } +} + +func TestSelectHost_EqualScorePicksFirst(t *testing.T) { + r := registry.New() + _ = r.Register("host-a", "org-1", "ha", &mockStream{}) + r.SetRegistration("host-a", 
&fluidv1.HostRegistration{ + BaseImages: []string{"ubuntu-22.04"}, + AvailableCpus: 4, + AvailableMemoryMb: 8192, + }) + _ = r.Register("host-b", "org-1", "hb", &mockStream{}) + r.SetRegistration("host-b", &fluidv1.HostRegistration{ + BaseImages: []string{"ubuntu-22.04"}, + AvailableCpus: 4, + AvailableMemoryMb: 8192, + }) + + h, err := SelectHost(r, "ubuntu-22.04", "org-1", 90*time.Second, 2, 2048) + if err != nil { + t.Fatalf("SelectHost: unexpected error: %v", err) + } + // With equal scores, either host is acceptable - just verify no error. + if h.HostID != "host-a" && h.HostID != "host-b" { + t.Errorf("HostID = %q, want one of host-a or host-b", h.HostID) + } +} diff --git a/api/internal/orchestrator/types.go b/api/internal/orchestrator/types.go new file mode 100644 index 00000000..65e14610 --- /dev/null +++ b/api/internal/orchestrator/types.go @@ -0,0 +1,105 @@ +package orchestrator + +import "time" + +// CreateSandboxRequest is the request for creating a sandbox. +type CreateSandboxRequest struct { + OrgID string `json:"org_id"` + AgentID string `json:"agent_id"` + SourceVM string `json:"source_vm"` + Name string `json:"name"` + VCPUs int `json:"vcpus,omitempty"` + MemoryMB int `json:"memory_mb,omitempty"` + TTLSeconds int `json:"ttl_seconds,omitempty"` + Network string `json:"network,omitempty"` + SourceHostID string `json:"source_host_id,omitempty"` + Live bool `json:"live,omitempty"` +} + +// DiscoveredHost is a host discovered from SSH config parsing + probing. +type DiscoveredHost struct { + Name string `json:"name"` + Hostname string `json:"hostname"` + User string `json:"user"` + Port int `json:"port"` + IdentityFile string `json:"identity_file"` + Reachable bool `json:"reachable"` + HasLibvirt bool `json:"has_libvirt"` + HasProxmox bool `json:"has_proxmox"` + VMs []string `json:"vms,omitempty"` + Error string `json:"error,omitempty"` +} + +// RunCommandRequest is the request for running a command in a sandbox. 
+type RunCommandRequest struct { + Command string `json:"command"` + TimeoutSec int `json:"timeout_seconds,omitempty"` + Env map[string]string `json:"env,omitempty"` +} + +// SnapshotRequest is the request for creating a snapshot. +type SnapshotRequest struct { + Name string `json:"name"` +} + +// SnapshotResponse is returned after creating a snapshot. +type SnapshotResponse struct { + SnapshotID string `json:"snapshot_id"` + SandboxID string `json:"sandbox_id"` + SnapshotName string `json:"snapshot_name"` + CreatedAt time.Time `json:"created_at"` +} + +// HostInfo is the REST representation of a connected host. +type HostInfo struct { + HostID string `json:"host_id"` + Hostname string `json:"hostname"` + Status string `json:"status"` + ActiveSandboxes int `json:"active_sandboxes"` + AvailableCPUs int32 `json:"available_cpus"` + AvailableMemMB int64 `json:"available_memory_mb"` + AvailableDiskMB int64 `json:"available_disk_mb"` + BaseImages []string `json:"base_images"` + LastHeartbeat string `json:"last_heartbeat"` +} + +// VMInfo is the REST representation of a source VM. +type VMInfo struct { + Name string `json:"name"` + State string `json:"state"` + IPAddress string `json:"ip_address,omitempty"` + Prepared bool `json:"prepared"` + HostID string `json:"host_id,omitempty"` +} + +// PrepareRequest is the request for preparing a source VM. +type PrepareRequest struct { + SSHUser string `json:"ssh_user"` + SSHKeyPath string `json:"ssh_key_path"` +} + +// RunSourceRequest is the request for running a command on a source VM. +type RunSourceRequest struct { + Command string `json:"command"` + TimeoutSec int `json:"timeout_seconds,omitempty"` +} + +// ReadSourceRequest is the request for reading a file from a source VM. +type ReadSourceRequest struct { + Path string `json:"path"` +} + +// SourceCommandResult is the response for a source VM command. 
+type SourceCommandResult struct { + SourceVM string `json:"source_vm"` + ExitCode int `json:"exit_code"` + Stdout string `json:"stdout"` + Stderr string `json:"stderr"` +} + +// SourceFileResult is the response for reading a source VM file. +type SourceFileResult struct { + SourceVM string `json:"source_vm"` + Path string `json:"path"` + Content string `json:"content"` +} diff --git a/api/internal/registry/registry.go b/api/internal/registry/registry.go new file mode 100644 index 00000000..6b5ddbe1 --- /dev/null +++ b/api/internal/registry/registry.go @@ -0,0 +1,163 @@ +package registry + +import ( + "fmt" + "sync" + "time" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" +) + +// HostStream is the interface for sending control messages to a connected host. +type HostStream interface { + Send(msg *fluidv1.ControlMessage) error +} + +// ConnectedHost represents a sandbox host that is actively connected via gRPC. +type ConnectedHost struct { + HostID string + OrgID string + Hostname string + Stream HostStream + LastHeartbeat time.Time + Registration *fluidv1.HostRegistration + ActiveSandboxes int32 + SourceVMCount int32 +} + +// Registry tracks all currently connected sandbox hosts in memory. +type Registry struct { + mu sync.RWMutex + hosts map[string]*ConnectedHost +} + +// New creates an empty host registry. +func New() *Registry { + return &Registry{ + hosts: make(map[string]*ConnectedHost), + } +} + +// Register adds or replaces a connected host in the registry. +func (r *Registry) Register(hostID, orgID, hostname string, stream HostStream) error { + if hostID == "" { + return fmt.Errorf("host ID must not be empty") + } + if stream == nil { + return fmt.Errorf("stream must not be nil") + } + + r.mu.Lock() + defer r.mu.Unlock() + + r.hosts[hostID] = &ConnectedHost{ + HostID: hostID, + OrgID: orgID, + Hostname: hostname, + Stream: stream, + LastHeartbeat: time.Now(), + } + return nil +} + +// Unregister removes a host from the registry. 
+func (r *Registry) Unregister(hostID string) { + r.mu.Lock() + defer r.mu.Unlock() + delete(r.hosts, hostID) +} + +// GetHost returns a value copy of the connected host for the given ID, if present. +func (r *Registry) GetHost(hostID string) (ConnectedHost, bool) { + r.mu.RLock() + defer r.mu.RUnlock() + h, ok := r.hosts[hostID] + if !ok { + return ConnectedHost{}, false + } + return *h, ok +} + +// ListConnected returns value copies of all currently connected hosts. +func (r *Registry) ListConnected() []ConnectedHost { + r.mu.RLock() + defer r.mu.RUnlock() + + result := make([]ConnectedHost, 0, len(r.hosts)) + for _, h := range r.hosts { + result = append(result, *h) + } + return result +} + +// ListConnectedByOrg returns value copies of connected hosts belonging to the given org. +func (r *Registry) ListConnectedByOrg(orgID string) []ConnectedHost { + r.mu.RLock() + defer r.mu.RUnlock() + + var result []ConnectedHost + for _, h := range r.hosts { + if h.OrgID == orgID { + result = append(result, *h) + } + } + return result +} + +// SetRegistration updates the registration info and heartbeat for a host. +func (r *Registry) SetRegistration(hostID string, reg *fluidv1.HostRegistration) { + r.mu.Lock() + defer r.mu.Unlock() + if h, ok := r.hosts[hostID]; ok { + h.Registration = reg + h.LastHeartbeat = time.Now() + } +} + +// UpdateHeartbeat records the latest heartbeat time for a connected host. +func (r *Registry) UpdateHeartbeat(hostID string) { + r.mu.Lock() + defer r.mu.Unlock() + if h, ok := r.hosts[hostID]; ok { + h.LastHeartbeat = time.Now() + } +} + +// UpdateResources updates the in-memory available CPU and memory for a host. 
+func (r *Registry) UpdateResources(hostID string, cpus int32, memMB int64) { + r.mu.Lock() + defer r.mu.Unlock() + h, ok := r.hosts[hostID] + if !ok { + return + } + if h.Registration != nil { + h.Registration.AvailableCpus = cpus + h.Registration.AvailableMemoryMb = memMB + } +} + +// UpdateHeartbeatCounts updates the per-host sandbox and source VM counts from a heartbeat. +func (r *Registry) UpdateHeartbeatCounts(hostID string, activeSandboxes, sourceVMCount int32) { + r.mu.Lock() + defer r.mu.Unlock() + if h, ok := r.hosts[hostID]; ok { + h.LastHeartbeat = time.Now() + h.ActiveSandboxes = activeSandboxes + h.SourceVMCount = sourceVMCount + } +} + +// OrgResourceCounts returns aggregated resource counts for an org across all connected hosts. +func (r *Registry) OrgResourceCounts(orgID string) (sandboxes, sourceVMs, daemons int) { + r.mu.RLock() + defer r.mu.RUnlock() + for _, h := range r.hosts { + if h.OrgID == orgID { + sandboxes += int(h.ActiveSandboxes) + sourceVMs += int(h.SourceVMCount) + daemons++ + } + } + return +} diff --git a/api/internal/registry/registry_test.go b/api/internal/registry/registry_test.go new file mode 100644 index 00000000..9d8cde5c --- /dev/null +++ b/api/internal/registry/registry_test.go @@ -0,0 +1,246 @@ +package registry + +import ( + "sync" + "testing" + "time" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" +) + +// mockStream implements HostStream for testing. 
+type mockStream struct{} + +func (m *mockStream) Send(_ *fluidv1.ControlMessage) error { return nil } + +func TestRegister(t *testing.T) { + reg := New() + s := &mockStream{} + + if err := reg.Register("host-1", "org-1", "myhost", s); err != nil { + t.Fatalf("Register: unexpected error: %v", err) + } + + h, ok := reg.GetHost("host-1") + if !ok { + t.Fatal("GetHost: expected host to be found") + } + if h.HostID != "host-1" { + t.Errorf("HostID = %q, want %q", h.HostID, "host-1") + } + if h.OrgID != "org-1" { + t.Errorf("OrgID = %q, want %q", h.OrgID, "org-1") + } + if h.Hostname != "myhost" { + t.Errorf("Hostname = %q, want %q", h.Hostname, "myhost") + } + if h.Stream == nil { + t.Error("Stream: expected non-nil") + } +} + +func TestRegister_EmptyHostID(t *testing.T) { + reg := New() + err := reg.Register("", "org-1", "myhost", &mockStream{}) + if err == nil { + t.Fatal("Register: expected error for empty host ID") + } +} + +func TestRegister_NilStream(t *testing.T) { + reg := New() + err := reg.Register("host-1", "org-1", "myhost", nil) + if err == nil { + t.Fatal("Register: expected error for nil stream") + } +} + +func TestUnregister(t *testing.T) { + reg := New() + _ = reg.Register("host-1", "org-1", "myhost", &mockStream{}) + + reg.Unregister("host-1") + + _, ok := reg.GetHost("host-1") + if ok { + t.Fatal("GetHost: expected host to be removed after Unregister") + } +} + +func TestUnregister_Nonexistent(t *testing.T) { + reg := New() + // Should not panic. 
+ reg.Unregister("nonexistent") +} + +func TestListConnected(t *testing.T) { + reg := New() + _ = reg.Register("host-1", "org-1", "h1", &mockStream{}) + _ = reg.Register("host-2", "org-1", "h2", &mockStream{}) + _ = reg.Register("host-3", "org-2", "h3", &mockStream{}) + + hosts := reg.ListConnected() + if len(hosts) != 3 { + t.Fatalf("ListConnected: got %d hosts, want 3", len(hosts)) + } +} + +func TestListConnectedByOrg(t *testing.T) { + reg := New() + _ = reg.Register("host-1", "org-1", "h1", &mockStream{}) + _ = reg.Register("host-2", "org-1", "h2", &mockStream{}) + _ = reg.Register("host-3", "org-2", "h3", &mockStream{}) + + hosts := reg.ListConnectedByOrg("org-1") + if len(hosts) != 2 { + t.Fatalf("ListConnectedByOrg(org-1): got %d hosts, want 2", len(hosts)) + } + + hosts = reg.ListConnectedByOrg("org-2") + if len(hosts) != 1 { + t.Fatalf("ListConnectedByOrg(org-2): got %d hosts, want 1", len(hosts)) + } + + hosts = reg.ListConnectedByOrg("org-none") + if len(hosts) != 0 { + t.Fatalf("ListConnectedByOrg(org-none): got %d hosts, want 0", len(hosts)) + } +} + +func TestSetRegistration(t *testing.T) { + reg := New() + _ = reg.Register("host-1", "org-1", "h1", &mockStream{}) + + regData := &fluidv1.HostRegistration{ + Hostname: "updated-host", + AvailableCpus: 8, + AvailableMemoryMb: 16384, + AvailableDiskMb: 102400, + BaseImages: []string{"ubuntu-22.04", "debian-12"}, + SourceVms: []*fluidv1.SourceVMInfo{ + {Name: "web-server", State: "running"}, + }, + Bridges: []*fluidv1.BridgeInfo{ + {Name: "br0", Subnet: "10.0.0.0/24"}, + }, + } + + beforeSet := time.Now() + reg.SetRegistration("host-1", regData) + + h, ok := reg.GetHost("host-1") + if !ok { + t.Fatal("GetHost: host not found after SetRegistration") + } + if h.Registration == nil { + t.Fatal("Registration: expected non-nil after SetRegistration") + } + if h.Registration.GetAvailableCpus() != 8 { + t.Errorf("AvailableCpus = %d, want 8", h.Registration.GetAvailableCpus()) + } + if 
h.Registration.GetAvailableMemoryMb() != 16384 { + t.Errorf("AvailableMemoryMb = %d, want 16384", h.Registration.GetAvailableMemoryMb()) + } + if len(h.Registration.GetBaseImages()) != 2 { + t.Errorf("BaseImages: got %d, want 2", len(h.Registration.GetBaseImages())) + } + if len(h.Registration.GetSourceVms()) != 1 { + t.Errorf("SourceVms: got %d, want 1", len(h.Registration.GetSourceVms())) + } + if len(h.Registration.GetBridges()) != 1 { + t.Errorf("Bridges: got %d, want 1", len(h.Registration.GetBridges())) + } + // SetRegistration also updates heartbeat. + if h.LastHeartbeat.Before(beforeSet) { + t.Error("LastHeartbeat should have been updated by SetRegistration") + } +} + +func TestSetRegistration_NonexistentHost(t *testing.T) { + reg := New() + // Should not panic. + reg.SetRegistration("nonexistent", &fluidv1.HostRegistration{}) +} + +func TestUpdateHeartbeat(t *testing.T) { + reg := New() + _ = reg.Register("host-1", "org-1", "h1", &mockStream{}) + + h, _ := reg.GetHost("host-1") + originalHB := h.LastHeartbeat + + // Small sleep to ensure time difference. + time.Sleep(10 * time.Millisecond) + reg.UpdateHeartbeat("host-1") + + h, _ = reg.GetHost("host-1") + if !h.LastHeartbeat.After(originalHB) { + t.Error("LastHeartbeat should be newer after UpdateHeartbeat") + } +} + +func TestUpdateHeartbeat_NonexistentHost(t *testing.T) { + reg := New() + // Should not panic. + reg.UpdateHeartbeat("nonexistent") +} + +func TestConcurrentAccess(t *testing.T) { + reg := New() + var wg sync.WaitGroup + const n = 100 + + // Register concurrently. + for i := 0; i < n; i++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + hostID := "host-" + string(rune('A'+id%26)) + string(rune('0'+id%10)) + _ = reg.Register(hostID, "org-1", "h", &mockStream{}) + }(i) + } + wg.Wait() + + // Read concurrently. + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + defer wg.Done() + reg.ListConnected() + }() + } + wg.Wait() + + // Unregister concurrently. 
+ hosts := reg.ListConnected() + for _, h := range hosts { + wg.Add(1) + go func(id string) { + defer wg.Done() + reg.Unregister(id) + }(h.HostID) + } + wg.Wait() + + remaining := reg.ListConnected() + if len(remaining) != 0 { + t.Errorf("after unregister all: got %d hosts, want 0", len(remaining)) + } +} + +func TestRegister_ReplacesExisting(t *testing.T) { + reg := New() + s1 := &mockStream{} + s2 := &mockStream{} + + _ = reg.Register("host-1", "org-1", "h1-old", s1) + _ = reg.Register("host-1", "org-1", "h1-new", s2) + + h, ok := reg.GetHost("host-1") + if !ok { + t.Fatal("GetHost: expected host to exist") + } + if h.Hostname != "h1-new" { + t.Errorf("Hostname = %q, want %q (should be replaced)", h.Hostname, "h1-new") + } +} diff --git a/api/internal/rest/agent_handlers.go b/api/internal/rest/agent_handlers.go new file mode 100644 index 00000000..92864296 --- /dev/null +++ b/api/internal/rest/agent_handlers.go @@ -0,0 +1,162 @@ +package rest + +// Agent handlers - commented out, not yet ready for integration. + +/* +import ( + "errors" + "fmt" + "net/http" + + "github.com/go-chi/chi/v5" + + "github.com/aspectrr/fluid.sh/api/internal/agent" + "github.com/aspectrr/fluid.sh/api/internal/auth" + serverError "github.com/aspectrr/fluid.sh/api/internal/error" + serverJSON "github.com/aspectrr/fluid.sh/api/internal/json" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +// handleAgentChat streams an SSE response for the agent chat. 
+func (s *Server) handleAgentChat(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + user := auth.UserFromContext(r.Context()) + + var req agent.ChatRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + if req.Message == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("message is required")) + return + } + + if s.agentClient == nil { + serverError.RespondError(w, http.StatusServiceUnavailable, fmt.Errorf("agent not configured: OPENROUTER_API_KEY not set")) + return + } + + s.telemetry.Track(user.ID, "agent_chat_sent", map[string]any{"org_id": org.ID, "model": req.Model}) + + s.agentClient.StreamChat(r.Context(), w, org.ID, user.ID, req) +} + +// handleListConversations returns all conversations for the org. +func (s *Server) handleListConversations(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + conversations, err := s.store.ListAgentConversationsByOrg(r.Context(), org.ID) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to list conversations")) + return + } + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "conversations": conversations, + "count": len(conversations), + }) +} + +// handleGetConversation returns a specific conversation. 
+func (s *Server) handleGetConversation(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + convID := chi.URLParam(r, "conversationID") + conv, err := s.store.GetAgentConversation(r.Context(), convID) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("conversation not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get conversation")) + return + } + if conv.OrgID != org.ID { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("conversation does not belong to this organization")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, conv) +} + +// handleListMessages returns messages for a conversation. +func (s *Server) handleListMessages(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + convID := chi.URLParam(r, "conversationID") + conv, err := s.store.GetAgentConversation(r.Context(), convID) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("conversation not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get conversation")) + return + } + if conv.OrgID != org.ID { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("conversation does not belong to this organization")) + return + } + + messages, err := s.store.ListAgentMessages(r.Context(), convID) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to list messages")) + return + } + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "messages": messages, + "count": len(messages), + }) +} + +// handleDeleteConversation deletes a conversation and its messages. 
+func (s *Server) handleDeleteConversation(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + convID := chi.URLParam(r, "conversationID") + conv, err := s.store.GetAgentConversation(r.Context(), convID) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("conversation not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get conversation")) + return + } + if conv.OrgID != org.ID { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("conversation does not belong to this organization")) + return + } + + if err := s.store.DeleteAgentConversation(r.Context(), convID); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to delete conversation")) + return + } + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "deleted": true, + "conversation_id": convID, + }) +} + +// handleListModels returns available LLM models with pricing. +func (s *Server) handleListModels(w http.ResponseWriter, r *http.Request) { + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "models": agent.AvailableModels(), + }) +} +*/ diff --git a/api/internal/rest/agent_handlers_test.go b/api/internal/rest/agent_handlers_test.go new file mode 100644 index 00000000..a8f9fd9d --- /dev/null +++ b/api/internal/rest/agent_handlers_test.go @@ -0,0 +1,207 @@ +package rest + +// Agent handler tests - commented out, not yet ready for integration. 
+ +/* +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +func TestHandleListConversations(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.ListAgentConversationsByOrgFn = func(_ context.Context, orgID string) ([]*store.AgentConversation, error) { + if orgID != testOrg.ID { + t.Fatalf("unexpected orgID: %s", orgID) + } + return []*store.AgentConversation{ + {ID: "conv-1", OrgID: testOrg.ID, UserID: testUser.ID, Title: "Test Conv", Model: "gpt-4o"}, + }, nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/agent/conversations", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + var resp map[string]any + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to parse JSON: %v", err) + } + if resp["count"] != float64(1) { + t.Fatalf("expected count 1, got %v", resp["count"]) + } + convs, ok := resp["conversations"].([]any) + if !ok || len(convs) != 1 { + t.Fatalf("expected 1 conversation, got %v", resp["conversations"]) + } +} + +func TestHandleGetConversation(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetAgentConversationFn = func(_ context.Context, id string) (*store.AgentConversation, error) { + if id == "conv-1" { + return &store.AgentConversation{ + ID: "conv-1", + OrgID: testOrg.ID, + Title: "My Conv", + }, nil + } + return nil, store.ErrNotFound + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/agent/conversations/conv-1", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + var resp map[string]any + if err := 
json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to parse JSON: %v", err) + } + if resp["id"] != "conv-1" { + t.Fatalf("expected id conv-1, got %v", resp["id"]) + } + }) + + t.Run("not_found", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetAgentConversationFn = func(_ context.Context, id string) (*store.AgentConversation, error) { + return nil, store.ErrNotFound + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/agent/conversations/nonexistent", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("wrong_org", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetAgentConversationFn = func(_ context.Context, id string) (*store.AgentConversation, error) { + return &store.AgentConversation{ + ID: "conv-1", + OrgID: "ORG-other", + Title: "Other Org Conv", + }, nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/agent/conversations/conv-1", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusForbidden { + t.Fatalf("expected 403, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} + +func TestHandleDeleteConversation(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetAgentConversationFn = func(_ context.Context, id string) (*store.AgentConversation, error) { + if id == "conv-1" { + return &store.AgentConversation{ID: "conv-1", OrgID: testOrg.ID}, nil + } + return nil, store.ErrNotFound + } + deleted := false + ms.DeleteAgentConversationFn = func(_ context.Context, id string) error { + if id == "conv-1" { + deleted = true + } + return nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/agent/conversations/conv-1", nil) + 
s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + if !deleted { + t.Fatal("expected DeleteAgentConversation to be called") + } + resp := parseJSONResponse(rr) + if resp["deleted"] != true { + t.Fatalf("expected deleted=true, got %v", resp["deleted"]) + } +} + +func TestHandleListMessages(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetAgentConversationFn = func(_ context.Context, id string) (*store.AgentConversation, error) { + if id == "conv-1" { + return &store.AgentConversation{ID: "conv-1", OrgID: testOrg.ID}, nil + } + return nil, store.ErrNotFound + } + ms.ListAgentMessagesFn = func(_ context.Context, convID string) ([]*store.AgentMessage, error) { + return []*store.AgentMessage{ + {ID: "msg-1", ConversationID: convID, Role: store.MessageRoleUser, Content: "hello", CreatedAt: time.Now()}, + {ID: "msg-2", ConversationID: convID, Role: store.MessageRoleAssistant, Content: "hi", CreatedAt: time.Now()}, + }, nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/agent/conversations/conv-1/messages", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + resp := parseJSONResponse(rr) + if resp["count"] != float64(2) { + t.Fatalf("expected count 2, got %v", resp["count"]) + } +} + +func TestHandleListModels(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/agent/models", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + var resp map[string]any + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to parse JSON: %v", err) + } + models, ok := 
resp["models"].([]any) + if !ok || len(models) == 0 { + t.Fatalf("expected non-empty models list, got %v", resp["models"]) + } +} +*/ diff --git a/api/internal/rest/auth_handlers.go b/api/internal/rest/auth_handlers.go new file mode 100644 index 00000000..7d356318 --- /dev/null +++ b/api/internal/rest/auth_handlers.go @@ -0,0 +1,723 @@ +package rest + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/mail" + "strings" + "time" + + "github.com/aspectrr/fluid.sh/api/internal/auth" + serverError "github.com/aspectrr/fluid.sh/api/internal/error" + "github.com/aspectrr/fluid.sh/api/internal/id" + serverJSON "github.com/aspectrr/fluid.sh/api/internal/json" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +var oauthHTTPClient = &http.Client{Timeout: 10 * time.Second} + +// handleHealth godoc +// @Summary Health check +// @Description Returns API health status +// @Tags Health +// @Produce json +// @Success 200 {object} map[string]string +// @Router /health [get] +func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) { + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{"status": "ok"}) +} + +// --- Register --- + +type registerRequest struct { + Email string `json:"email"` + Password string `json:"password"` + DisplayName string `json:"display_name"` +} + +type authResponse struct { + User *userResponse `json:"user"` +} + +type userResponse struct { + ID string `json:"id"` + Email string `json:"email"` + DisplayName string `json:"display_name"` + AvatarURL string `json:"avatar_url,omitempty"` + EmailVerified bool `json:"email_verified"` +} + +// handleRegister godoc +// @Summary Register a new user +// @Description Create a new user account and return a session cookie +// @Tags Auth +// @Accept json +// @Produce json +// @Param request body registerRequest true "Registration details" +// @Success 201 {object} authResponse +// @Failure 400 {object} error.ErrorResponse +// @Failure 409 {object} 
error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Router /auth/register [post] +func (s *Server) handleRegister(w http.ResponseWriter, r *http.Request) { + var req registerRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.Email == "" || req.Password == "" || req.DisplayName == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("email, password, and display_name are required")) + return + } + + if _, err := mail.ParseAddress(req.Email); err != nil { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("invalid email format")) + return + } + + if len(req.Password) < 8 { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("password must be at least 8 characters")) + return + } + + if len(req.Password) > 72 { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("password must be at most 72 characters")) + return + } + + req.Email = strings.ToLower(req.Email) + + hash, err := auth.HashPassword(req.Password) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to hash password")) + return + } + + userID, err := id.Generate("USR-") + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to generate user ID")) + return + } + + user := &store.User{ + ID: userID, + Email: req.Email, + DisplayName: req.DisplayName, + PasswordHash: hash, + } + + if err := s.store.CreateUser(r.Context(), user); err != nil { + if errors.Is(err, store.ErrAlreadyExists) { + serverError.RespondError(w, http.StatusConflict, fmt.Errorf("email already registered")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create user")) + return + } + + rawToken, _, err := auth.CreateSession(r.Context(), s.store, user.ID, r.RemoteAddr, r.UserAgent(), s.cfg.Auth.SessionTTL) + if err != nil { + 
serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create session")) + return + } + + auth.SetSessionCookie(w, rawToken, s.cfg.Auth.SessionTTL, s.cfg.Auth.SecureCookies) + + s.telemetry.Track(user.ID, "user_registered", map[string]any{"provider": "password"}) + + _ = serverJSON.RespondJSON(w, http.StatusCreated, authResponse{ + User: &userResponse{ + ID: user.ID, + Email: user.Email, + DisplayName: user.DisplayName, + AvatarURL: user.AvatarURL, + EmailVerified: user.EmailVerified, + }, + }) +} + +// --- Login --- + +type loginRequest struct { + Email string `json:"email"` + Password string `json:"password"` +} + +// handleLogin godoc +// @Summary Log in +// @Description Authenticate with email and password, returns a session cookie +// @Tags Auth +// @Accept json +// @Produce json +// @Param request body loginRequest true "Login credentials" +// @Success 200 {object} authResponse +// @Failure 400 {object} error.ErrorResponse +// @Failure 401 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Router /auth/login [post] +func (s *Server) handleLogin(w http.ResponseWriter, r *http.Request) { + var req loginRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.Email == "" || req.Password == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("email and password are required")) + return + } + + if len(req.Password) > 72 { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("password too long (max 72 characters)")) + return + } + + req.Email = strings.ToLower(req.Email) + + user, err := s.store.GetUserByEmail(r.Context(), req.Email) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusUnauthorized, fmt.Errorf("invalid email or password")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to 
look up user")) + return + } + + if user.PasswordHash == "" { + serverError.RespondError(w, http.StatusUnauthorized, fmt.Errorf("this account uses OAuth login")) + return + } + + if err := auth.VerifyPassword(user.PasswordHash, req.Password); err != nil { + serverError.RespondError(w, http.StatusUnauthorized, fmt.Errorf("invalid email or password")) + return + } + + rawToken, _, err := auth.CreateSession(r.Context(), s.store, user.ID, r.RemoteAddr, r.UserAgent(), s.cfg.Auth.SessionTTL) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create session")) + return + } + + auth.SetSessionCookie(w, rawToken, s.cfg.Auth.SessionTTL, s.cfg.Auth.SecureCookies) + + s.telemetry.Track(user.ID, "user_logged_in", map[string]any{"provider": "password"}) + + _ = serverJSON.RespondJSON(w, http.StatusOK, authResponse{ + User: &userResponse{ + ID: user.ID, + Email: user.Email, + DisplayName: user.DisplayName, + AvatarURL: user.AvatarURL, + EmailVerified: user.EmailVerified, + }, + }) +} + +// --- Logout --- + +// handleLogout godoc +// @Summary Log out +// @Description Invalidate the current session and clear the session cookie +// @Tags Auth +// @Produce json +// @Success 200 {object} map[string]string +// @Security CookieAuth +// @Router /auth/logout [post] +func (s *Server) handleLogout(w http.ResponseWriter, r *http.Request) { + cookie, err := r.Cookie(auth.SessionCookieName) + if err == nil { + _ = s.store.DeleteSession(r.Context(), auth.HashSessionToken(cookie.Value)) + } + auth.ClearSessionCookie(w, s.cfg.Auth.SecureCookies) + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{"status": "logged out"}) +} + +// --- Me --- + +// handleMe godoc +// @Summary Get current user +// @Description Return the currently authenticated user +// @Tags Auth +// @Produce json +// @Success 200 {object} authResponse +// @Failure 401 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /auth/me [get] +func (s *Server) 
handleMe(w http.ResponseWriter, r *http.Request) { + user := auth.UserFromContext(r.Context()) + if user == nil { + serverError.RespondError(w, http.StatusUnauthorized, fmt.Errorf("not authenticated")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, authResponse{ + User: &userResponse{ + ID: user.ID, + Email: user.Email, + DisplayName: user.DisplayName, + AvatarURL: user.AvatarURL, + EmailVerified: user.EmailVerified, + }, + }) +} + +// --- Onboarding --- + +type onboardingRequest struct { + OrgName string `json:"org_name"` + Role string `json:"role"` + UseCases []string `json:"use_cases"` + ReferralSource string `json:"referral_source"` +} + +// handleOnboarding godoc +// @Summary Complete onboarding +// @Description Create the user's first organization during onboarding +// @Tags Auth +// @Accept json +// @Produce json +// @Param request body onboardingRequest true "Onboarding details" +// @Success 201 {object} orgResponse +// @Failure 400 {object} error.ErrorResponse +// @Failure 409 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /auth/onboarding [post] +func (s *Server) handleOnboarding(w http.ResponseWriter, r *http.Request) { + user := auth.UserFromContext(r.Context()) + if user == nil { + serverError.RespondError(w, http.StatusUnauthorized, fmt.Errorf("not authenticated")) + return + } + + var req onboardingRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.OrgName == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("org_name is required")) + return + } + + slug := strings.ToLower(req.OrgName) + slug = strings.Map(func(r rune) rune { + if (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') { + return r + } + if r == ' ' || r == '-' || r == '_' { + return '-' + } + return -1 + }, slug) + for strings.Contains(slug, "--") { + slug = strings.ReplaceAll(slug, 
"--", "-") + } + slug = strings.Trim(slug, "-") + if !slugRegex.MatchString(slug) { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("org name must produce a valid slug (3-50 lowercase alphanumeric chars and hyphens)")) + return + } + + orgID, err := id.Generate("ORG-") + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to generate org ID")) + return + } + + org := &store.Organization{ + ID: orgID, + Name: req.OrgName, + Slug: slug, + OwnerID: user.ID, + } + + baseSlug := slug + for attempt := 0; attempt < 3; attempt++ { + if attempt > 0 { + suffix := generateSlugSuffix() + maxBase := maxSlugLen - 1 - len(suffix) + b := baseSlug + if len(b) > maxBase { + b = strings.TrimRight(b[:maxBase], "-") + } + slug = b + "-" + suffix + org.Slug = slug + } + + err = s.store.WithTx(r.Context(), func(tx store.DataStore) error { + if err := tx.CreateOrganization(r.Context(), org); err != nil { + return err + } + memberID, err := id.Generate("MBR-") + if err != nil { + return fmt.Errorf("generate member ID: %w", err) + } + member := &store.OrgMember{ + ID: memberID, + OrgID: org.ID, + UserID: user.ID, + Role: store.OrgRoleOwner, + } + return tx.CreateOrgMember(r.Context(), member) + }) + if err == nil { + break + } + if !isDuplicateSlugErr(err) { + break + } + } + + if err != nil { + serverError.RespondErrorMsg(w, http.StatusInternalServerError, "failed to create organization", err) + return + } + + s.telemetry.Track(user.ID, "user_onboarded", map[string]any{ + "org_slug": slug, + "role": req.Role, + "use_cases": req.UseCases, + "referral_source": req.ReferralSource, + }) + + _ = serverJSON.RespondJSON(w, http.StatusCreated, toOrgResponseForOwner(org)) +} + +// --- GitHub OAuth --- + +// handleGitHubLogin godoc +// @Summary GitHub OAuth login +// @Description Redirect to GitHub OAuth authorization page +// @Tags Auth +// @Success 302 "Redirect to GitHub" +// @Failure 501 {object} error.ErrorResponse +// @Router 
/auth/github [get] +func (s *Server) handleGitHubLogin(w http.ResponseWriter, r *http.Request) { + if s.cfg.Auth.GitHub.ClientID == "" { + serverError.RespondError(w, http.StatusNotImplemented, fmt.Errorf("GitHub OAuth not configured")) + return + } + state, err := auth.GenerateOAuthState() + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to generate oauth state")) + return + } + auth.SetOAuthStateCookie(w, state, s.cfg.Auth.SecureCookies) + cfg := auth.GitHubOAuthConfig(s.cfg.Auth.GitHub.ClientID, s.cfg.Auth.GitHub.ClientSecret, s.cfg.Auth.GitHub.RedirectURL) + url := cfg.AuthCodeURL(state) + http.Redirect(w, r, url, http.StatusFound) +} + +// handleGitHubCallback godoc +// @Summary GitHub OAuth callback +// @Description Handle GitHub OAuth callback, create or link user, set session cookie, and redirect to dashboard +// @Tags Auth +// @Param code query string true "OAuth authorization code" +// @Param state query string true "OAuth CSRF state parameter" +// @Success 302 "Redirect to dashboard" +// @Failure 400 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Router /auth/github/callback [get] +func (s *Server) handleGitHubCallback(w http.ResponseWriter, r *http.Request) { + if err := auth.ValidateOAuthState(r); err != nil { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("invalid oauth state: %w", err)) + return + } + auth.ClearOAuthStateCookie(w) + + code := r.URL.Query().Get("code") + if code == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("missing code parameter")) + return + } + + cfg := auth.GitHubOAuthConfig(s.cfg.Auth.GitHub.ClientID, s.cfg.Auth.GitHub.ClientSecret, s.cfg.Auth.GitHub.RedirectURL) + token, err := cfg.Exchange(r.Context(), code) + if err != nil { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to exchange code: %w", err)) + return + } + + // Fetch user info from GitHub + ghUser, emailVerified, err := 
fetchGitHubUser(r.Context(), token.AccessToken) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to fetch GitHub user: %w", err)) + return + } + + user, err := s.findOrCreateOAuthUser(r.Context(), "github", fmt.Sprintf("%d", ghUser.ID), ghUser.Email, ghUser.Name, ghUser.AvatarURL, token.AccessToken, token.RefreshToken, emailVerified) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to process OAuth user: %w", err)) + return + } + + rawToken, _, err := auth.CreateSession(r.Context(), s.store, user.ID, r.RemoteAddr, r.UserAgent(), s.cfg.Auth.SessionTTL) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create session")) + return + } + + auth.SetSessionCookie(w, rawToken, s.cfg.Auth.SessionTTL, s.cfg.Auth.SecureCookies) + s.telemetry.Track(user.ID, "user_logged_in", map[string]any{"provider": "github"}) + http.Redirect(w, r, s.cfg.Frontend.URL+"/dashboard", http.StatusFound) +} + +// --- Google OAuth --- + +// handleGoogleLogin godoc +// @Summary Google OAuth login +// @Description Redirect to Google OAuth authorization page +// @Tags Auth +// @Success 302 "Redirect to Google" +// @Failure 501 {object} error.ErrorResponse +// @Router /auth/google [get] +func (s *Server) handleGoogleLogin(w http.ResponseWriter, r *http.Request) { + if s.cfg.Auth.Google.ClientID == "" { + serverError.RespondError(w, http.StatusNotImplemented, fmt.Errorf("google OAuth not configured")) + return + } + state, err := auth.GenerateOAuthState() + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to generate oauth state")) + return + } + auth.SetOAuthStateCookie(w, state, s.cfg.Auth.SecureCookies) + cfg := auth.GoogleOAuthConfig(s.cfg.Auth.Google.ClientID, s.cfg.Auth.Google.ClientSecret, s.cfg.Auth.Google.RedirectURL) + url := cfg.AuthCodeURL(state) + http.Redirect(w, r, url, http.StatusFound) +} + 
+// handleGoogleCallback godoc +// @Summary Google OAuth callback +// @Description Handle Google OAuth callback, create or link user, set session cookie, and redirect to dashboard +// @Tags Auth +// @Param code query string true "OAuth authorization code" +// @Param state query string true "OAuth CSRF state parameter" +// @Success 302 "Redirect to dashboard" +// @Failure 400 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Router /auth/google/callback [get] +func (s *Server) handleGoogleCallback(w http.ResponseWriter, r *http.Request) { + if err := auth.ValidateOAuthState(r); err != nil { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("invalid oauth state: %w", err)) + return + } + auth.ClearOAuthStateCookie(w) + + code := r.URL.Query().Get("code") + if code == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("missing code parameter")) + return + } + + cfg := auth.GoogleOAuthConfig(s.cfg.Auth.Google.ClientID, s.cfg.Auth.Google.ClientSecret, s.cfg.Auth.Google.RedirectURL) + token, err := cfg.Exchange(r.Context(), code) + if err != nil { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to exchange code: %w", err)) + return + } + + gUser, err := fetchGoogleUser(r.Context(), token.AccessToken) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to fetch Google user: %w", err)) + return + } + + user, err := s.findOrCreateOAuthUser(r.Context(), "google", gUser.ID, gUser.Email, gUser.Name, gUser.Picture, token.AccessToken, token.RefreshToken, gUser.EmailVerified) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to process OAuth user: %w", err)) + return + } + + rawToken, _, err := auth.CreateSession(r.Context(), s.store, user.ID, r.RemoteAddr, r.UserAgent(), s.cfg.Auth.SessionTTL) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create 
session")) + return + } + + auth.SetSessionCookie(w, rawToken, s.cfg.Auth.SessionTTL, s.cfg.Auth.SecureCookies) + s.telemetry.Track(user.ID, "user_logged_in", map[string]any{"provider": "google"}) + http.Redirect(w, r, s.cfg.Frontend.URL+"/dashboard", http.StatusFound) +} + +// --- OAuth helpers --- + +type githubUserInfo struct { + ID int `json:"id"` + Email string `json:"email"` + Name string `json:"name"` + AvatarURL string `json:"avatar_url"` +} + +func fetchGitHubUser(ctx context.Context, accessToken string) (*githubUserInfo, bool, error) { + req, _ := http.NewRequestWithContext(ctx, "GET", "https://api.github.com/user", nil) + req.Header.Set("Authorization", "Bearer "+accessToken) + resp, err := oauthHTTPClient.Do(req) + if err != nil { + return nil, false, err + } + defer func() { _ = resp.Body.Close() }() + if resp.StatusCode != http.StatusOK { + return nil, false, fmt.Errorf("GitHub user API returned status %d", resp.StatusCode) + } + body, _ := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) + var user githubUserInfo + if err := json.Unmarshal(body, &user); err != nil { + return nil, false, err + } + + emailVerified := false + if user.Email == "" { + email, verified, err := fetchGitHubPrimaryEmail(ctx, accessToken) + if err == nil { + user.Email = email + emailVerified = verified + } + } else { + // Email from /user endpoint - check verification via emails API + _, verified, err := fetchGitHubPrimaryEmail(ctx, accessToken) + if err == nil { + emailVerified = verified + } + } + + return &user, emailVerified, nil +} + +func fetchGitHubPrimaryEmail(ctx context.Context, accessToken string) (string, bool, error) { + req, _ := http.NewRequestWithContext(ctx, "GET", "https://api.github.com/user/emails", nil) + req.Header.Set("Authorization", "Bearer "+accessToken) + resp, err := oauthHTTPClient.Do(req) + if err != nil { + return "", false, err + } + defer func() { _ = resp.Body.Close() }() + if resp.StatusCode != http.StatusOK { + return "", false, 
fmt.Errorf("GitHub emails API returned status %d", resp.StatusCode) + } + body, _ := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) + var emails []struct { + Email string `json:"email"` + Primary bool `json:"primary"` + Verified bool `json:"verified"` + } + if err := json.Unmarshal(body, &emails); err != nil { + return "", false, err + } + // Prefer verified+primary + for _, e := range emails { + if e.Primary && e.Verified { + return e.Email, true, nil + } + } + // Fall back to any primary + for _, e := range emails { + if e.Primary { + return e.Email, e.Verified, nil + } + } + if len(emails) > 0 { + return emails[0].Email, emails[0].Verified, nil + } + return "", false, fmt.Errorf("no email found") +} + +type googleUserInfo struct { + ID string `json:"sub"` + Email string `json:"email"` + Name string `json:"name"` + Picture string `json:"picture"` + EmailVerified bool `json:"email_verified"` +} + +func fetchGoogleUser(ctx context.Context, accessToken string) (*googleUserInfo, error) { + req, _ := http.NewRequestWithContext(ctx, "GET", "https://www.googleapis.com/oauth2/v3/userinfo", nil) + req.Header.Set("Authorization", "Bearer "+accessToken) + resp, err := oauthHTTPClient.Do(req) + if err != nil { + return nil, err + } + defer func() { _ = resp.Body.Close() }() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("google userinfo API returned status %d", resp.StatusCode) + } + body, _ := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) + var user googleUserInfo + if err := json.Unmarshal(body, &user); err != nil { + return nil, err + } + return &user, nil +} + +func (s *Server) findOrCreateOAuthUser(ctx context.Context, provider, providerID, email, name, avatarURL, accessToken, refreshToken string, emailVerified bool) (*store.User, error) { + email = strings.ToLower(email) + + // Check if OAuth account exists + oa, err := s.store.GetOAuthAccount(ctx, provider, providerID) + if err == nil { + // Account exists, get the user + return s.store.GetUser(ctx, 
oa.UserID) + } + + if !errors.Is(err, store.ErrNotFound) { + return nil, err + } + + // Check if user exists with same email + user, err := s.store.GetUserByEmail(ctx, email) + if err != nil && !errors.Is(err, store.ErrNotFound) { + return nil, err + } + + // If user was found by email, refuse to link if OAuth email is not verified + if err == nil && !emailVerified { + return nil, fmt.Errorf("oauth email not verified, cannot link to existing account") + } + + if errors.Is(err, store.ErrNotFound) { + // Create new user + newUserID, err := id.Generate("USR-") + if err != nil { + return nil, fmt.Errorf("generate user ID: %w", err) + } + user = &store.User{ + ID: newUserID, + Email: email, + DisplayName: name, + AvatarURL: avatarURL, + EmailVerified: emailVerified, + } + if err := s.store.CreateUser(ctx, user); err != nil { + return nil, err + } + } + + // Link OAuth account to user + oaID, err := id.Generate("OA-") + if err != nil { + return nil, fmt.Errorf("generate oauth account ID: %w", err) + } + oauthAccount := &store.OAuthAccount{ + ID: oaID, + UserID: user.ID, + Provider: provider, + ProviderID: providerID, + Email: email, + AccessToken: accessToken, + RefreshToken: refreshToken, + } + if err := s.store.CreateOAuthAccount(ctx, oauthAccount); err != nil { + return nil, err + } + + return user, nil +} diff --git a/api/internal/rest/auth_handlers_test.go b/api/internal/rest/auth_handlers_test.go new file mode 100644 index 00000000..ff11fa1a --- /dev/null +++ b/api/internal/rest/auth_handlers_test.go @@ -0,0 +1,644 @@ +package rest + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/aspectrr/fluid.sh/api/internal/auth" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +func TestHandleHealth(t *testing.T) { + ms := &mockStore{} + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("GET", "/v1/health", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + 
t.Fatalf("expected 200, got %d", rr.Code) + } + body := parseJSONResponse(rr) + if body["status"] != "ok" { + t.Fatalf("expected status ok, got %v", body["status"]) + } +} + +func TestHandleRegister(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + ms.CreateUserFn = func(_ context.Context, u *store.User) error { + return nil + } + ms.CreateSessionFn = func(_ context.Context, s *store.Session) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("POST", "/v1/auth/register", + strings.NewReader(`{"email":"new@example.com","password":"password123","display_name":"New User"}`)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + user, ok := body["user"].(map[string]any) + if !ok { + t.Fatal("expected user in response") + } + if user["email"] != "new@example.com" { + t.Fatalf("expected email new@example.com, got %v", user["email"]) + } + + // Check that a session cookie was set. 
+ cookies := rr.Result().Cookies() + found := false + for _, c := range cookies { + if c.Name == auth.SessionCookieName { + found = true + break + } + } + if !found { + t.Fatal("expected session cookie to be set") + } + }) + + t.Run("missing fields", func(t *testing.T) { + ms := &mockStore{} + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("POST", "/v1/auth/register", + strings.NewReader(`{"email":"new@example.com"}`)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("short password", func(t *testing.T) { + ms := &mockStore{} + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("POST", "/v1/auth/register", + strings.NewReader(`{"email":"new@example.com","password":"short","display_name":"New User"}`)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("duplicate email", func(t *testing.T) { + ms := &mockStore{} + ms.CreateUserFn = func(_ context.Context, u *store.User) error { + return store.ErrAlreadyExists + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("POST", "/v1/auth/register", + strings.NewReader(`{"email":"existing@example.com","password":"password123","display_name":"Existing"}`)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusConflict { + t.Fatalf("expected 409, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} + +func TestHandleLogin(t *testing.T) { + password := "password123" + hash, err := auth.HashPassword(password) + if err != nil { + t.Fatalf("failed to hash password: %v", err) + } + + loginUser := &store.User{ + ID: "USR-login1234", + Email: 
"login@example.com", + DisplayName: "Login User", + PasswordHash: hash, + } + + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + ms.GetUserByEmailFn = func(_ context.Context, email string) (*store.User, error) { + if email == loginUser.Email { + return loginUser, nil + } + return nil, store.ErrNotFound + } + ms.CreateSessionFn = func(_ context.Context, s *store.Session) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("POST", "/v1/auth/login", + strings.NewReader(`{"email":"login@example.com","password":"password123"}`)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + user, ok := body["user"].(map[string]any) + if !ok { + t.Fatal("expected user in response") + } + if user["email"] != loginUser.Email { + t.Fatalf("expected email %s, got %v", loginUser.Email, user["email"]) + } + + cookies := rr.Result().Cookies() + found := false + for _, c := range cookies { + if c.Name == auth.SessionCookieName { + found = true + break + } + } + if !found { + t.Fatal("expected session cookie to be set") + } + }) + + t.Run("wrong password", func(t *testing.T) { + ms := &mockStore{} + ms.GetUserByEmailFn = func(_ context.Context, email string) (*store.User, error) { + if email == loginUser.Email { + return loginUser, nil + } + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("POST", "/v1/auth/login", + strings.NewReader(`{"email":"login@example.com","password":"wrongpassword"}`)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusUnauthorized { + t.Fatalf("expected 401, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("user not found", func(t *testing.T) { + ms := &mockStore{} + 
ms.GetUserByEmailFn = func(_ context.Context, email string) (*store.User, error) { + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("POST", "/v1/auth/login", + strings.NewReader(`{"email":"nonexistent@example.com","password":"password123"}`)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusUnauthorized { + t.Fatalf("expected 401, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("missing fields", func(t *testing.T) { + ms := &mockStore{} + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("POST", "/v1/auth/login", + strings.NewReader(`{"email":"login@example.com"}`)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} + +func TestHandleLogout(t *testing.T) { + ms := &mockStore{} + ms.DeleteSessionFn = func(_ context.Context, id string) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "POST", "/v1/auth/logout", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["status"] != "logged out" { + t.Fatalf("expected status 'logged out', got %v", body["status"]) + } + + // Check that the session cookie is cleared. 
+ cookies := rr.Result().Cookies() + for _, c := range cookies { + if c.Name == auth.SessionCookieName && c.MaxAge == -1 { + return + } + } + t.Fatal("expected session cookie to be cleared") +} + +func TestHandleOnboarding(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + ms.CreateOrganizationFn = func(_ context.Context, org *store.Organization) error { + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"My Company","role":"devops","use_cases":["ci","testing"],"referral_source":"github"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["name"] != "My Company" { + t.Fatalf("expected name 'My Company', got %v", body["name"]) + } + if body["slug"] != "my-company" { + t.Fatalf("expected slug 'my-company', got %v", body["slug"]) + } + if body["owner_id"] != testUser.ID { + t.Fatalf("expected owner_id %s, got %v", testUser.ID, body["owner_id"]) + } + }) + + t.Run("success with minimal fields", func(t *testing.T) { + ms := &mockStore{} + ms.CreateOrganizationFn = func(_ context.Context, org *store.Organization) error { + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"test-org-name"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + 
+ if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["slug"] != "test-org-name" { + t.Fatalf("expected slug 'test-org-name', got %v", body["slug"]) + } + }) + + t.Run("missing org_name", func(t *testing.T) { + ms := &mockStore{} + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"role":"devops"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("invalid slug from org_name", func(t *testing.T) { + ms := &mockStore{} + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"A!"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("special chars apostrophe", func(t *testing.T) { + ms := &mockStore{} + ms.CreateOrganizationFn = func(_ context.Context, org *store.Organization) error { + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"Collin's Team"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", 
rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["slug"] != "collins-team" { + t.Fatalf("expected slug 'collins-team', got %v", body["slug"]) + } + }) + + t.Run("special chars ampersand", func(t *testing.T) { + ms := &mockStore{} + ms.CreateOrganizationFn = func(_ context.Context, org *store.Organization) error { + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"Acme & Co."}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["slug"] != "acme-co" { + t.Fatalf("expected slug 'acme-co', got %v", body["slug"]) + } + }) + + t.Run("numbers in name", func(t *testing.T) { + ms := &mockStore{} + ms.CreateOrganizationFn = func(_ context.Context, org *store.Organization) error { + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"Team 42"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["slug"] != "team-42" { + t.Fatalf("expected slug 'team-42', got %v", body["slug"]) + } + }) + + t.Run("leading trailing special chars", func(t *testing.T) { + ms := &mockStore{} + ms.CreateOrganizationFn = 
func(_ context.Context, org *store.Organization) error { + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"--My Org--"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["slug"] != "my-org" { + t.Fatalf("expected slug 'my-org', got %v", body["slug"]) + } + }) + + t.Run("consecutive special chars stripped", func(t *testing.T) { + ms := &mockStore{} + ms.CreateOrganizationFn = func(_ context.Context, org *store.Organization) error { + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"Foo...Bar"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["slug"] != "foobar" { + t.Fatalf("expected slug 'foobar', got %v", body["slug"]) + } + }) + + t.Run("underscores become hyphens parens stripped", func(t *testing.T) { + ms := &mockStore{} + ms.CreateOrganizationFn = func(_ context.Context, org *store.Organization) error { + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := 
httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"My_Org (Dev)"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["slug"] != "my-org-dev" { + t.Fatalf("expected slug 'my-org-dev', got %v", body["slug"]) + } + }) + + t.Run("all special chars fails", func(t *testing.T) { + ms := &mockStore{} + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"!!!@@@"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("mixed case preserved as lowercase", func(t *testing.T) { + ms := &mockStore{} + ms.CreateOrganizationFn = func(_ context.Context, org *store.Organization) error { + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"MyAwesomeTeam"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["slug"] != "myawesometeam" { + t.Fatalf("expected slug 'myawesometeam', got %v", body["slug"]) + } + }) + + t.Run("duplicate slug auto-resolves with suffix", func(t 
*testing.T) { + ms := &mockStore{} + calls := 0 + ms.CreateOrganizationFn = func(_ context.Context, org *store.Organization) error { + calls++ + if calls == 1 { + return store.ErrAlreadyExists + } + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"Existing Org"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/auth/onboarding", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + slug, ok := body["slug"].(string) + if !ok { + t.Fatal("expected slug in response") + } + if !strings.HasPrefix(slug, "existing-org-") { + t.Fatalf("expected slug to start with 'existing-org-', got %q", slug) + } + if len(slug) != len("existing-org-")+4 { + t.Fatalf("expected 4-char hex suffix, got slug %q", slug) + } + }) + + t.Run("unauthenticated", func(t *testing.T) { + ms := &mockStore{} + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("POST", "/v1/auth/onboarding", + strings.NewReader(`{"org_name":"My Company"}`)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusUnauthorized { + t.Fatalf("expected 401, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} + +func TestHandleMe(t *testing.T) { + ms := &mockStore{} + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/auth/me", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + user, ok := body["user"].(map[string]any) + if !ok { + t.Fatal("expected user in 
response") + } + if user["id"] != testUser.ID { + t.Fatalf("expected user id %s, got %v", testUser.ID, user["id"]) + } + if user["email"] != testUser.Email { + t.Fatalf("expected email %s, got %v", testUser.Email, user["email"]) + } +} diff --git a/api/internal/rest/billing_handlers.go b/api/internal/rest/billing_handlers.go new file mode 100644 index 00000000..005a45c8 --- /dev/null +++ b/api/internal/rest/billing_handlers.go @@ -0,0 +1,405 @@ +package rest + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "log/slog" + "math" + "net/http" + "time" + + "github.com/google/uuid" + "github.com/stripe/stripe-go/v82" + billingportal "github.com/stripe/stripe-go/v82/billingportal/session" + "github.com/stripe/stripe-go/v82/checkout/session" + "github.com/stripe/stripe-go/v82/customer" + "github.com/stripe/stripe-go/v82/webhook" + + "github.com/aspectrr/fluid.sh/api/internal/auth" + serverError "github.com/aspectrr/fluid.sh/api/internal/error" + serverJSON "github.com/aspectrr/fluid.sh/api/internal/json" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +// --- Get Billing --- + +type billingResponse struct { + Plan string `json:"plan"` + Status string `json:"status"` + FreeTier *freeTierInfo `json:"free_tier,omitempty"` + Usage *usageSummary `json:"usage,omitempty"` +} + +type freeTierInfo struct { + MaxConcurrentSandboxes int `json:"max_concurrent_sandboxes"` + MaxSourceVMs int `json:"max_source_vms"` + MaxAgentHosts int `json:"max_agent_hosts"` +} + +type usageSummary struct { + MaxConcurrentSandboxes float64 `json:"max_concurrent_sandboxes"` + SourceVMs float64 `json:"source_vms"` + AgentHosts float64 `json:"agent_hosts"` + TokensUsed float64 `json:"tokens_used"` +} + +func (s *Server) handleGetBilling(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + sub, err := s.store.GetSubscriptionByOrg(r.Context(), org.ID) + if err != nil && !errors.Is(err, store.ErrNotFound) { + 
serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get subscription")) + return + } + + resp := billingResponse{ + Plan: string(store.PlanFree), + Status: string(store.SubStatusActive), + FreeTier: &freeTierInfo{ + MaxConcurrentSandboxes: s.cfg.Billing.FreeTier.MaxConcurrentSandboxes, + MaxSourceVMs: s.cfg.Billing.FreeTier.MaxSourceVMs, + MaxAgentHosts: s.cfg.Billing.FreeTier.MaxAgentHosts, + }, + } + + if sub != nil { + resp.Plan = string(sub.Plan) + resp.Status = string(sub.Status) + if sub.Plan == store.PlanUsageBased { + resp.FreeTier = nil + } + } + + // Get current month usage + now := time.Now().UTC() + startOfMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, time.UTC) + records, err := s.store.ListUsageRecords(r.Context(), org.ID, startOfMonth, now) + if err == nil && len(records) > 0 { + summary := &usageSummary{} + for _, rec := range records { + switch rec.ResourceType { + case "max_concurrent_sandboxes": + summary.MaxConcurrentSandboxes += rec.Quantity + case "source_vm": + summary.SourceVMs += rec.Quantity + case "agent_host": + summary.AgentHosts += rec.Quantity + case "llm_token": + summary.TokensUsed += rec.Quantity + } + } + resp.Usage = summary + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, resp) +} + +// --- Subscribe --- + +func (s *Server) handleSubscribe(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgRole(w, r, store.OrgRoleOwner) + if !ok { + return + } + + user := auth.UserFromContext(r.Context()) + + if s.cfg.Billing.StripeSecretKey == "" || s.cfg.Billing.StripePriceID == "" { + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{ + "message": "Stripe integration pending configuration", + "status": "not_configured", + }) + return + } + + // Create or reuse Stripe customer + customerID := org.StripeCustomerID + if customerID == "" { + cust, err := customer.New(&stripe.CustomerParams{ + Email: stripe.String(user.Email), + Name: stripe.String(org.Name), + Params: 
stripe.Params{ + Metadata: map[string]string{ + "org_id": org.ID, + }, + }, + }) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create Stripe customer")) + return + } + customerID = cust.ID + org.StripeCustomerID = customerID + if err := s.store.UpdateOrganization(r.Context(), org); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to save stripe customer")) + return + } + } + + // Create checkout session + params := &stripe.CheckoutSessionParams{ + Customer: stripe.String(customerID), + Mode: stripe.String(string(stripe.CheckoutSessionModeSubscription)), + LineItems: []*stripe.CheckoutSessionLineItemParams{ + { + Price: stripe.String(s.cfg.Billing.StripePriceID), // usage-based price ID + Quantity: nil, // usage-based - no fixed quantity + }, + }, + SuccessURL: stripe.String(s.cfg.Frontend.URL + "/billing?success=true"), + CancelURL: stripe.String(s.cfg.Frontend.URL + "/billing?canceled=true"), + } + + sess, err := session.New(params) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create checkout session")) + return + } + + s.telemetry.Track(user.ID, "billing_subscribed", map[string]any{"org_id": org.ID}) + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{ + "checkout_url": sess.URL, + }) +} + +// --- Billing Portal --- + +func (s *Server) handleBillingPortal(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgRole(w, r, store.OrgRoleOwner) + if !ok { + return + } + + if s.cfg.Billing.StripeSecretKey == "" || org.StripeCustomerID == "" { + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{ + "message": "Stripe integration pending configuration", + "status": "not_configured", + }) + return + } + + params := &stripe.BillingPortalSessionParams{ + Customer: stripe.String(org.StripeCustomerID), + ReturnURL: stripe.String(s.cfg.Frontend.URL + "/billing"), + } + + sess, err 
:= billingportal.New(params) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create billing portal session")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{ + "portal_url": sess.URL, + }) +} + +// --- Usage --- + +func (s *Server) handleGetUsage(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + now := time.Now().UTC() + startOfMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, time.UTC) + + records, err := s.store.ListUsageRecords(r.Context(), org.ID, startOfMonth, now) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get usage records")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "records": records, + "total": len(records), + }) +} + +// --- Calculator (public) --- + +type calculatorRequest struct { + ConcurrentSandboxes int `json:"concurrent_sandboxes"` + SourceVMs int `json:"source_vms"` + AgentHosts int `json:"agent_hosts"` +} + +type calculatorResponse struct { + SandboxCost float64 `json:"sandbox_cost"` + SourceVMCost float64 `json:"source_vm_cost"` + AgentHostCost float64 `json:"agent_host_cost"` + TotalMonthly float64 `json:"total_monthly"` + Currency string `json:"currency"` +} + +func (s *Server) handleCalculator(w http.ResponseWriter, r *http.Request) { + var req calculatorRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + prices := s.cfg.Billing.Prices + freeTier := s.cfg.Billing.FreeTier + + // Apply free tier deductions (min 0 billable) + billableSandboxes := req.ConcurrentSandboxes - freeTier.MaxConcurrentSandboxes + if billableSandboxes < 0 { + billableSandboxes = 0 + } + billableSourceVMs := req.SourceVMs - freeTier.MaxSourceVMs + if billableSourceVMs < 0 { + billableSourceVMs = 0 + } + 
billableAgentHosts := req.AgentHosts - freeTier.MaxAgentHosts + if billableAgentHosts < 0 { + billableAgentHosts = 0 + } + + sandboxCost := float64(billableSandboxes) * float64(prices.SandboxMonthlyCents) / 100.0 + sourceVMCost := float64(billableSourceVMs) * float64(prices.SourceVMMonthly) / 100.0 + agentHostCost := float64(billableAgentHosts) * float64(prices.AgentHostMonthly) / 100.0 + + total := sandboxCost + sourceVMCost + agentHostCost + + _ = serverJSON.RespondJSON(w, http.StatusOK, calculatorResponse{ + SandboxCost: math.Round(sandboxCost*100) / 100, + SourceVMCost: math.Round(sourceVMCost*100) / 100, + AgentHostCost: math.Round(agentHostCost*100) / 100, + TotalMonthly: math.Round(total*100) / 100, + Currency: "USD", + }) +} + +// --- Stripe Webhook --- + +func (s *Server) handleStripeWebhook(w http.ResponseWriter, r *http.Request) { + if s.cfg.Billing.StripeSecretKey == "" { + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{"status": "not_configured"}) + return + } + + if s.cfg.Billing.StripeWebhookSecret == "" { + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{"status": "webhook_not_configured"}) + return + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 65536)) + if err != nil { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to read body")) + return + } + + event, err := webhook.ConstructEventWithOptions(body, r.Header.Get("Stripe-Signature"), s.cfg.Billing.StripeWebhookSecret, webhook.ConstructEventOptions{ + IgnoreAPIVersionMismatch: true, + }) + if err != nil { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("webhook signature verification failed")) + return + } + + switch event.Type { + case "checkout.session.completed": + var sess stripe.CheckoutSession + if err := json.Unmarshal(event.Data.Raw, &sess); err != nil { + slog.Error("webhook: unmarshal checkout session", "error", err) + break + } + if sess.Customer != nil { + if sess.Subscription == nil { + slog.Warn("webhook: 
checkout session has no subscription") + break + } + org, err := s.store.GetOrganizationByStripeCustomerID(r.Context(), sess.Customer.ID) + if err != nil { + slog.Error("webhook: lookup org by stripe customer", "error", err) + break + } + // Idempotency: check if subscription already exists + if _, err := s.store.GetSubscriptionByStripeID(r.Context(), sess.Subscription.ID); err == nil { + break // already processed + } + sub := &store.Subscription{ + ID: uuid.New().String(), + OrgID: org.ID, + Plan: store.PlanUsageBased, + StripeSubscriptionID: sess.Subscription.ID, + Status: store.SubStatusActive, + CurrentPeriodStart: time.Now().UTC(), + CurrentPeriodEnd: time.Now().UTC().AddDate(0, 1, 0), + } + if err := s.store.CreateSubscription(r.Context(), sub); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create subscription")) + return + } + } + + case "customer.subscription.updated": + var sub stripe.Subscription + if err := json.Unmarshal(event.Data.Raw, &sub); err != nil { + slog.Error("webhook: unmarshal subscription update", "error", err) + break + } + var existing *store.Subscription + if sub.Customer != nil { + org, err := s.store.GetOrganizationByStripeCustomerID(r.Context(), sub.Customer.ID) + if err != nil { + slog.Error("webhook: lookup org for subscription update", "error", err) + break + } + existing, err = s.store.GetSubscriptionByOrg(r.Context(), org.ID) + if err != nil { + slog.Error("webhook: get subscription for update", "error", err) + break + } + } else { + break + } + newStatus := store.SubscriptionStatus(sub.Status) + switch newStatus { + case store.SubStatusActive, store.SubStatusPastDue, store.SubStatusCancelled: + existing.Status = newStatus + default: + existing.Status = store.SubStatusPastDue + } + if err := s.store.UpdateSubscription(r.Context(), existing); err != nil { + slog.Error("webhook: update subscription", "error", err) + serverError.RespondError(w, http.StatusInternalServerError, 
fmt.Errorf("failed to update subscription")) + return + } + + case "customer.subscription.deleted": + var sub stripe.Subscription + if err := json.Unmarshal(event.Data.Raw, &sub); err != nil { + slog.Error("webhook: unmarshal subscription delete", "error", err) + break + } + if sub.Customer != nil { + org, err := s.store.GetOrganizationByStripeCustomerID(r.Context(), sub.Customer.ID) + if err != nil { + slog.Error("webhook: lookup org for subscription delete", "error", err) + break + } + existing, err := s.store.GetSubscriptionByOrg(r.Context(), org.ID) + if err != nil { + slog.Error("webhook: get subscription for delete", "error", err) + break + } + existing.Status = store.SubStatusCancelled + if err := s.store.UpdateSubscription(r.Context(), existing); err != nil { + slog.Error("webhook: cancel subscription", "error", err) + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to cancel subscription")) + return + } + } + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{"status": "received"}) +} diff --git a/api/internal/rest/billing_handlers_test.go b/api/internal/rest/billing_handlers_test.go new file mode 100644 index 00000000..38d1cdeb --- /dev/null +++ b/api/internal/rest/billing_handlers_test.go @@ -0,0 +1,167 @@ +package rest + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +func TestHandleGetBilling(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + + // No subscription - free plan + ms.GetSubscriptionByOrgFn = func(_ context.Context, orgID string) (*store.Subscription, error) { + return nil, store.ErrNotFound + } + ms.ListUsageRecordsFn = func(_ context.Context, orgID string, from, to time.Time) ([]*store.UsageRecord, error) { + return nil, nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/billing", 
nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + var resp billingResponse + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to parse JSON: %v", err) + } + + if resp.Plan != string(store.PlanFree) { + t.Fatalf("expected plan 'free', got %q", resp.Plan) + } + if resp.Status != string(store.SubStatusActive) { + t.Fatalf("expected status 'active', got %q", resp.Status) + } + if resp.FreeTier == nil { + t.Fatal("expected free_tier to be present") + } + cfg := testConfig() + if resp.FreeTier.MaxConcurrentSandboxes != cfg.Billing.FreeTier.MaxConcurrentSandboxes { + t.Fatalf("expected max_concurrent_sandboxes=%d, got %d", cfg.Billing.FreeTier.MaxConcurrentSandboxes, resp.FreeTier.MaxConcurrentSandboxes) + } + if resp.FreeTier.MaxSourceVMs != cfg.Billing.FreeTier.MaxSourceVMs { + t.Fatalf("expected max_source_vms=%d, got %d", cfg.Billing.FreeTier.MaxSourceVMs, resp.FreeTier.MaxSourceVMs) + } + if resp.FreeTier.MaxAgentHosts != cfg.Billing.FreeTier.MaxAgentHosts { + t.Fatalf("expected max_agent_hosts=%d, got %d", cfg.Billing.FreeTier.MaxAgentHosts, resp.FreeTier.MaxAgentHosts) + } +} + +func TestHandleGetBillingWithUsage(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + + ms.GetSubscriptionByOrgFn = func(_ context.Context, orgID string) (*store.Subscription, error) { + return nil, store.ErrNotFound + } + ms.ListUsageRecordsFn = func(_ context.Context, orgID string, from, to time.Time) ([]*store.UsageRecord, error) { + return []*store.UsageRecord{ + {ID: "u1", OrgID: orgID, ResourceType: "max_concurrent_sandboxes", Quantity: 10.5}, + {ID: "u2", OrgID: orgID, ResourceType: "source_vm", Quantity: 2}, + {ID: "u3", OrgID: orgID, ResourceType: "agent_host", Quantity: 1}, + {ID: "u4", OrgID: orgID, ResourceType: "llm_token", Quantity: 5000}, + }, nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := 
authenticatedRequest(ms, "GET", "/v1/orgs/test-org/billing", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + var resp billingResponse + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to parse JSON: %v", err) + } + if resp.Usage == nil { + t.Fatal("expected usage to be present") + } + if resp.Usage.MaxConcurrentSandboxes != 10.5 { + t.Fatalf("expected max_concurrent_sandboxes=10.5, got %v", resp.Usage.MaxConcurrentSandboxes) + } + if resp.Usage.SourceVMs != 2 { + t.Fatalf("expected source_vms=2, got %v", resp.Usage.SourceVMs) + } + if resp.Usage.AgentHosts != 1 { + t.Fatalf("expected agent_hosts=1, got %v", resp.Usage.AgentHosts) + } + if resp.Usage.TokensUsed != 5000 { + t.Fatalf("expected tokens_used=5000, got %v", resp.Usage.TokensUsed) + } +} + +func TestHandleCalculator(t *testing.T) { + ms := &mockStore{} + cfg := testConfig() + s := newTestServer(ms, cfg) + + body := bytes.NewBufferString(`{ + "concurrent_sandboxes": 2, + "source_vms": 5, + "agent_hosts": 2 + }`) + + rr := httptest.NewRecorder() + req := httptest.NewRequest("POST", "/v1/billing/calculator", body) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + var resp calculatorResponse + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to parse JSON: %v", err) + } + + // SandboxCost: (2 - 1 free) * 5000 cents / 100 = $50.00 + expectedSandboxCost := 50.0 + if resp.SandboxCost != expectedSandboxCost { + t.Fatalf("expected sandbox_cost=%v, got %v", expectedSandboxCost, resp.SandboxCost) + } + + // SourceVMCost: (5 - 3 free) * 500 cents / 100 = $10.00 + expectedSourceVMCost := 10.0 + if resp.SourceVMCost != expectedSourceVMCost { + t.Fatalf("expected source_vm_cost=%v, got %v", expectedSourceVMCost, 
resp.SourceVMCost) + } + + // AgentHostCost: (2 - 1 free) * 1000 cents / 100 = $10.00 + expectedAgentHostCost := 10.0 + if resp.AgentHostCost != expectedAgentHostCost { + t.Fatalf("expected agent_host_cost=%v, got %v", expectedAgentHostCost, resp.AgentHostCost) + } + + // Total: 50 + 10 + 10 = 70.00 + expectedTotal := 70.0 + if resp.TotalMonthly != expectedTotal { + t.Fatalf("expected total_monthly=%v, got %v", expectedTotal, resp.TotalMonthly) + } + + if resp.Currency != "USD" { + t.Fatalf("expected currency=USD, got %q", resp.Currency) + } +} + +// TestHandleCalculatorWithTokens - commented out, token billing not yet ready for integration. +/* +func TestHandleCalculatorWithTokens(t *testing.T) { + ... +} +*/ diff --git a/api/internal/rest/docs_progress.go b/api/internal/rest/docs_progress.go new file mode 100644 index 00000000..456824d1 --- /dev/null +++ b/api/internal/rest/docs_progress.go @@ -0,0 +1,149 @@ +package rest + +import ( + "crypto/rand" + "fmt" + "net/http" + "sync" + "time" + + serverError "github.com/aspectrr/fluid.sh/api/internal/error" + serverJSON "github.com/aspectrr/fluid.sh/api/internal/json" +) + +func generateSessionCode() string { + const chars = "ABCDEFGHJKLMNPQRSTUVWXYZ23456789" + b := make([]byte, 6) + for i := range b { + for { + _, _ = rand.Read(b[i : i+1]) + idx := int(b[i]) + if idx < 256-(256%len(chars)) { + b[i] = chars[idx%len(chars)] + break + } + } + } + return string(b) +} + +type docsSession struct { + StorageKey string + CompletedSteps map[int]bool + CreatedAt time.Time +} + +type docsProgressStore struct { + mu sync.Mutex + sessions map[string]*docsSession +} + +var docsProgress = &docsProgressStore{ + sessions: make(map[string]*docsSession), +} + +func (d *docsProgressStore) cleanup() { + d.mu.Lock() + defer d.mu.Unlock() + cutoff := time.Now().Add(-1 * time.Hour) + for code, s := range d.sessions { + if s.CreatedAt.Before(cutoff) { + delete(d.sessions, code) + } + } +} + +type docsRegisterRequest struct { + StorageKey 
string `json:"storage_key"` +} + +type docsRegisterResponse struct { + SessionCode string `json:"session_code"` +} + +func (s *Server) handleDocsProgressRegister(w http.ResponseWriter, r *http.Request) { + var req docsRegisterRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + docsProgress.cleanup() + + code := generateSessionCode() + + docsProgress.mu.Lock() + if len(docsProgress.sessions) >= 10000 { + docsProgress.mu.Unlock() + serverError.RespondError(w, http.StatusServiceUnavailable, fmt.Errorf("too many active sessions")) + return + } + docsProgress.sessions[code] = &docsSession{ + StorageKey: req.StorageKey, + CompletedSteps: make(map[int]bool), + CreatedAt: time.Now(), + } + docsProgress.mu.Unlock() + + _ = serverJSON.RespondJSON(w, http.StatusOK, docsRegisterResponse{SessionCode: code}) +} + +type completeRequest struct { + SessionCode string `json:"session_code"` + StepIndex int `json:"step_index"` +} + +func (s *Server) handleDocsProgressComplete(w http.ResponseWriter, r *http.Request) { + var req completeRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + if req.SessionCode == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("session code is required")) + return + } + + docsProgress.mu.Lock() + session, ok := docsProgress.sessions[req.SessionCode] + if ok { + session.CompletedSteps[req.StepIndex] = true + } + docsProgress.mu.Unlock() + + if !ok { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("session not found")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{"status": "ok"}) +} + +type progressResponse struct { + CompletedSteps []int `json:"completed_steps"` +} + +func (s *Server) handleDocsProgressGet(w http.ResponseWriter, r *http.Request) { + code := r.URL.Query().Get("code") + if code 
== "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("code is required")) + return + } + + docsProgress.mu.Lock() + session, ok := docsProgress.sessions[code] + var steps []int + if ok { + for idx := range session.CompletedSteps { + steps = append(steps, idx) + } + } + docsProgress.mu.Unlock() + + if !ok { + _ = serverJSON.RespondJSON(w, http.StatusOK, progressResponse{CompletedSteps: []int{}}) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, progressResponse{CompletedSteps: steps}) +} diff --git a/api/internal/rest/host_handlers.go b/api/internal/rest/host_handlers.go new file mode 100644 index 00000000..7ff4555f --- /dev/null +++ b/api/internal/rest/host_handlers.go @@ -0,0 +1,233 @@ +package rest + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "net/http" + + "github.com/go-chi/chi/v5" + + "github.com/aspectrr/fluid.sh/api/internal/auth" + serverError "github.com/aspectrr/fluid.sh/api/internal/error" + "github.com/aspectrr/fluid.sh/api/internal/id" + serverJSON "github.com/aspectrr/fluid.sh/api/internal/json" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +// handleListHosts godoc +// @Summary List hosts +// @Description List all connected sandbox hosts +// @Tags Hosts +// @Produce json +// @Param slug path string true "Organization slug" +// @Success 200 {object} map[string]interface{} +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/hosts [get] +func (s *Server) handleListHosts(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + hosts, err := s.orchestrator.ListHosts(r.Context(), org.ID) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to list hosts")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "hosts": hosts, + "count": 
len(hosts), + }) +} + +// handleGetHost godoc +// @Summary Get host +// @Description Get details of a specific connected host +// @Tags Hosts +// @Produce json +// @Param slug path string true "Organization slug" +// @Param hostID path string true "Host ID" +// @Success 200 {object} orchestrator.HostInfo +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/hosts/{hostID} [get] +func (s *Server) handleGetHost(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + hostID := chi.URLParam(r, "hostID") + host, err := s.orchestrator.GetHost(r.Context(), hostID, org.ID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("host not found or not connected")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, host) +} + +// --- Host Tokens --- + +type createHostTokenRequest struct { + Name string `json:"name"` +} + +type hostTokenResponse struct { + ID string `json:"id"` + Name string `json:"name"` + Token string `json:"token,omitempty"` // Only set on creation. 
+ CreatedAt string `json:"created_at"` +} + +// handleCreateHostToken godoc +// @Summary Create host token +// @Description Generate a new host authentication token (owner or admin only) +// @Tags Host Tokens +// @Accept json +// @Produce json +// @Param slug path string true "Organization slug" +// @Param request body createHostTokenRequest true "Token details" +// @Success 201 {object} hostTokenResponse +// @Failure 400 {object} error.ErrorResponse +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/hosts/tokens [post] +func (s *Server) handleCreateHostToken(w http.ResponseWriter, r *http.Request) { + org, member, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + if member.Role != store.OrgRoleOwner && member.Role != store.OrgRoleAdmin { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("insufficient permissions")) + return + } + + var req createHostTokenRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.Name == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("name is required")) + return + } + + // Generate raw token. 
+ rawBytes := make([]byte, 32) + if _, err := rand.Read(rawBytes); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to generate token")) + return + } + rawToken := hex.EncodeToString(rawBytes) + + tokenID, err := id.Generate("HTK-") + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to generate token ID")) + return + } + + token := &store.HostToken{ + ID: tokenID, + OrgID: org.ID, + Name: req.Name, + TokenHash: auth.HashToken(rawToken), + } + + if err := s.store.CreateHostToken(r.Context(), token); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create host token")) + return + } + + if user := auth.UserFromContext(r.Context()); user != nil { + s.telemetry.Track(user.ID, "host_token_created", map[string]any{"org_id": org.ID}) + } + + _ = serverJSON.RespondJSON(w, http.StatusCreated, hostTokenResponse{ + ID: token.ID, + Name: token.Name, + Token: rawToken, + CreatedAt: token.CreatedAt.Format("2006-01-02T15:04:05Z07:00"), + }) +} + +// handleListHostTokens godoc +// @Summary List host tokens +// @Description List all host tokens for the organization +// @Tags Host Tokens +// @Produce json +// @Param slug path string true "Organization slug" +// @Success 200 {object} map[string]interface{} +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/hosts/tokens [get] +func (s *Server) handleListHostTokens(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + tokens, err := s.store.ListHostTokensByOrg(r.Context(), org.ID) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to list host tokens")) + return + } + + result := make([]hostTokenResponse, 0, len(tokens)) + for _, t := range tokens { + 
result = append(result, hostTokenResponse{ + ID: t.ID, + Name: t.Name, + CreatedAt: t.CreatedAt.Format("2006-01-02T15:04:05Z07:00"), + }) + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "tokens": result, + "count": len(result), + }) +} + +// handleDeleteHostToken godoc +// @Summary Delete host token +// @Description Delete a host token (owner or admin only) +// @Tags Host Tokens +// @Produce json +// @Param slug path string true "Organization slug" +// @Param tokenID path string true "Token ID" +// @Success 200 {object} map[string]string +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/hosts/tokens/{tokenID} [delete] +func (s *Server) handleDeleteHostToken(w http.ResponseWriter, r *http.Request) { + org, member, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + if member.Role != store.OrgRoleOwner && member.Role != store.OrgRoleAdmin { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("insufficient permissions")) + return + } + + tokenID := chi.URLParam(r, "tokenID") + if err := s.store.DeleteHostToken(r.Context(), org.ID, tokenID); err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("host token not found")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{"status": "deleted"}) +} diff --git a/api/internal/rest/host_handlers_test.go b/api/internal/rest/host_handlers_test.go new file mode 100644 index 00000000..1e9f4c56 --- /dev/null +++ b/api/internal/rest/host_handlers_test.go @@ -0,0 +1,196 @@ +package rest + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +func TestHandleListHostTokens(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.ListHostTokensByOrgFn = func(_ context.Context, orgID string) ([]store.HostToken, error) { + return []store.HostToken{ + { + ID: 
"HTK-1234", + OrgID: testOrg.ID, + Name: "test-token", + }, + }, nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/hosts/tokens", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + tokens, ok := body["tokens"].([]any) + if !ok { + t.Fatal("expected tokens array in response") + } + if len(tokens) != 1 { + t.Fatalf("expected 1 token, got %d", len(tokens)) + } +} + +func TestHandleCreateHostToken(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.CreateHostTokenFn = func(_ context.Context, token *store.HostToken) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/orgs/test-org/hosts/tokens", + strings.NewReader(`{"name":"my-host-token"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/hosts/tokens", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["name"] != "my-host-token" { + t.Fatalf("expected name 'my-host-token', got %v", body["name"]) + } + // Token should be returned on creation. 
+ if body["token"] == nil || body["token"] == "" { + t.Fatal("expected token to be returned on creation") + } + }) + + t.Run("missing name", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/orgs/test-org/hosts/tokens", + strings.NewReader(`{}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/hosts/tokens", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("insufficient permissions - member role", func(t *testing.T) { + ms := &mockStore{} + ms.GetOrganizationBySlugFn = func(_ context.Context, slug string) (*store.Organization, error) { + if slug == testOrg.Slug { + return testOrg, nil + } + return nil, store.ErrNotFound + } + ms.GetOrgMemberFn = func(_ context.Context, orgID, userID string) (*store.OrgMember, error) { + return &store.OrgMember{ + ID: "MBR-regular", + OrgID: testOrg.ID, + UserID: testUser.ID, + Role: store.OrgRoleMember, + }, nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/orgs/test-org/hosts/tokens", + strings.NewReader(`{"name":"forbidden-token"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/hosts/tokens", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusForbidden { + t.Fatalf("expected 403, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} + +func TestHandleDeleteHostToken(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.DeleteHostTokenFn = func(_ context.Context, orgID, id string) error { + if orgID != testOrg.ID { + return store.ErrNotFound + } + return nil + } + s := newTestServer(ms, nil) + + rr := 
httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/hosts/tokens/HTK-1234", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["status"] != "deleted" { + t.Fatalf("expected status 'deleted', got %v", body["status"]) + } + }) + + t.Run("cross-org deletion rejected", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.DeleteHostTokenFn = func(_ context.Context, orgID, id string) error { + // Token belongs to a different org, so scoped WHERE returns no rows + if orgID != "ORG-other" { + return store.ErrNotFound + } + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/hosts/tokens/HTK-other-org", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404 for cross-org token, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("insufficient permissions", func(t *testing.T) { + ms := &mockStore{} + ms.GetOrganizationBySlugFn = func(_ context.Context, slug string) (*store.Organization, error) { + if slug == testOrg.Slug { + return testOrg, nil + } + return nil, store.ErrNotFound + } + ms.GetOrgMemberFn = func(_ context.Context, orgID, userID string) (*store.OrgMember, error) { + return &store.OrgMember{ + ID: "MBR-regular", + OrgID: testOrg.ID, + UserID: testUser.ID, + Role: store.OrgRoleMember, + }, nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/hosts/tokens/HTK-1234", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusForbidden { + t.Fatalf("expected 403, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} diff --git a/api/internal/rest/org_handlers.go b/api/internal/rest/org_handlers.go new file mode 100644 index 00000000..3f6c7770 --- /dev/null +++ 
b/api/internal/rest/org_handlers.go @@ -0,0 +1,585 @@ +package rest + +import ( + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/go-chi/chi/v5" + + "github.com/aspectrr/fluid.sh/api/internal/auth" + serverError "github.com/aspectrr/fluid.sh/api/internal/error" + "github.com/aspectrr/fluid.sh/api/internal/id" + serverJSON "github.com/aspectrr/fluid.sh/api/internal/json" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +var slugRegex = regexp.MustCompile(`^[a-z0-9][a-z0-9-]{1,48}[a-z0-9]$`) + +const maxSlugLen = 50 + +func generateSlugSuffix() string { + b := make([]byte, 2) + _, _ = rand.Read(b) + return hex.EncodeToString(b) +} + +func isDuplicateSlugErr(err error) bool { + return errors.Is(err, store.ErrAlreadyExists) || strings.Contains(err.Error(), "duplicate key") +} + +// --- Create Org --- + +type createOrgRequest struct { + Name string `json:"name"` + Slug string `json:"slug"` +} + +type orgResponse struct { + ID string `json:"id"` + Name string `json:"name"` + Slug string `json:"slug"` + OwnerID string `json:"owner_id"` + StripeCustomerID string `json:"stripe_customer_id,omitempty"` + CreatedAt string `json:"created_at"` +} + +func toOrgResponse(o *store.Organization) *orgResponse { + return &orgResponse{ + ID: o.ID, + Name: o.Name, + Slug: o.Slug, + OwnerID: o.OwnerID, + CreatedAt: o.CreatedAt.Format("2006-01-02T15:04:05Z07:00"), + } +} + +func toOrgResponseForOwner(o *store.Organization) *orgResponse { + r := toOrgResponse(o) + r.StripeCustomerID = o.StripeCustomerID + return r +} + +// handleCreateOrg godoc +// @Summary Create organization +// @Description Create a new organization and add the current user as owner +// @Tags Organizations +// @Accept json +// @Produce json +// @Param request body createOrgRequest true "Organization details" +// @Success 201 {object} orgResponse +// @Failure 400 {object} error.ErrorResponse +// @Failure 409 {object} error.ErrorResponse +// @Failure 500 
{object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs [post] +func (s *Server) handleCreateOrg(w http.ResponseWriter, r *http.Request) { + user := auth.UserFromContext(r.Context()) + + var req createOrgRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.Name == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("name is required")) + return + } + + slug := req.Slug + if slug == "" { + slug = strings.ToLower(req.Name) + slug = strings.Map(func(r rune) rune { + if (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') { + return r + } + if r == ' ' || r == '-' || r == '_' { + return '-' + } + return -1 + }, slug) + for strings.Contains(slug, "--") { + slug = strings.ReplaceAll(slug, "--", "-") + } + slug = strings.Trim(slug, "-") + } + if !slugRegex.MatchString(slug) { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("slug must be 3-50 lowercase alphanumeric chars and hyphens")) + return + } + + autoSlug := req.Slug == "" + + orgID, err := id.Generate("ORG-") + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to generate org ID")) + return + } + + org := &store.Organization{ + ID: orgID, + Name: req.Name, + Slug: slug, + OwnerID: user.ID, + } + + baseSlug := slug + for attempt := 0; attempt < 3; attempt++ { + if attempt > 0 { + if !autoSlug { + break + } + suffix := generateSlugSuffix() + // Truncate base so base-XXXX fits within maxSlugLen + maxBase := maxSlugLen - 1 - len(suffix) // 1 for the hyphen + b := baseSlug + if len(b) > maxBase { + b = strings.TrimRight(b[:maxBase], "-") + } + slug = b + "-" + suffix + org.Slug = slug + } + + err = s.store.WithTx(r.Context(), func(tx store.DataStore) error { + if err := tx.CreateOrganization(r.Context(), org); err != nil { + return err + } + memberID, err := id.Generate("MBR-") + if err != nil { + return fmt.Errorf("generate 
member ID: %w", err) + } + member := &store.OrgMember{ + ID: memberID, + OrgID: org.ID, + UserID: user.ID, + Role: store.OrgRoleOwner, + } + return tx.CreateOrgMember(r.Context(), member) + }) + if err == nil { + break + } + if !isDuplicateSlugErr(err) { + break + } + } + + if err != nil { + if isDuplicateSlugErr(err) { + serverError.RespondErrorMsg(w, http.StatusConflict, "organization slug already taken", err) + return + } + serverError.RespondErrorMsg(w, http.StatusInternalServerError, "failed to create organization", err) + return + } + + s.telemetry.Track(user.ID, "org_created", map[string]any{"org_slug": slug}) + + _ = serverJSON.RespondJSON(w, http.StatusCreated, toOrgResponseForOwner(org)) +} + +// --- List Orgs --- + +// handleListOrgs godoc +// @Summary List organizations +// @Description List all organizations the current user belongs to +// @Tags Organizations +// @Produce json +// @Success 200 {object} map[string]interface{} +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs [get] +func (s *Server) handleListOrgs(w http.ResponseWriter, r *http.Request) { + user := auth.UserFromContext(r.Context()) + orgs, err := s.store.ListOrganizationsByUser(r.Context(), user.ID) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to list organizations")) + return + } + + result := make([]*orgResponse, 0, len(orgs)) + for _, o := range orgs { + result = append(result, toOrgResponse(o)) + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "organizations": result, + "total": len(result), + }) +} + +// --- Get Org --- + +// handleGetOrg godoc +// @Summary Get organization +// @Description Get organization details by slug +// @Tags Organizations +// @Produce json +// @Param slug path string true "Organization slug" +// @Success 200 {object} orgResponse +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} 
error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug} [get] +func (s *Server) handleGetOrg(w http.ResponseWriter, r *http.Request) { + slug := chi.URLParam(r, "slug") + user := auth.UserFromContext(r.Context()) + + org, err := s.store.GetOrganizationBySlug(r.Context(), slug) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("organization not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get organization")) + return + } + + // Check membership + member, err := s.store.GetOrgMember(r.Context(), org.ID, user.ID) + if err != nil { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("not a member of this organization")) + return + } + + if member.Role == store.OrgRoleOwner { + _ = serverJSON.RespondJSON(w, http.StatusOK, toOrgResponseForOwner(org)) + } else { + _ = serverJSON.RespondJSON(w, http.StatusOK, toOrgResponse(org)) + } +} + +// --- Update Org --- + +type updateOrgRequest struct { + Name *string `json:"name,omitempty"` +} + +// handleUpdateOrg godoc +// @Summary Update organization +// @Description Update organization details (owner or admin only) +// @Tags Organizations +// @Accept json +// @Produce json +// @Param slug path string true "Organization slug" +// @Param request body updateOrgRequest true "Fields to update" +// @Success 200 {object} orgResponse +// @Failure 400 {object} error.ErrorResponse +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug} [patch] +func (s *Server) handleUpdateOrg(w http.ResponseWriter, r *http.Request) { + slug := chi.URLParam(r, "slug") + user := auth.UserFromContext(r.Context()) + + org, err := s.store.GetOrganizationBySlug(r.Context(), slug) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, 
http.StatusNotFound, fmt.Errorf("organization not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get organization")) + return + } + + member, err := s.store.GetOrgMember(r.Context(), org.ID, user.ID) + if err != nil || (member.Role != store.OrgRoleOwner && member.Role != store.OrgRoleAdmin) { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("insufficient permissions")) + return + } + + var req updateOrgRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.Name != nil { + org.Name = *req.Name + } + + if err := s.store.UpdateOrganization(r.Context(), org); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to update organization")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, toOrgResponse(org)) +} + +// --- Delete Org --- + +// handleDeleteOrg godoc +// @Summary Delete organization +// @Description Delete an organization (owner only) +// @Tags Organizations +// @Produce json +// @Param slug path string true "Organization slug" +// @Success 200 {object} map[string]string +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug} [delete] +func (s *Server) handleDeleteOrg(w http.ResponseWriter, r *http.Request) { + slug := chi.URLParam(r, "slug") + user := auth.UserFromContext(r.Context()) + + org, err := s.store.GetOrganizationBySlug(r.Context(), slug) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("organization not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get organization")) + return + } + + if org.OwnerID != user.ID { + serverError.RespondError(w, 
http.StatusForbidden, fmt.Errorf("only the owner can delete an organization")) + return + } + + if err := s.store.DeleteOrganization(r.Context(), org.ID); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to delete organization")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{"status": "deleted"}) +} + +// --- Members --- + +type memberResponse struct { + ID string `json:"id"` + UserID string `json:"user_id"` + Role string `json:"role"` + CreatedAt string `json:"created_at"` +} + +// handleListMembers godoc +// @Summary List members +// @Description List all members of an organization +// @Tags Members +// @Produce json +// @Param slug path string true "Organization slug" +// @Success 200 {object} map[string]interface{} +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/members [get] +func (s *Server) handleListMembers(w http.ResponseWriter, r *http.Request) { + slug := chi.URLParam(r, "slug") + user := auth.UserFromContext(r.Context()) + + org, err := s.store.GetOrganizationBySlug(r.Context(), slug) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("organization not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get organization")) + return + } + + _, err = s.store.GetOrgMember(r.Context(), org.ID, user.ID) + if err != nil { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("not a member of this organization")) + return + } + + members, err := s.store.ListOrgMembers(r.Context(), org.ID) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to list members")) + return + } + + result := make([]*memberResponse, 0, len(members)) + for _, m := range members { + result = 
append(result, &memberResponse{ + ID: m.ID, + UserID: m.UserID, + Role: string(m.Role), + CreatedAt: m.CreatedAt.Format("2006-01-02T15:04:05Z07:00"), + }) + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "members": result, + "total": len(result), + }) +} + +type addMemberRequest struct { + Email string `json:"email"` + Role string `json:"role"` +} + +// handleAddMember godoc +// @Summary Add member +// @Description Add a user to an organization (owner or admin only) +// @Tags Members +// @Accept json +// @Produce json +// @Param slug path string true "Organization slug" +// @Param request body addMemberRequest true "Member details" +// @Success 201 {object} memberResponse +// @Failure 400 {object} error.ErrorResponse +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 409 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/members [post] +func (s *Server) handleAddMember(w http.ResponseWriter, r *http.Request) { + slug := chi.URLParam(r, "slug") + user := auth.UserFromContext(r.Context()) + + org, err := s.store.GetOrganizationBySlug(r.Context(), slug) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("organization not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get organization")) + return + } + + member, err := s.store.GetOrgMember(r.Context(), org.ID, user.ID) + if err != nil || (member.Role != store.OrgRoleOwner && member.Role != store.OrgRoleAdmin) { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("insufficient permissions")) + return + } + + var req addMemberRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.Email == "" { + serverError.RespondError(w, 
http.StatusBadRequest, fmt.Errorf("email is required")) + return + } + + role := store.OrgRole(req.Role) + if role == "" { + role = store.OrgRoleMember + } + if role == store.OrgRoleOwner { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("cannot add another owner")) + return + } + if role != store.OrgRoleMember && role != store.OrgRoleAdmin { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("invalid role: must be member or admin")) + return + } + + targetUser, err := s.store.GetUserByEmail(r.Context(), req.Email) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("user not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to look up user")) + return + } + + memberID, err := id.Generate("MBR-") + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to generate member ID")) + return + } + + newMember := &store.OrgMember{ + ID: memberID, + OrgID: org.ID, + UserID: targetUser.ID, + Role: role, + } + + if err := s.store.CreateOrgMember(r.Context(), newMember); err != nil { + if errors.Is(err, store.ErrAlreadyExists) { + serverError.RespondError(w, http.StatusConflict, fmt.Errorf("user is already a member")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to add member")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusCreated, &memberResponse{ + ID: newMember.ID, + UserID: newMember.UserID, + Role: string(newMember.Role), + CreatedAt: newMember.CreatedAt.Format("2006-01-02T15:04:05Z07:00"), + }) +} + +// handleRemoveMember godoc +// @Summary Remove member +// @Description Remove a member from an organization (owner or admin only) +// @Tags Members +// @Produce json +// @Param slug path string true "Organization slug" +// @Param memberID path string true "Member ID" +// @Success 200 {object} map[string]string +// @Failure 
403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/members/{memberID} [delete] +func (s *Server) handleRemoveMember(w http.ResponseWriter, r *http.Request) { + slug := chi.URLParam(r, "slug") + memberID := chi.URLParam(r, "memberID") + user := auth.UserFromContext(r.Context()) + + org, err := s.store.GetOrganizationBySlug(r.Context(), slug) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("organization not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get organization")) + return + } + + callerMember, err := s.store.GetOrgMember(r.Context(), org.ID, user.ID) + if err != nil || (callerMember.Role != store.OrgRoleOwner && callerMember.Role != store.OrgRoleAdmin) { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("insufficient permissions")) + return + } + + // Prevent removing the org owner + targetMember, err := s.store.GetOrgMemberByID(r.Context(), org.ID, memberID) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("member not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get member")) + return + } + if targetMember.Role == store.OrgRoleOwner { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("cannot remove the organization owner")) + return + } + + if err := s.store.DeleteOrgMember(r.Context(), org.ID, memberID); err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("member not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to remove member")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]string{"status": 
"removed"}) +} diff --git a/api/internal/rest/org_handlers_test.go b/api/internal/rest/org_handlers_test.go new file mode 100644 index 00000000..5bf204b6 --- /dev/null +++ b/api/internal/rest/org_handlers_test.go @@ -0,0 +1,494 @@ +package rest + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +func TestHandleCreateOrg(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + ms.CreateOrganizationFn = func(_ context.Context, org *store.Organization) error { + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/orgs/", + strings.NewReader(`{"name":"My Org","slug":"my-org"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/orgs/", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["name"] != "My Org" { + t.Fatalf("expected name 'My Org', got %v", body["name"]) + } + if body["slug"] != "my-org" { + t.Fatalf("expected slug 'my-org', got %v", body["slug"]) + } + }) + + t.Run("invalid slug", func(t *testing.T) { + ms := &mockStore{} + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/orgs/", + strings.NewReader(`{"name":"Bad Org","slug":"A!"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/orgs/", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("duplicate explicit slug returns 409", func(t *testing.T) { + ms := &mockStore{} + ms.CreateOrganizationFn = func(_ 
context.Context, org *store.Organization) error { + return store.ErrAlreadyExists + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/orgs/", + strings.NewReader(`{"name":"Dup Org","slug":"dup-org"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/orgs/", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusConflict { + t.Fatalf("expected 409, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("duplicate auto slug auto-resolves with suffix", func(t *testing.T) { + ms := &mockStore{} + calls := 0 + ms.CreateOrganizationFn = func(_ context.Context, org *store.Organization) error { + calls++ + if calls == 1 { + return store.ErrAlreadyExists + } + return nil + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/orgs/", + strings.NewReader(`{"name":"Dup Org"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/orgs/", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + slug, ok := body["slug"].(string) + if !ok { + t.Fatal("expected slug in response") + } + if !strings.HasPrefix(slug, "dup-org-") { + t.Fatalf("expected slug to start with 'dup-org-', got %q", slug) + } + if len(slug) != len("dup-org-")+4 { + t.Fatalf("expected 4-char hex suffix, got slug %q", slug) + } + }) +} + +func TestHandleListOrgs(t *testing.T) { + ms := &mockStore{} + ms.ListOrganizationsByUserFn = func(_ context.Context, userID string) ([]*store.Organization, error) { + return []*store.Organization{testOrg}, nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", 
"/v1/orgs/", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + orgs, ok := body["organizations"].([]any) + if !ok { + t.Fatal("expected organizations array in response") + } + if len(orgs) != 1 { + t.Fatalf("expected 1 org, got %d", len(orgs)) + } +} + +func TestHandleGetOrg(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["slug"] != testOrg.Slug { + t.Fatalf("expected slug %s, got %v", testOrg.Slug, body["slug"]) + } + }) + + t.Run("not found", func(t *testing.T) { + ms := &mockStore{} + ms.GetOrganizationBySlugFn = func(_ context.Context, slug string) (*store.Organization, error) { + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/nonexistent", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("not member", func(t *testing.T) { + ms := &mockStore{} + ms.GetOrganizationBySlugFn = func(_ context.Context, slug string) (*store.Organization, error) { + return testOrg, nil + } + ms.GetOrgMemberFn = func(_ context.Context, orgID, userID string) (*store.OrgMember, error) { + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusForbidden { + t.Fatalf("expected 403, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} + 
+func TestHandleUpdateOrg(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.UpdateOrganizationFn = func(_ context.Context, org *store.Organization) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("PATCH", "/v1/orgs/test-org", + strings.NewReader(`{"name":"Updated Org"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "PATCH", "/v1/orgs/test-org", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["name"] != "Updated Org" { + t.Fatalf("expected name 'Updated Org', got %v", body["name"]) + } + }) + + t.Run("insufficient permissions", func(t *testing.T) { + ms := &mockStore{} + ms.GetOrganizationBySlugFn = func(_ context.Context, slug string) (*store.Organization, error) { + return testOrg, nil + } + ms.GetOrgMemberFn = func(_ context.Context, orgID, userID string) (*store.OrgMember, error) { + return &store.OrgMember{ + ID: "MBR-member1", + OrgID: testOrg.ID, + UserID: testUser.ID, + Role: store.OrgRoleMember, + }, nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("PATCH", "/v1/orgs/test-org", + strings.NewReader(`{"name":"Updated Org"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "PATCH", "/v1/orgs/test-org", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusForbidden { + t.Fatalf("expected 403, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} + +func TestHandleDeleteOrg(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.DeleteOrganizationFn = func(_ context.Context, id string) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := 
authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["status"] != "deleted" { + t.Fatalf("expected status 'deleted', got %v", body["status"]) + } + }) + + t.Run("not owner", func(t *testing.T) { + ms := &mockStore{} + notOwnedOrg := &store.Organization{ + ID: "ORG-other", + Name: "Other Org", + Slug: "other-org", + OwnerID: "USR-someone-else", + } + ms.GetOrganizationBySlugFn = func(_ context.Context, slug string) (*store.Organization, error) { + if slug == "other-org" { + return notOwnedOrg, nil + } + return nil, store.ErrNotFound + } + ms.GetOrgMemberFn = func(_ context.Context, orgID, userID string) (*store.OrgMember, error) { + return &store.OrgMember{ + ID: "MBR-admin1", + OrgID: notOwnedOrg.ID, + UserID: testUser.ID, + Role: store.OrgRoleAdmin, + }, nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/other-org", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusForbidden { + t.Fatalf("expected 403, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} + +func TestHandleListMembers(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.ListOrgMembersFn = func(_ context.Context, orgID string) ([]*store.OrgMember, error) { + return []*store.OrgMember{testMember}, nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/members", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + members, ok := body["members"].([]any) + if !ok { + t.Fatal("expected members array in response") + } + if len(members) != 1 { + t.Fatalf("expected 1 member, got %d", len(members)) + } +} + +func 
TestHandleAddMember(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + + targetUser := &store.User{ + ID: "USR-target", + Email: "target@example.com", + DisplayName: "Target User", + } + ms.GetUserByEmailFn = func(_ context.Context, email string) (*store.User, error) { + if email == targetUser.Email { + return targetUser, nil + } + return nil, store.ErrNotFound + } + ms.CreateOrgMemberFn = func(_ context.Context, m *store.OrgMember) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/orgs/test-org/members", + strings.NewReader(`{"email":"target@example.com","role":"member"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/members", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["user_id"] != targetUser.ID { + t.Fatalf("expected user_id %s, got %v", targetUser.ID, body["user_id"]) + } + }) + + t.Run("missing email", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/orgs/test-org/members", + strings.NewReader(`{"role":"member"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/members", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("cannot add owner role", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + bodyReq := httptest.NewRequest("POST", "/v1/orgs/test-org/members", + 
strings.NewReader(`{"email":"target@example.com","role":"owner"}`)) + bodyReq.Header.Set("Content-Type", "application/json") + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/members", bodyReq) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} + +func TestHandleRemoveMember(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetOrgMemberByIDFn = func(_ context.Context, orgID, memberID string) (*store.OrgMember, error) { + return &store.OrgMember{ + ID: memberID, + OrgID: orgID, + UserID: "USR-target", + Role: store.OrgRoleMember, + }, nil + } + ms.DeleteOrgMemberFn = func(_ context.Context, orgID, id string) error { + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/members/MBR-target1234", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["status"] != "removed" { + t.Fatalf("expected status 'removed', got %v", body["status"]) + } + }) + + t.Run("insufficient permissions", func(t *testing.T) { + ms := &mockStore{} + ms.GetOrganizationBySlugFn = func(_ context.Context, slug string) (*store.Organization, error) { + if slug == testOrg.Slug { + return testOrg, nil + } + return nil, store.ErrNotFound + } + ms.GetOrgMemberFn = func(_ context.Context, orgID, userID string) (*store.OrgMember, error) { + return &store.OrgMember{ + ID: "MBR-regular", + OrgID: testOrg.ID, + UserID: testUser.ID, + Role: store.OrgRoleMember, + }, nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/members/MBR-target1234", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusForbidden { + t.Fatalf("expected 403, got %d: 
%s", rr.Code, rr.Body.String()) + } + }) + + t.Run("IDOR cross-org member deletion returns 404", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + // GetOrgMemberByID scopes by org_id: member exists in another org but not in testOrg + ms.GetOrgMemberByIDFn = func(_ context.Context, orgID, memberID string) (*store.OrgMember, error) { + if orgID == testOrg.ID { + return nil, store.ErrNotFound + } + return &store.OrgMember{ID: memberID, OrgID: orgID, Role: store.OrgRoleMember}, nil + } + ms.DeleteOrgMemberFn = func(_ context.Context, orgID, id string) error { + if orgID == testOrg.ID { + return store.ErrNotFound + } + return nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/members/MBR-other-org", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404 for cross-org IDOR, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} diff --git a/api/internal/rest/playbook_handlers.go b/api/internal/rest/playbook_handlers.go new file mode 100644 index 00000000..06285751 --- /dev/null +++ b/api/internal/rest/playbook_handlers.go @@ -0,0 +1,365 @@ +package rest + +// Playbook handlers - commented out, not yet ready for integration. 
+ +/* +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/go-chi/chi/v5" + "github.com/google/uuid" + + "github.com/aspectrr/fluid.sh/api/internal/auth" + serverError "github.com/aspectrr/fluid.sh/api/internal/error" + serverJSON "github.com/aspectrr/fluid.sh/api/internal/json" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +// --- Playbook CRUD --- + +func (s *Server) handleCreatePlaybook(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + var req struct { + Name string `json:"name"` + Description string `json:"description"` + } + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + if req.Name == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("name is required")) + return + } + + pb := &store.Playbook{ + ID: uuid.New().String(), + OrgID: org.ID, + Name: req.Name, + Description: req.Description, + } + if err := s.store.CreatePlaybook(r.Context(), pb); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create playbook")) + return + } + + if user := auth.UserFromContext(r.Context()); user != nil { + s.telemetry.Track(user.ID, "playbook_created", map[string]any{"org_id": org.ID}) + } + + _ = serverJSON.RespondJSON(w, http.StatusCreated, pb) +} + +func (s *Server) handleListPlaybooks(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + playbooks, err := s.store.ListPlaybooksByOrg(r.Context(), org.ID) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to list playbooks")) + return + } + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "playbooks": playbooks, + "count": len(playbooks), + }) +} + +func (s *Server) handleGetPlaybook(w http.ResponseWriter, r *http.Request) { + org, _, ok := 
s.resolveOrgMembership(w, r) + if !ok { + return + } + + playbookID := chi.URLParam(r, "playbookID") + pb, err := s.store.GetPlaybook(r.Context(), playbookID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("playbook not found")) + return + } + if pb.OrgID != org.ID { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("playbook does not belong to this organization")) + return + } + + tasks, _ := s.store.ListPlaybookTasks(r.Context(), playbookID) + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "playbook": pb, + "tasks": tasks, + }) +} + +func (s *Server) handleUpdatePlaybook(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + playbookID := chi.URLParam(r, "playbookID") + pb, err := s.store.GetPlaybook(r.Context(), playbookID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("playbook not found")) + return + } + if pb.OrgID != org.ID { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("playbook does not belong to this organization")) + return + } + + var req struct { + Name *string `json:"name"` + Description *string `json:"description"` + } + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.Name != nil { + pb.Name = *req.Name + } + if req.Description != nil { + pb.Description = *req.Description + } + + if err := s.store.UpdatePlaybook(r.Context(), pb); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to update playbook")) + return + } + _ = serverJSON.RespondJSON(w, http.StatusOK, pb) +} + +func (s *Server) handleDeletePlaybook(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + playbookID := chi.URLParam(r, "playbookID") + pb, err := s.store.GetPlaybook(r.Context(), playbookID) + if err != nil { 
+ serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("playbook not found")) + return + } + if pb.OrgID != org.ID { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("playbook does not belong to this organization")) + return + } + + if err := s.store.DeletePlaybook(r.Context(), playbookID); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to delete playbook")) + return + } + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{"deleted": true, "playbook_id": playbookID}) +} + +// --- Playbook Task CRUD --- + +func (s *Server) handleCreatePlaybookTask(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + playbookID := chi.URLParam(r, "playbookID") + pb, err := s.store.GetPlaybook(r.Context(), playbookID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("playbook not found")) + return + } + if pb.OrgID != org.ID { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("playbook does not belong to this organization")) + return + } + + var req struct { + Name string `json:"name"` + Module string `json:"module"` + Params json.RawMessage `json:"params"` + } + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + if req.Name == "" || req.Module == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("name and module are required")) + return + } + + paramsStr := "{}" + if len(req.Params) > 0 { + paramsStr = string(req.Params) + } + + task := &store.PlaybookTask{ + ID: uuid.New().String(), + PlaybookID: playbookID, + Name: req.Name, + Module: req.Module, + Params: paramsStr, + } + if err := s.store.CreatePlaybookTask(r.Context(), task); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create task")) + return + } + _ = serverJSON.RespondJSON(w, 
http.StatusCreated, task) +} + +func (s *Server) handleListPlaybookTasks(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + playbookID := chi.URLParam(r, "playbookID") + pb, err := s.store.GetPlaybook(r.Context(), playbookID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("playbook not found")) + return + } + if pb.OrgID != org.ID { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("playbook does not belong to this organization")) + return + } + + tasks, err := s.store.ListPlaybookTasks(r.Context(), playbookID) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to list tasks")) + return + } + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "tasks": tasks, + "count": len(tasks), + }) +} + +func (s *Server) handleUpdatePlaybookTask(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + playbookID := chi.URLParam(r, "playbookID") + pb, err := s.store.GetPlaybook(r.Context(), playbookID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("playbook not found")) + return + } + if pb.OrgID != org.ID { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("playbook does not belong to this organization")) + return + } + + taskID := chi.URLParam(r, "taskID") + task, err := s.store.GetPlaybookTask(r.Context(), taskID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("task not found")) + return + } + if task.PlaybookID != playbookID { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("task does not belong to this playbook")) + return + } + + var req struct { + Name *string `json:"name"` + Module *string `json:"module"` + Params *json.RawMessage `json:"params"` + } + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, 
http.StatusBadRequest, err) + return + } + + if req.Name != nil { + task.Name = *req.Name + } + if req.Module != nil { + task.Module = *req.Module + } + if req.Params != nil { + task.Params = string(*req.Params) + } + + if err := s.store.UpdatePlaybookTask(r.Context(), task); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to update task")) + return + } + _ = serverJSON.RespondJSON(w, http.StatusOK, task) +} + +func (s *Server) handleDeletePlaybookTask(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + playbookID := chi.URLParam(r, "playbookID") + pb, err := s.store.GetPlaybook(r.Context(), playbookID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("playbook not found")) + return + } + if pb.OrgID != org.ID { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("playbook does not belong to this organization")) + return + } + + taskID := chi.URLParam(r, "taskID") + task, err := s.store.GetPlaybookTask(r.Context(), taskID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("task not found")) + return + } + if task.PlaybookID != playbookID { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("task does not belong to this playbook")) + return + } + + if err := s.store.DeletePlaybookTask(r.Context(), taskID); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to delete task")) + return + } + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{"deleted": true, "task_id": taskID}) +} + +func (s *Server) handleReorderPlaybookTasks(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + playbookID := chi.URLParam(r, "playbookID") + pb, err := s.store.GetPlaybook(r.Context(), playbookID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("playbook 
not found")) + return + } + if pb.OrgID != org.ID { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("playbook does not belong to this organization")) + return + } + + var req struct { + TaskIDs []string `json:"task_ids"` + } + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if err := s.store.ReorderPlaybookTasks(r.Context(), playbookID, req.TaskIDs); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to reorder tasks")) + return + } + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{"reordered": true}) +} +*/ diff --git a/api/internal/rest/playbook_handlers_test.go b/api/internal/rest/playbook_handlers_test.go new file mode 100644 index 00000000..f030c707 --- /dev/null +++ b/api/internal/rest/playbook_handlers_test.go @@ -0,0 +1,377 @@ +package rest + +// Playbook handler tests - commented out, not yet ready for integration. + +/* +import ( + "bytes" + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +func TestHandleCreatePlaybook(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + + var created *store.Playbook + ms.CreatePlaybookFn = func(_ context.Context, pb *store.Playbook) error { + created = pb + return nil + } + + s := newTestServer(ms, nil) + body := httptest.NewRequest("POST", "/v1/orgs/test-org/playbooks", + bytes.NewBufferString(`{"name":"Deploy App","description":"Deploy the application"}`)) + body.Header.Set("Content-Type", "application/json") + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/playbooks", body) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + if created == nil { + t.Fatal("expected CreatePlaybook to be called") + } + if created.Name != "Deploy App" { + 
t.Fatalf("expected name 'Deploy App', got %q", created.Name) + } + if created.OrgID != testOrg.ID { + t.Fatalf("expected orgID %q, got %q", testOrg.ID, created.OrgID) + } +} + +func TestHandleListPlaybooks(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.ListPlaybooksByOrgFn = func(_ context.Context, orgID string) ([]*store.Playbook, error) { + return []*store.Playbook{ + {ID: "pb-1", OrgID: testOrg.ID, Name: "Playbook 1"}, + {ID: "pb-2", OrgID: testOrg.ID, Name: "Playbook 2"}, + }, nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/playbooks", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + resp := parseJSONResponse(rr) + if resp["count"] != float64(2) { + t.Fatalf("expected count=2, got %v", resp["count"]) + } +} + +func TestHandleGetPlaybook(t *testing.T) { + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetPlaybookFn = func(_ context.Context, id string) (*store.Playbook, error) { + if id == "pb-1" { + return &store.Playbook{ID: "pb-1", OrgID: testOrg.ID, Name: "Deploy"}, nil + } + return nil, store.ErrNotFound + } + ms.ListPlaybookTasksFn = func(_ context.Context, pbID string) ([]*store.PlaybookTask, error) { + return []*store.PlaybookTask{ + {ID: "task-1", PlaybookID: pbID, Name: "Install"}, + }, nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/playbooks/pb-1", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + resp := parseJSONResponse(rr) + pb, ok := resp["playbook"].(map[string]any) + if !ok { + t.Fatalf("expected playbook object, got %v", resp["playbook"]) + } + if pb["id"] != "pb-1" { + t.Fatalf("expected playbook id=pb-1, got %v", pb["id"]) + } + tasks, ok 
:= resp["tasks"].([]any) + if !ok || len(tasks) != 1 { + t.Fatalf("expected 1 task, got %v", resp["tasks"]) + } + }) + + t.Run("not_found", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetPlaybookFn = func(_ context.Context, id string) (*store.Playbook, error) { + return nil, store.ErrNotFound + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/playbooks/nonexistent", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("wrong_org", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetPlaybookFn = func(_ context.Context, id string) (*store.Playbook, error) { + return &store.Playbook{ID: "pb-1", OrgID: "ORG-other", Name: "Other"}, nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/playbooks/pb-1", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusForbidden { + t.Fatalf("expected 403, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} + +func TestHandleUpdatePlaybook(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetPlaybookFn = func(_ context.Context, id string) (*store.Playbook, error) { + if id == "pb-1" { + return &store.Playbook{ID: "pb-1", OrgID: testOrg.ID, Name: "Old Name", Description: "Old Desc"}, nil + } + return nil, store.ErrNotFound + } + var updated *store.Playbook + ms.UpdatePlaybookFn = func(_ context.Context, pb *store.Playbook) error { + updated = pb + return nil + } + + s := newTestServer(ms, nil) + body := httptest.NewRequest("PATCH", "/v1/orgs/test-org/playbooks/pb-1", + bytes.NewBufferString(`{"name":"New Name"}`)) + body.Header.Set("Content-Type", "application/json") + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "PATCH", "/v1/orgs/test-org/playbooks/pb-1", body) + 
s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + if updated == nil { + t.Fatal("expected UpdatePlaybook to be called") + } + if updated.Name != "New Name" { + t.Fatalf("expected name 'New Name', got %q", updated.Name) + } + // Description should remain unchanged + if updated.Description != "Old Desc" { + t.Fatalf("expected description 'Old Desc', got %q", updated.Description) + } +} + +func TestHandleDeletePlaybook(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetPlaybookFn = func(_ context.Context, id string) (*store.Playbook, error) { + if id == "pb-1" { + return &store.Playbook{ID: "pb-1", OrgID: testOrg.ID}, nil + } + return nil, store.ErrNotFound + } + deleted := false + ms.DeletePlaybookFn = func(_ context.Context, id string) error { + if id == "pb-1" { + deleted = true + } + return nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/playbooks/pb-1", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + if !deleted { + t.Fatal("expected DeletePlaybook to be called") + } + resp := parseJSONResponse(rr) + if resp["deleted"] != true { + t.Fatalf("expected deleted=true, got %v", resp["deleted"]) + } +} + +func TestHandleCreatePlaybookTask(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetPlaybookFn = func(_ context.Context, id string) (*store.Playbook, error) { + if id == "pb-1" { + return &store.Playbook{ID: "pb-1", OrgID: testOrg.ID}, nil + } + return nil, store.ErrNotFound + } + ms.ListPlaybookTasksFn = func(_ context.Context, pbID string) ([]*store.PlaybookTask, error) { + return nil, nil + } + var created *store.PlaybookTask + ms.CreatePlaybookTaskFn = func(_ context.Context, task *store.PlaybookTask) error { + created = task + return nil + } + + s := newTestServer(ms, 
nil) + body := httptest.NewRequest("POST", "/v1/orgs/test-org/playbooks/pb-1/tasks", + bytes.NewBufferString(`{"name":"Install nginx","module":"apt","params":{"name":"nginx","state":"present"}}`)) + body.Header.Set("Content-Type", "application/json") + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/playbooks/pb-1/tasks", body) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + if created == nil { + t.Fatal("expected CreatePlaybookTask to be called") + } + if created.Name != "Install nginx" { + t.Fatalf("expected task name 'Install nginx', got %q", created.Name) + } + if created.Module != "apt" { + t.Fatalf("expected module 'apt', got %q", created.Module) + } + if created.PlaybookID != "pb-1" { + t.Fatalf("expected playbookID 'pb-1', got %q", created.PlaybookID) + } +} + +func TestHandleListPlaybookTasks(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetPlaybookFn = func(_ context.Context, id string) (*store.Playbook, error) { + if id == "pb-1" { + return &store.Playbook{ID: "pb-1", OrgID: testOrg.ID}, nil + } + return nil, store.ErrNotFound + } + ms.ListPlaybookTasksFn = func(_ context.Context, pbID string) ([]*store.PlaybookTask, error) { + return []*store.PlaybookTask{ + {ID: "task-1", PlaybookID: pbID, Name: "Task 1", Module: "shell"}, + {ID: "task-2", PlaybookID: pbID, Name: "Task 2", Module: "copy"}, + }, nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/playbooks/pb-1/tasks", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + resp := parseJSONResponse(rr) + if resp["count"] != float64(2) { + t.Fatalf("expected count=2, got %v", resp["count"]) + } +} + +func TestHandleUpdatePlaybookTask(t *testing.T) { + ms := &mockStore{} + 
setupOrgMembership(ms) + ms.GetPlaybookFn = func(_ context.Context, id string) (*store.Playbook, error) { + if id == "pb-1" { + return &store.Playbook{ID: "pb-1", OrgID: testOrg.ID}, nil + } + return nil, store.ErrNotFound + } + ms.GetPlaybookTaskFn = func(_ context.Context, id string) (*store.PlaybookTask, error) { + if id == "task-1" { + return &store.PlaybookTask{ID: "task-1", PlaybookID: "pb-1", Name: "Old Task", Module: "shell", Params: "{}"}, nil + } + return nil, store.ErrNotFound + } + var updated *store.PlaybookTask + ms.UpdatePlaybookTaskFn = func(_ context.Context, task *store.PlaybookTask) error { + updated = task + return nil + } + + s := newTestServer(ms, nil) + body := httptest.NewRequest("PATCH", "/v1/orgs/test-org/playbooks/pb-1/tasks/task-1", + bytes.NewBufferString(`{"name":"Updated Task"}`)) + body.Header.Set("Content-Type", "application/json") + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "PATCH", "/v1/orgs/test-org/playbooks/pb-1/tasks/task-1", body) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + if updated == nil { + t.Fatal("expected UpdatePlaybookTask to be called") + } + if updated.Name != "Updated Task" { + t.Fatalf("expected name 'Updated Task', got %q", updated.Name) + } + // Module should remain unchanged + if updated.Module != "shell" { + t.Fatalf("expected module 'shell', got %q", updated.Module) + } +} + +func TestHandleDeletePlaybookTask(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetPlaybookFn = func(_ context.Context, id string) (*store.Playbook, error) { + if id == "pb-1" { + return &store.Playbook{ID: "pb-1", OrgID: testOrg.ID}, nil + } + return nil, store.ErrNotFound + } + ms.GetPlaybookTaskFn = func(_ context.Context, id string) (*store.PlaybookTask, error) { + if id == "task-1" { + return &store.PlaybookTask{ID: "task-1", PlaybookID: "pb-1"}, nil + } + return nil, store.ErrNotFound + } + 
deleted := false + ms.DeletePlaybookTaskFn = func(_ context.Context, id string) error { + if id == "task-1" { + deleted = true + } + return nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/playbooks/pb-1/tasks/task-1", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + if !deleted { + t.Fatal("expected DeletePlaybookTask to be called") + } + resp := parseJSONResponse(rr) + if resp["deleted"] != true { + t.Fatalf("expected deleted=true, got %v", resp["deleted"]) + } +} +*/ diff --git a/api/internal/rest/ratelimit.go b/api/internal/rest/ratelimit.go new file mode 100644 index 00000000..b3668818 --- /dev/null +++ b/api/internal/rest/ratelimit.go @@ -0,0 +1,125 @@ +package rest + +import ( + "log/slog" + "net" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/time/rate" +) + +type ipLimiter struct { + limiter *rate.Limiter + lastSeen time.Time +} + +// parseCIDRs parses a slice of CIDR strings into net.IPNet values. +// Invalid entries are skipped with a warning log. +func parseCIDRs(cidrs []string, logger *slog.Logger) []*net.IPNet { + if logger == nil { + logger = slog.Default() + } + var nets []*net.IPNet + for _, c := range cidrs { + _, ipNet, err := net.ParseCIDR(c) + if err != nil { + // Try as bare IP by appending /32 or /128. + ip := net.ParseIP(c) + if ip == nil { + logger.Warn("skipping invalid trusted proxy CIDR", "cidr", c, "error", err) + continue + } + bits := 32 + if ip.To4() == nil { + bits = 128 + } + ipNet = &net.IPNet{IP: ip, Mask: net.CIDRMask(bits, bits)} + } + nets = append(nets, ipNet) + } + return nets +} + +// clientIP extracts the real client IP from a request. Proxy headers +// (X-Real-IP, X-Forwarded-For) are only trusted when RemoteAddr falls +// within one of the trustedProxies CIDRs. 
+func clientIP(r *http.Request, trustedProxies []*net.IPNet) string { + remoteIP, _, _ := net.SplitHostPort(r.RemoteAddr) + if remoteIP == "" { + remoteIP = r.RemoteAddr + } + + if len(trustedProxies) > 0 { + parsed := net.ParseIP(remoteIP) + if parsed != nil { + for _, cidr := range trustedProxies { + if cidr.Contains(parsed) { + if xri := r.Header.Get("X-Real-IP"); xri != "" { + return xri + } + if xff := r.Header.Get("X-Forwarded-For"); xff != "" { + ip, _, _ := strings.Cut(xff, ",") + return strings.TrimSpace(ip) + } + break + } + } + } + } + + return remoteIP +} + +// rateLimitByIP returns middleware that rate-limits requests per client IP. +// Proxy headers are only trusted when the direct connection comes from a +// trustedProxies CIDR. +// +// NOTE: Rate limit state is in-memory and per-process. In a multi-instance +// deployment, each instance maintains its own counters, so effective limits +// are multiplied by the number of instances. This is acceptable for +// single-instance deployments. For multi-instance, consider a shared store. +func rateLimitByIP(rps float64, burst int, trustedProxies []*net.IPNet) func(http.Handler) http.Handler { + var mu sync.Mutex + limiters := make(map[string]*ipLimiter) + + // Periodically clean up stale entries. This goroutine is intentionally + // process-scoped: rateLimitByIP is called at startup and lives for the + // lifetime of the server, so no shutdown mechanism is needed. 
+ go func() { + for { + time.Sleep(time.Minute) + mu.Lock() + for ip, l := range limiters { + if time.Since(l.lastSeen) > 10*time.Minute { + delete(limiters, ip) + } + } + mu.Unlock() + } + }() + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ip := clientIP(r, trustedProxies) + + mu.Lock() + l, ok := limiters[ip] + if !ok { + l = &ipLimiter{limiter: rate.NewLimiter(rate.Limit(rps), burst)} + limiters[ip] = l + } + l.lastSeen = time.Now() + mu.Unlock() + + if !l.limiter.Allow() { + http.Error(w, "rate limit exceeded", http.StatusTooManyRequests) + return + } + + next.ServeHTTP(w, r) + }) + } +} diff --git a/api/internal/rest/ratelimit_test.go b/api/internal/rest/ratelimit_test.go new file mode 100644 index 00000000..8d479c90 --- /dev/null +++ b/api/internal/rest/ratelimit_test.go @@ -0,0 +1,176 @@ +package rest + +import ( + "net" + "net/http" + "net/http/httptest" + "testing" +) + +func TestRateLimitByIP_AllowsBurst(t *testing.T) { + handler := rateLimitByIP(1, 3, nil)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + for i := 0; i < 3; i++ { + req := httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = "192.168.1.1:12345" + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusOK { + t.Fatalf("request %d: got status %d, want %d", i, rr.Code, http.StatusOK) + } + } +} + +func TestRateLimitByIP_RejectsAfterBurst(t *testing.T) { + handler := rateLimitByIP(0.001, 2, nil)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + // Exhaust burst + for i := 0; i < 2; i++ { + req := httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = "10.0.0.1:9999" + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusOK { + t.Fatalf("burst request %d: got status %d, want %d", i, rr.Code, http.StatusOK) + } + } + + // Next request 
should be rejected + req := httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = "10.0.0.1:9999" + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusTooManyRequests { + t.Fatalf("over-limit request: got status %d, want %d", rr.Code, http.StatusTooManyRequests) + } +} + +func TestRateLimitByIP_DifferentIPsIndependent(t *testing.T) { + handler := rateLimitByIP(0.001, 1, nil)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + // Exhaust IP A + req := httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = "1.1.1.1:1000" + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusOK { + t.Fatalf("IP A first: got %d, want %d", rr.Code, http.StatusOK) + } + + // IP A is now rate-limited + req = httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = "1.1.1.1:1000" + rr = httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusTooManyRequests { + t.Fatalf("IP A second: got %d, want %d", rr.Code, http.StatusTooManyRequests) + } + + // IP B should still be allowed + req = httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = "2.2.2.2:2000" + rr = httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusOK { + t.Fatalf("IP B first: got %d, want %d", rr.Code, http.StatusOK) + } +} + +func TestRateLimitByIP_SpoofedHeaderIgnoredWithoutTrustedProxy(t *testing.T) { + handler := rateLimitByIP(0.001, 1, nil)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + // First request with spoofed X-Forwarded-For - should use RemoteAddr + req := httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = "10.0.0.1:9999" + req.Header.Set("X-Forwarded-For", "1.2.3.4") + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusOK { + t.Fatalf("first request: got %d, want %d", rr.Code, http.StatusOK) + } + + // Second request from 
same RemoteAddr but different spoofed header - + // should still be rate-limited because we use RemoteAddr, not the header + req = httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = "10.0.0.1:9999" + req.Header.Set("X-Forwarded-For", "5.6.7.8") + rr = httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusTooManyRequests { + t.Fatalf("spoofed header should not bypass rate limit: got %d, want %d", rr.Code, http.StatusTooManyRequests) + } +} + +func TestRateLimitByIP_HeaderHonoredFromTrustedProxy(t *testing.T) { + _, proxyNet, _ := net.ParseCIDR("10.0.0.0/8") + trusted := []*net.IPNet{proxyNet} + + handler := rateLimitByIP(0.001, 1, trusted)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + // Request from trusted proxy with X-Forwarded-For + req := httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = "10.0.0.1:9999" + req.Header.Set("X-Forwarded-For", "203.0.113.50") + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusOK { + t.Fatalf("first request: got %d, want %d", rr.Code, http.StatusOK) + } + + // Second request from same proxy but different real client - should be allowed + req = httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = "10.0.0.1:9999" + req.Header.Set("X-Forwarded-For", "203.0.113.51") + rr = httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusOK { + t.Fatalf("different client via proxy: got %d, want %d", rr.Code, http.StatusOK) + } +} + +func TestRateLimitByIP_XRealIPHonoredFromTrustedProxy(t *testing.T) { + _, proxyNet, _ := net.ParseCIDR("10.0.0.0/8") + trusted := []*net.IPNet{proxyNet} + + handler := rateLimitByIP(0.001, 1, trusted)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + // Request from trusted proxy with X-Real-IP (takes priority over XFF) + req := httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = 
"10.0.0.1:9999" + req.Header.Set("X-Real-IP", "203.0.113.50") + req.Header.Set("X-Forwarded-For", "203.0.113.99") + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusOK { + t.Fatalf("first request: got %d, want %d", rr.Code, http.StatusOK) + } + + // Same X-Real-IP should be rate-limited + req = httptest.NewRequest("POST", "/test", nil) + req.RemoteAddr = "10.0.0.1:9999" + req.Header.Set("X-Real-IP", "203.0.113.50") + rr = httptest.NewRecorder() + handler.ServeHTTP(rr, req) + if rr.Code != http.StatusTooManyRequests { + t.Fatalf("same client via X-Real-IP: got %d, want %d", rr.Code, http.StatusTooManyRequests) + } +} + +func TestParseCIDRs(t *testing.T) { + nets := parseCIDRs([]string{"10.0.0.0/8", "192.168.1.1", "invalid", "::1"}, nil) + if len(nets) != 3 { + t.Fatalf("expected 3 valid CIDRs, got %d", len(nets)) + } +} diff --git a/api/internal/rest/sandbox_handlers.go b/api/internal/rest/sandbox_handlers.go new file mode 100644 index 00000000..3b2ae89b --- /dev/null +++ b/api/internal/rest/sandbox_handlers.go @@ -0,0 +1,463 @@ +package rest + +import ( + "errors" + "fmt" + "net/http" + + "github.com/go-chi/chi/v5" + + "github.com/aspectrr/fluid.sh/api/internal/auth" + serverError "github.com/aspectrr/fluid.sh/api/internal/error" + serverJSON "github.com/aspectrr/fluid.sh/api/internal/json" + "github.com/aspectrr/fluid.sh/api/internal/orchestrator" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +// handleCreateSandbox godoc +// @Summary Create sandbox +// @Description Create a new sandbox in the organization from a source VM or base image +// @Tags Sandboxes +// @Accept json +// @Produce json +// @Param slug path string true "Organization slug" +// @Param request body orchestrator.CreateSandboxRequest true "Sandbox configuration" +// @Success 201 {object} store.Sandbox +// @Failure 400 {object} error.ErrorResponse +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 
500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sandboxes [post] +func (s *Server) handleCreateSandbox(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + var req orchestrator.CreateSandboxRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.SourceVM == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("source_vm is required")) + return + } + + req.OrgID = org.ID + + sandbox, err := s.orchestrator.CreateSandbox(r.Context(), req) + if err != nil { + s.logger.Error("failed to create sandbox", "error", err) + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create sandbox")) + return + } + + if user := auth.UserFromContext(r.Context()); user != nil { + s.telemetry.Track(user.ID, "sandbox_created", map[string]any{"org_id": org.ID, "source_vm": req.SourceVM}) + } + + _ = serverJSON.RespondJSON(w, http.StatusCreated, sandbox) +} + +// handleListSandboxes godoc +// @Summary List sandboxes +// @Description List all sandboxes in the organization +// @Tags Sandboxes +// @Produce json +// @Param slug path string true "Organization slug" +// @Success 200 {object} map[string]interface{} +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sandboxes [get] +func (s *Server) handleListSandboxes(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + sandboxes, err := s.orchestrator.ListSandboxesByOrg(r.Context(), org.ID) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to list sandboxes")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "sandboxes": 
sandboxes, + "count": len(sandboxes), + }) +} + +// handleGetSandbox godoc +// @Summary Get sandbox +// @Description Get sandbox details by ID +// @Tags Sandboxes +// @Produce json +// @Param slug path string true "Organization slug" +// @Param sandboxID path string true "Sandbox ID" +// @Success 200 {object} store.Sandbox +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sandboxes/{sandboxID} [get] +func (s *Server) handleGetSandbox(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + sandboxID := chi.URLParam(r, "sandboxID") + sandbox, err := s.orchestrator.GetSandbox(r.Context(), org.ID, sandboxID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, sandbox) +} + +// handleDestroySandbox godoc +// @Summary Destroy sandbox +// @Description Destroy a sandbox and release its resources +// @Tags Sandboxes +// @Produce json +// @Param slug path string true "Organization slug" +// @Param sandboxID path string true "Sandbox ID" +// @Success 200 {object} map[string]interface{} +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sandboxes/{sandboxID} [delete] +func (s *Server) handleDestroySandbox(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgRole(w, r, store.OrgRoleAdmin) + if !ok { + return + } + + sandboxID := chi.URLParam(r, "sandboxID") + + if err := s.orchestrator.DestroySandbox(r.Context(), org.ID, sandboxID); err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found")) + return + } + s.logger.Error("failed to destroy sandbox", "sandbox_id", sandboxID, "error", err) + 
serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to destroy sandbox")) + return + } + + if user := auth.UserFromContext(r.Context()); user != nil { + s.telemetry.Track(user.ID, "sandbox_destroyed", map[string]any{"org_id": org.ID}) + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "destroyed": true, + "sandbox_id": sandboxID, + }) +} + +// handleRunCommand godoc +// @Summary Run command +// @Description Execute a command in a sandbox +// @Tags Sandboxes +// @Accept json +// @Produce json +// @Param slug path string true "Organization slug" +// @Param sandboxID path string true "Sandbox ID" +// @Param request body orchestrator.RunCommandRequest true "Command to run" +// @Success 200 {object} store.Command +// @Failure 400 {object} error.ErrorResponse +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sandboxes/{sandboxID}/run [post] +func (s *Server) handleRunCommand(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + sandboxID := chi.URLParam(r, "sandboxID") + + var req orchestrator.RunCommandRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.Command == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("command is required")) + return + } + + const maxCommandLen = 65536 // 64 KiB + if len(req.Command) > maxCommandLen { + serverError.RespondError(w, http.StatusBadRequest, + fmt.Errorf("command exceeds maximum length of %d bytes", maxCommandLen)) + return + } + + const maxTimeoutSec = 3600 + if req.TimeoutSec > maxTimeoutSec { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("timeout_seconds must be <= %d", maxTimeoutSec)) + return + } + + result, err := 
s.orchestrator.RunCommand(r.Context(), org.ID, sandboxID, req.Command, req.TimeoutSec) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found")) + return + } + s.logger.Error("failed to run command", "sandbox_id", sandboxID, "error", err) + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to run command")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, result) +} + +// handleStartSandbox godoc +// @Summary Start sandbox +// @Description Start a stopped sandbox +// @Tags Sandboxes +// @Produce json +// @Param slug path string true "Organization slug" +// @Param sandboxID path string true "Sandbox ID" +// @Success 200 {object} map[string]interface{} +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sandboxes/{sandboxID}/start [post] +func (s *Server) handleStartSandbox(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + sandboxID := chi.URLParam(r, "sandboxID") + + if err := s.orchestrator.StartSandbox(r.Context(), org.ID, sandboxID); err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found")) + return + } + s.logger.Error("failed to start sandbox", "sandbox_id", sandboxID, "error", err) + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to start sandbox")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "started": true, + "sandbox_id": sandboxID, + }) +} + +// handleStopSandbox godoc +// @Summary Stop sandbox +// @Description Stop a running sandbox +// @Tags Sandboxes +// @Produce json +// @Param slug path string true "Organization slug" +// @Param sandboxID path string true "Sandbox ID" +// @Success 200 
{object} map[string]interface{} +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sandboxes/{sandboxID}/stop [post] +func (s *Server) handleStopSandbox(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + sandboxID := chi.URLParam(r, "sandboxID") + + if err := s.orchestrator.StopSandbox(r.Context(), org.ID, sandboxID); err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found")) + return + } + s.logger.Error("failed to stop sandbox", "sandbox_id", sandboxID, "error", err) + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to stop sandbox")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "stopped": true, + "sandbox_id": sandboxID, + }) +} + +// handleGetSandboxIP godoc +// @Summary Get sandbox IP +// @Description Get the IP address of a sandbox +// @Tags Sandboxes +// @Produce json +// @Param slug path string true "Organization slug" +// @Param sandboxID path string true "Sandbox ID" +// @Success 200 {object} map[string]interface{} +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sandboxes/{sandboxID}/ip [get] +func (s *Server) handleGetSandboxIP(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + sandboxID := chi.URLParam(r, "sandboxID") + + sandbox, err := s.orchestrator.GetSandbox(r.Context(), org.ID, sandboxID) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "sandbox_id": sandboxID, + "ip_address": sandbox.IPAddress, + }) +} + +// 
handleCreateSnapshot godoc +// @Summary Create snapshot +// @Description Create a snapshot of a sandbox +// @Tags Sandboxes +// @Accept json +// @Produce json +// @Param slug path string true "Organization slug" +// @Param sandboxID path string true "Sandbox ID" +// @Param request body orchestrator.SnapshotRequest true "Snapshot details" +// @Success 201 {object} orchestrator.SnapshotResponse +// @Failure 400 {object} error.ErrorResponse +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sandboxes/{sandboxID}/snapshot [post] +func (s *Server) handleCreateSnapshot(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + sandboxID := chi.URLParam(r, "sandboxID") + + var req orchestrator.SnapshotRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + result, err := s.orchestrator.CreateSnapshot(r.Context(), org.ID, sandboxID, req.Name) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found")) + return + } + s.logger.Error("failed to create snapshot", "sandbox_id", sandboxID, "error", err) + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to create snapshot")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusCreated, result) +} + +// handleListCommands godoc +// @Summary List commands +// @Description List all commands executed in a sandbox +// @Tags Sandboxes +// @Produce json +// @Param slug path string true "Organization slug" +// @Param sandboxID path string true "Sandbox ID" +// @Success 200 {object} map[string]interface{} +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} 
error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sandboxes/{sandboxID}/commands [get] +func (s *Server) handleListCommands(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + sandboxID := chi.URLParam(r, "sandboxID") + + commands, err := s.orchestrator.ListCommands(r.Context(), org.ID, sandboxID) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found")) + return + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to list commands")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "commands": commands, + "count": len(commands), + }) +} + +// resolveOrgMembership resolves org from {slug} URL param and verifies user membership. +// Returns the org, member, and true if successful; writes error response and returns false otherwise. +func (s *Server) resolveOrgMembership(w http.ResponseWriter, r *http.Request) (*store.Organization, *store.OrgMember, bool) { + slug := chi.URLParam(r, "slug") + user := auth.UserFromContext(r.Context()) + if user == nil { + serverError.RespondError(w, http.StatusUnauthorized, fmt.Errorf("authentication required")) + return nil, nil, false + } + + org, err := s.store.GetOrganizationBySlug(r.Context(), slug) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("organization not found")) + return nil, nil, false + } + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get organization")) + return nil, nil, false + } + + member, err := s.store.GetOrgMember(r.Context(), org.ID, user.ID) + if err != nil { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("not a member of this organization")) + return nil, nil, false + } + + return org, member, true +} + +// resolveOrgRole resolves org membership and checks 
the member has at least the given role. +func (s *Server) resolveOrgRole(w http.ResponseWriter, r *http.Request, minRole store.OrgRole) (*store.Organization, *store.OrgMember, bool) { + org, member, ok := s.resolveOrgMembership(w, r) + if !ok { + return nil, nil, false + } + if !hasMinRole(member.Role, minRole) { + serverError.RespondError(w, http.StatusForbidden, fmt.Errorf("insufficient permissions")) + return nil, nil, false + } + return org, member, true +} + +var roleRanks = map[store.OrgRole]int{ + store.OrgRoleOwner: 3, + store.OrgRoleAdmin: 2, + store.OrgRoleMember: 1, +} + +func hasMinRole(role, minRole store.OrgRole) bool { + return roleRanks[role] >= roleRanks[minRole] +} diff --git a/api/internal/rest/sandbox_handlers_test.go b/api/internal/rest/sandbox_handlers_test.go new file mode 100644 index 00000000..9431abd1 --- /dev/null +++ b/api/internal/rest/sandbox_handlers_test.go @@ -0,0 +1,781 @@ +package rest + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/aspectrr/fluid.sh/api/internal/store" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" +) + +func TestHandleListSandboxes(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.ListSandboxesByOrgFn = func(_ context.Context, orgID string) ([]store.Sandbox, error) { + return []store.Sandbox{ + { + ID: "SBX-1234", + OrgID: testOrg.ID, + Name: "test-sandbox", + State: store.SandboxStateRunning, + }, + }, nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/sandboxes", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + sandboxes, ok := body["sandboxes"].([]any) + if !ok { + t.Fatal("expected sandboxes array in response") + } + if len(sandboxes) != 1 { + t.Fatalf("expected 1 sandbox, got %d", len(sandboxes)) + } +} + +func 
TestHandleGetSandbox(t *testing.T) { + testSandbox := &store.Sandbox{ + ID: "SBX-get1234", + OrgID: testOrg.ID, + HostID: "HOST-1", + Name: "get-sandbox", + State: store.SandboxStateRunning, + IPAddress: "10.0.0.5", + } + + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == testSandbox.ID { + return testSandbox, nil + } + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/sandboxes/"+testSandbox.ID, nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["id"] != testSandbox.ID { + t.Fatalf("expected id %s, got %v", testSandbox.ID, body["id"]) + } + }) + + t.Run("not found", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/sandboxes/SBX-nonexistent", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("wrong org", func(t *testing.T) { + wrongOrgSandbox := &store.Sandbox{ + ID: "SBX-wrongorg", + OrgID: "ORG-different", + Name: "wrong-org-sandbox", + State: store.SandboxStateRunning, + } + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == wrongOrgSandbox.ID { + return wrongOrgSandbox, nil + } + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", 
"/v1/orgs/test-org/sandboxes/"+wrongOrgSandbox.ID, nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) +} + +func TestHandleListCommands(t *testing.T) { + testSandbox := &store.Sandbox{ + ID: "SBX-cmds1234", + OrgID: testOrg.ID, + Name: "cmds-sandbox", + State: store.SandboxStateRunning, + } + + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == testSandbox.ID { + return testSandbox, nil + } + return nil, store.ErrNotFound + } + ms.ListSandboxCommandsFn = func(_ context.Context, sandboxID string) ([]store.Command, error) { + return []store.Command{ + { + ID: "CMD-1", + SandboxID: testSandbox.ID, + Command: "ls -la", + ExitCode: 0, + }, + }, nil + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/sandboxes/"+testSandbox.ID+"/commands", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + commands, ok := body["commands"].([]any) + if !ok { + t.Fatal("expected commands array in response") + } + if len(commands) != 1 { + t.Fatalf("expected 1 command, got %d", len(commands)) + } +} + +func TestHandleDestroySandbox(t *testing.T) { + testSandbox := &store.Sandbox{ + ID: "SBX-destroy1", + OrgID: testOrg.ID, + HostID: "HOST-1", + Name: "destroy-sandbox", + State: store.SandboxStateRunning, + } + + t.Run("not_found", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/sandboxes/SBX-nonexistent", nil) + s.Router.ServeHTTP(rr, 
req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("wrong_org", func(t *testing.T) { + wrongOrgSandbox := &store.Sandbox{ + ID: "SBX-wrongorg", + OrgID: "ORG-different", + HostID: "HOST-1", + Name: "wrong-org-sandbox", + State: store.SandboxStateRunning, + } + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == wrongOrgSandbox.ID { + return wrongOrgSandbox, nil + } + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/sandboxes/"+wrongOrgSandbox.ID, nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == testSandbox.ID { + return testSandbox, nil + } + return nil, store.ErrNotFound + } + ms.DeleteSandboxFn = func(_ context.Context, sandboxID string) error { + return nil + } + sender := &mockHostSender{ + SendAndWaitFn: func(_ context.Context, hostID string, msg *fluidv1.ControlMessage, timeout time.Duration) (*fluidv1.HostMessage, error) { + return &fluidv1.HostMessage{ + RequestId: msg.RequestId, + Payload: &fluidv1.HostMessage_SandboxDestroyed{ + SandboxDestroyed: &fluidv1.SandboxDestroyed{ + SandboxId: testSandbox.ID, + }, + }, + }, nil + }, + } + s := newTestServerWithSender(ms, sender, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/sandboxes/"+testSandbox.ID, nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if 
body["destroyed"] != true { + t.Fatalf("expected destroyed=true, got %v", body["destroyed"]) + } + }) +} + +func TestHandleRunCommand(t *testing.T) { + testSandbox := &store.Sandbox{ + ID: "SBX-runcmd1", + OrgID: testOrg.ID, + HostID: "HOST-1", + Name: "runcmd-sandbox", + State: store.SandboxStateRunning, + } + + t.Run("not_found", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + body := strings.NewReader(`{"command":"ls -la"}`) + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/sandboxes/SBX-nonexistent/run", + httptest.NewRequest("POST", "/v1/orgs/test-org/sandboxes/SBX-nonexistent/run", body)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("wrong_org", func(t *testing.T) { + wrongOrgSandbox := &store.Sandbox{ + ID: "SBX-wrongorg", + OrgID: "ORG-different", + HostID: "HOST-1", + Name: "wrong-org-sandbox", + State: store.SandboxStateRunning, + } + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == wrongOrgSandbox.ID { + return wrongOrgSandbox, nil + } + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + body := strings.NewReader(`{"command":"ls -la"}`) + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/sandboxes/"+wrongOrgSandbox.ID+"/run", + httptest.NewRequest("POST", "/v1/orgs/test-org/sandboxes/"+wrongOrgSandbox.ID+"/run", body)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + 
t.Run("empty_command", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == testSandbox.ID { + return testSandbox, nil + } + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + body := strings.NewReader(`{"command":""}`) + path := "/v1/orgs/test-org/sandboxes/" + testSandbox.ID + "/run" + req := authenticatedRequest(ms, "POST", path, + httptest.NewRequest("POST", path, body)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == testSandbox.ID { + return testSandbox, nil + } + return nil, store.ErrNotFound + } + ms.CreateCommandFn = func(_ context.Context, cmd *store.Command) error { + return nil + } + sender := &mockHostSender{ + SendAndWaitFn: func(_ context.Context, hostID string, msg *fluidv1.ControlMessage, timeout time.Duration) (*fluidv1.HostMessage, error) { + return &fluidv1.HostMessage{ + RequestId: msg.RequestId, + Payload: &fluidv1.HostMessage_CommandResult{ + CommandResult: &fluidv1.CommandResult{ + SandboxId: testSandbox.ID, + Stdout: "file1\nfile2\n", + Stderr: "", + ExitCode: 0, + DurationMs: 50, + }, + }, + }, nil + }, + } + s := newTestServerWithSender(ms, sender, nil) + + rr := httptest.NewRecorder() + body := strings.NewReader(`{"command":"ls -la"}`) + path := "/v1/orgs/test-org/sandboxes/" + testSandbox.ID + "/run" + req := authenticatedRequest(ms, "POST", path, + httptest.NewRequest("POST", path, body)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, 
got %d: %s", rr.Code, rr.Body.String()) + } + + respBody := parseJSONResponse(rr) + if respBody["command"] != "ls -la" { + t.Fatalf("expected command 'ls -la', got %v", respBody["command"]) + } + }) +} + +func TestHandleStartSandbox(t *testing.T) { + testSandbox := &store.Sandbox{ + ID: "SBX-start1", + OrgID: testOrg.ID, + HostID: "HOST-1", + Name: "start-sandbox", + State: store.SandboxStateStopped, + } + + t.Run("not_found", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/sandboxes/SBX-nonexistent/start", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("wrong_org", func(t *testing.T) { + wrongOrgSandbox := &store.Sandbox{ + ID: "SBX-wrongorg", + OrgID: "ORG-different", + HostID: "HOST-1", + Name: "wrong-org-sandbox", + State: store.SandboxStateStopped, + } + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == wrongOrgSandbox.ID { + return wrongOrgSandbox, nil + } + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/sandboxes/"+wrongOrgSandbox.ID+"/start", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == testSandbox.ID { + return testSandbox, nil + } + return nil, store.ErrNotFound + } + 
ms.UpdateSandboxFn = func(_ context.Context, sandbox *store.Sandbox) error { + return nil + } + sender := &mockHostSender{ + SendAndWaitFn: func(_ context.Context, hostID string, msg *fluidv1.ControlMessage, timeout time.Duration) (*fluidv1.HostMessage, error) { + return &fluidv1.HostMessage{ + RequestId: msg.RequestId, + Payload: &fluidv1.HostMessage_SandboxStarted{ + SandboxStarted: &fluidv1.SandboxStarted{ + SandboxId: testSandbox.ID, + State: "running", + IpAddress: "10.0.0.10", + }, + }, + }, nil + }, + } + s := newTestServerWithSender(ms, sender, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/sandboxes/"+testSandbox.ID+"/start", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["started"] != true { + t.Fatalf("expected started=true, got %v", body["started"]) + } + }) +} + +func TestHandleStopSandbox(t *testing.T) { + testSandbox := &store.Sandbox{ + ID: "SBX-stop1", + OrgID: testOrg.ID, + HostID: "HOST-1", + Name: "stop-sandbox", + State: store.SandboxStateRunning, + } + + t.Run("not_found", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/sandboxes/SBX-nonexistent/stop", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("wrong_org", func(t *testing.T) { + wrongOrgSandbox := &store.Sandbox{ + ID: "SBX-wrongorg", + OrgID: "ORG-different", + HostID: "HOST-1", + Name: "wrong-org-sandbox", + State: store.SandboxStateRunning, + } + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ 
context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == wrongOrgSandbox.ID { + return wrongOrgSandbox, nil + } + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/sandboxes/"+wrongOrgSandbox.ID+"/stop", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == testSandbox.ID { + return testSandbox, nil + } + return nil, store.ErrNotFound + } + ms.UpdateSandboxFn = func(_ context.Context, sandbox *store.Sandbox) error { + return nil + } + sender := &mockHostSender{ + SendAndWaitFn: func(_ context.Context, hostID string, msg *fluidv1.ControlMessage, timeout time.Duration) (*fluidv1.HostMessage, error) { + return &fluidv1.HostMessage{ + RequestId: msg.RequestId, + Payload: &fluidv1.HostMessage_SandboxStopped{ + SandboxStopped: &fluidv1.SandboxStopped{ + SandboxId: testSandbox.ID, + State: "stopped", + }, + }, + }, nil + }, + } + s := newTestServerWithSender(ms, sender, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "POST", "/v1/orgs/test-org/sandboxes/"+testSandbox.ID+"/stop", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["stopped"] != true { + t.Fatalf("expected stopped=true, got %v", body["stopped"]) + } + }) +} + +func TestHandleGetSandboxIP(t *testing.T) { + testSandbox := &store.Sandbox{ + ID: "SBX-ip1", + OrgID: testOrg.ID, + HostID: "HOST-1", + Name: "ip-sandbox", + State: store.SandboxStateRunning, + IPAddress: "10.0.0.42", + } + + t.Run("not_found", func(t *testing.T) { + ms := &mockStore{} 
+ setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/sandboxes/SBX-nonexistent/ip", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("wrong_org", func(t *testing.T) { + wrongOrgSandbox := &store.Sandbox{ + ID: "SBX-wrongorg", + OrgID: "ORG-different", + HostID: "HOST-1", + Name: "wrong-org-sandbox", + State: store.SandboxStateRunning, + IPAddress: "10.0.0.99", + } + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == wrongOrgSandbox.ID { + return wrongOrgSandbox, nil + } + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/sandboxes/"+wrongOrgSandbox.ID+"/ip", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("success", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == testSandbox.ID { + return testSandbox, nil + } + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/sandboxes/"+testSandbox.ID+"/ip", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + + body := parseJSONResponse(rr) + if body["ip_address"] != "10.0.0.42" { + t.Fatalf("expected ip_address=10.0.0.42, got %v", body["ip_address"]) + } + if body["sandbox_id"] 
!= testSandbox.ID { + t.Fatalf("expected sandbox_id=%s, got %v", testSandbox.ID, body["sandbox_id"]) + } + }) +} + +func TestHandleCreateSnapshot(t *testing.T) { + testSandbox := &store.Sandbox{ + ID: "SBX-snap1", + OrgID: testOrg.ID, + HostID: "HOST-1", + Name: "snap-sandbox", + State: store.SandboxStateRunning, + } + + t.Run("not_found", func(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + body := strings.NewReader(`{"name":"my-snapshot"}`) + path := "/v1/orgs/test-org/sandboxes/SBX-nonexistent/snapshot" + req := authenticatedRequest(ms, "POST", path, + httptest.NewRequest("POST", path, body)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("wrong_org", func(t *testing.T) { + wrongOrgSandbox := &store.Sandbox{ + ID: "SBX-wrongorg", + OrgID: "ORG-different", + HostID: "HOST-1", + Name: "wrong-org-sandbox", + State: store.SandboxStateRunning, + } + ms := &mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == wrongOrgSandbox.ID { + return wrongOrgSandbox, nil + } + return nil, store.ErrNotFound + } + s := newTestServer(ms, nil) + + rr := httptest.NewRecorder() + body := strings.NewReader(`{"name":"my-snapshot"}`) + path := "/v1/orgs/test-org/sandboxes/" + wrongOrgSandbox.ID + "/snapshot" + req := authenticatedRequest(ms, "POST", path, + httptest.NewRequest("POST", path, body)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d: %s", rr.Code, rr.Body.String()) + } + }) + + t.Run("success", func(t *testing.T) { + ms := 
&mockStore{} + setupOrgMembership(ms) + ms.GetSandboxFn = func(_ context.Context, sandboxID string) (*store.Sandbox, error) { + if sandboxID == testSandbox.ID { + return testSandbox, nil + } + return nil, store.ErrNotFound + } + sender := &mockHostSender{ + SendAndWaitFn: func(_ context.Context, hostID string, msg *fluidv1.ControlMessage, timeout time.Duration) (*fluidv1.HostMessage, error) { + return &fluidv1.HostMessage{ + RequestId: msg.RequestId, + Payload: &fluidv1.HostMessage_SnapshotCreated{ + SnapshotCreated: &fluidv1.SnapshotCreated{ + SandboxId: testSandbox.ID, + SnapshotId: "SNAP-abc123", + SnapshotName: "my-snapshot", + }, + }, + }, nil + }, + } + s := newTestServerWithSender(ms, sender, nil) + + rr := httptest.NewRecorder() + body := strings.NewReader(`{"name":"my-snapshot"}`) + path := "/v1/orgs/test-org/sandboxes/" + testSandbox.ID + "/snapshot" + req := authenticatedRequest(ms, "POST", path, + httptest.NewRequest("POST", path, body)) + req.Header.Set("Content-Type", "application/json") + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusCreated { + t.Fatalf("expected 201, got %d: %s", rr.Code, rr.Body.String()) + } + + respBody := parseJSONResponse(rr) + if respBody["snapshot_name"] != "my-snapshot" { + t.Fatalf("expected snapshot_name=my-snapshot, got %v", respBody["snapshot_name"]) + } + if respBody["sandbox_id"] != testSandbox.ID { + t.Fatalf("expected sandbox_id=%s, got %v", testSandbox.ID, respBody["sandbox_id"]) + } + }) +} diff --git a/api/internal/rest/server.go b/api/internal/rest/server.go new file mode 100644 index 00000000..0d987498 --- /dev/null +++ b/api/internal/rest/server.go @@ -0,0 +1,219 @@ +package rest + +import ( + "fmt" + "log/slog" + "net/http" + + scalar "github.com/MarceloPetrucio/go-scalar-api-reference" + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" + + "github.com/aspectrr/fluid.sh/api/internal/auth" + "github.com/aspectrr/fluid.sh/api/internal/config" + 
"github.com/aspectrr/fluid.sh/api/internal/orchestrator" + "github.com/aspectrr/fluid.sh/api/internal/store" + "github.com/aspectrr/fluid.sh/api/internal/telemetry" +) + +type Server struct { + Router *chi.Mux + store store.Store + cfg *config.Config + orchestrator *orchestrator.Orchestrator + telemetry telemetry.Service + logger *slog.Logger + openapiYAML []byte +} + +func NewServer(st store.Store, cfg *config.Config, orch *orchestrator.Orchestrator, tel telemetry.Service, openapiYAML []byte) *Server { + if tel == nil { + tel = &telemetry.NoopService{} + } + s := &Server{ + store: st, + cfg: cfg, + orchestrator: orch, + telemetry: tel, + logger: slog.Default().With("component", "rest"), + openapiYAML: openapiYAML, + } + // stripe.Key is set once in billing.NewMeterManager to avoid race conditions. + + s.Router = s.routes() + return s +} + +func (s *Server) routes() *chi.Mux { + r := chi.NewRouter() + + // Middleware + r.Use(middleware.RequestID) + r.Use(middleware.Logger) + r.Use(middleware.Recoverer) + r.Use(corsMiddleware(s.cfg.Frontend.URL)) + + trustedNets := parseCIDRs(s.cfg.API.TrustedProxies, s.logger) + + // Public routes + r.Get("/v1/health", s.handleHealth) + + r.Get("/v1/docs/openapi.yaml", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/x-yaml") + _, _ = w.Write(s.openapiYAML) + }) + + if s.cfg.API.EnableDocs { + r.Get("/v1/docs", func(w http.ResponseWriter, r *http.Request) { + html, err := scalar.ApiReferenceHTML(&scalar.Options{ + SpecURL: "/v1/docs/openapi.yaml", + CustomOptions: scalar.CustomOptions{ + PageTitle: "Fluid API Reference", + }, + DarkMode: true, + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "text/html") + _, _ = fmt.Fprintln(w, html) + }) + } + + // Auth routes (public) + r.Route("/v1/auth", func(r chi.Router) { + r.With(rateLimitByIP(0.1, 5, trustedNets)).Post("/register", s.handleRegister) + 
r.With(rateLimitByIP(0.2, 10, trustedNets)).Post("/login", s.handleLogin) + + // OAuth (rate-limited) + r.Group(func(r chi.Router) { + r.Use(rateLimitByIP(0.5, 10, trustedNets)) + r.Get("/github", s.handleGitHubLogin) + r.Get("/github/callback", s.handleGitHubCallback) + r.Get("/google", s.handleGoogleLogin) + r.Get("/google/callback", s.handleGoogleCallback) + }) + + // Protected auth routes + r.Group(func(r chi.Router) { + r.Use(auth.RequireAuth(s.store, s.cfg.Auth.SecureCookies)) + r.Post("/logout", s.handleLogout) + r.Get("/me", s.handleMe) + r.Post("/onboarding", s.handleOnboarding) + }) + }) + + // Protected routes + r.Group(func(r chi.Router) { + r.Use(auth.RequireAuth(s.store, s.cfg.Auth.SecureCookies)) + + // Organizations + r.Route("/v1/orgs", func(r chi.Router) { + r.Post("/", s.handleCreateOrg) + r.Get("/", s.handleListOrgs) + r.Route("/{slug}", func(r chi.Router) { + r.Get("/", s.handleGetOrg) + r.Patch("/", s.handleUpdateOrg) + r.Delete("/", s.handleDeleteOrg) + + // Members + r.Get("/members", s.handleListMembers) + r.Post("/members", s.handleAddMember) + r.Delete("/members/{memberID}", s.handleRemoveMember) + + // Billing + r.Get("/billing", s.handleGetBilling) + r.Post("/billing/subscribe", s.handleSubscribe) + r.Post("/billing/portal", s.handleBillingPortal) + r.Get("/billing/usage", s.handleGetUsage) + + // Sandboxes + r.Post("/sandboxes", s.handleCreateSandbox) + r.Get("/sandboxes", s.handleListSandboxes) + r.Route("/sandboxes/{sandboxID}", func(r chi.Router) { + r.Get("/", s.handleGetSandbox) + r.Delete("/", s.handleDestroySandbox) + r.Post("/run", s.handleRunCommand) + r.Post("/start", s.handleStartSandbox) + r.Post("/stop", s.handleStopSandbox) + r.Get("/ip", s.handleGetSandboxIP) + r.Post("/snapshot", s.handleCreateSnapshot) + r.Get("/commands", s.handleListCommands) + }) + + // Hosts + tokens + r.Get("/hosts", s.handleListHosts) + r.Get("/hosts/{hostID}", s.handleGetHost) + r.Post("/hosts/tokens", s.handleCreateHostToken) + 
r.Get("/hosts/tokens", s.handleListHostTokens) + r.Delete("/hosts/tokens/{tokenID}", s.handleDeleteHostToken) + + // Source Hosts + r.Post("/source-hosts/discover", s.handleDiscoverSourceHosts) + r.Post("/source-hosts", s.handleConfirmSourceHosts) + r.Get("/source-hosts", s.handleListSourceHosts) + r.Delete("/source-hosts/{sourceHostID}", s.handleDeleteSourceHost) + + // Source VMs + r.Get("/vms", s.handleListVMs) + r.Post("/sources/{vm}/prepare", s.handlePrepareSourceVM) + r.Post("/sources/{vm}/run", s.handleRunSourceCommand) + r.Post("/sources/{vm}/read", s.handleReadSourceFile) + + // Agent - commented out, not yet ready for integration + // r.Post("/agent/chat", s.handleAgentChat) + // r.Get("/agent/conversations", s.handleListConversations) + // r.Get("/agent/conversations/{conversationID}", s.handleGetConversation) + // r.Get("/agent/conversations/{conversationID}/messages", s.handleListMessages) + // r.Delete("/agent/conversations/{conversationID}", s.handleDeleteConversation) + // r.Get("/agent/models", s.handleListModels) + + // Playbooks - commented out, not yet ready for integration + // r.Post("/playbooks", s.handleCreatePlaybook) + // r.Get("/playbooks", s.handleListPlaybooks) + // r.Route("/playbooks/{playbookID}", func(r chi.Router) { + // r.Get("/", s.handleGetPlaybook) + // r.Patch("/", s.handleUpdatePlaybook) + // r.Delete("/", s.handleDeletePlaybook) + // r.Post("/tasks", s.handleCreatePlaybookTask) + // r.Get("/tasks", s.handleListPlaybookTasks) + // r.Put("/tasks/reorder", s.handleReorderPlaybookTasks) + // r.Patch("/tasks/{taskID}", s.handleUpdatePlaybookTask) + // r.Delete("/tasks/{taskID}", s.handleDeletePlaybookTask) + // }) + }) + }) + }) + + // Docs progress (public, ephemeral session codes) + r.Post("/v1/docs-progress/register", s.handleDocsProgressRegister) + r.Post("/v1/docs-progress/complete", s.handleDocsProgressComplete) + r.Get("/v1/docs-progress/progress", s.handleDocsProgressGet) + + // Public billing endpoints + 
r.Post("/v1/billing/calculator", s.handleCalculator) + r.Post("/v1/webhooks/stripe", s.handleStripeWebhook) + + return r +} + +func corsMiddleware(frontendURL string) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", frontendURL) + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PATCH, PUT, DELETE, OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization") + w.Header().Set("Access-Control-Allow-Credentials", "true") + w.Header().Set("Access-Control-Max-Age", "86400") + w.Header().Add("Vary", "Origin") + + if r.Method == http.MethodOptions { + w.WriteHeader(http.StatusNoContent) + return + } + + next.ServeHTTP(w, r) + }) + } +} diff --git a/api/internal/rest/source_handlers.go b/api/internal/rest/source_handlers.go new file mode 100644 index 00000000..b9c38eb4 --- /dev/null +++ b/api/internal/rest/source_handlers.go @@ -0,0 +1,183 @@ +package rest + +import ( + "fmt" + "net/http" + "path/filepath" + "strings" + + "github.com/go-chi/chi/v5" + + serverError "github.com/aspectrr/fluid.sh/api/internal/error" + serverJSON "github.com/aspectrr/fluid.sh/api/internal/json" + "github.com/aspectrr/fluid.sh/api/internal/orchestrator" +) + +// handleListVMs godoc +// @Summary List source VMs +// @Description List all source VMs across connected hosts +// @Tags Source VMs +// @Produce json +// @Param slug path string true "Organization slug" +// @Success 200 {object} map[string]interface{} +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/vms [get] +func (s *Server) handleListVMs(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + vms, err := s.orchestrator.ListVMs(r.Context(), org.ID) + if 
err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to list VMs")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "vms": vms, + "count": len(vms), + }) +} + +// handlePrepareSourceVM godoc +// @Summary Prepare source VM +// @Description Prepare a source VM for sandbox cloning +// @Tags Source VMs +// @Accept json +// @Produce json +// @Param slug path string true "Organization slug" +// @Param vm path string true "Source VM name" +// @Param request body orchestrator.PrepareRequest true "SSH credentials" +// @Success 200 {object} map[string]interface{} +// @Failure 400 {object} error.ErrorResponse +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sources/{vm}/prepare [post] +func (s *Server) handlePrepareSourceVM(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + vm := chi.URLParam(r, "vm") + + var req orchestrator.PrepareRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + result, err := s.orchestrator.PrepareSourceVM(r.Context(), org.ID, vm, req.SSHUser, req.SSHKeyPath) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to prepare source VM")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, result) +} + +// handleRunSourceCommand godoc +// @Summary Run source command +// @Description Execute a read-only command on a source VM +// @Tags Source VMs +// @Accept json +// @Produce json +// @Param slug path string true "Organization slug" +// @Param vm path string true "Source VM name" +// @Param request body orchestrator.RunSourceRequest true "Command to run" +// @Success 200 {object} orchestrator.SourceCommandResult +// @Failure 400 
{object} error.ErrorResponse +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sources/{vm}/run [post] +func (s *Server) handleRunSourceCommand(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + vm := chi.URLParam(r, "vm") + + var req orchestrator.RunSourceRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.Command == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("command is required")) + return + } + + if len(req.Command) > 4096 { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("command too long (max 4096 bytes)")) + return + } + + result, err := s.orchestrator.RunSourceCommand(r.Context(), org.ID, vm, req.Command, req.TimeoutSec) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to run source command")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, result) +} + +// handleReadSourceFile godoc +// @Summary Read source file +// @Description Read a file from a source VM +// @Tags Source VMs +// @Accept json +// @Produce json +// @Param slug path string true "Organization slug" +// @Param vm path string true "Source VM name" +// @Param request body orchestrator.ReadSourceRequest true "File path" +// @Success 200 {object} orchestrator.SourceFileResult +// @Failure 400 {object} error.ErrorResponse +// @Failure 403 {object} error.ErrorResponse +// @Failure 404 {object} error.ErrorResponse +// @Failure 500 {object} error.ErrorResponse +// @Security CookieAuth +// @Router /orgs/{slug}/sources/{vm}/read [post] +func (s *Server) handleReadSourceFile(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + vm := 
chi.URLParam(r, "vm") + + var req orchestrator.ReadSourceRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.Path == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("path is required")) + return + } + + cleaned := filepath.Clean(req.Path) + if !strings.HasPrefix(cleaned, "/") || strings.Contains(cleaned, "..") { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("invalid file path")) + return + } + req.Path = cleaned + + result, err := s.orchestrator.ReadSourceFile(r.Context(), org.ID, vm, req.Path) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to read source file")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, result) +} diff --git a/api/internal/rest/source_handlers_test.go b/api/internal/rest/source_handlers_test.go new file mode 100644 index 00000000..ee79fdf8 --- /dev/null +++ b/api/internal/rest/source_handlers_test.go @@ -0,0 +1,41 @@ +package rest + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestHandleListVMs(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + + // With an empty registry (no connected hosts), ListVMs returns empty. 
+ s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/vms", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + resp := parseJSONResponse(rr) + count, ok := resp["count"].(float64) + if !ok { + t.Fatalf("expected count field, got %v", resp) + } + if count != 0 { + t.Fatalf("expected count=0 (no connected hosts), got %v", count) + } + vms, ok := resp["vms"] + if !ok { + t.Fatal("expected vms field in response") + } + // vms should be nil or empty slice serialized as null or [] + if vms != nil { + vmList, ok := vms.([]any) + if ok && len(vmList) != 0 { + t.Fatalf("expected empty vms list, got %v", vms) + } + } +} diff --git a/api/internal/rest/source_host_handlers.go b/api/internal/rest/source_host_handlers.go new file mode 100644 index 00000000..6d2f7f2c --- /dev/null +++ b/api/internal/rest/source_host_handlers.go @@ -0,0 +1,175 @@ +package rest + +import ( + "fmt" + "net/http" + + "github.com/go-chi/chi/v5" + "github.com/google/uuid" + + serverError "github.com/aspectrr/fluid.sh/api/internal/error" + serverJSON "github.com/aspectrr/fluid.sh/api/internal/json" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +type discoverSourceHostsRequest struct { + SSHConfigContent string `json:"ssh_config_content"` +} + +type confirmSourceHostsRequest struct { + Hosts []confirmSourceHost `json:"hosts"` +} + +type confirmSourceHost struct { + Name string `json:"name"` + Hostname string `json:"hostname"` + Type string `json:"type"` // "libvirt" or "proxmox" + SSHUser string `json:"ssh_user"` + SSHPort int `json:"ssh_port"` + SSHIdentityFile string `json:"ssh_identity_file"` + ProxmoxHost string `json:"proxmox_host,omitempty"` + ProxmoxTokenID string `json:"proxmox_token_id,omitempty"` + ProxmoxSecret string `json:"proxmox_secret,omitempty"` + ProxmoxNode string `json:"proxmox_node,omitempty"` + ProxmoxVerifySSL bool 
`json:"proxmox_verify_ssl,omitempty"` + VMs []string `json:"vms,omitempty"` +} + +func (s *Server) handleDiscoverSourceHosts(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + var req discoverSourceHostsRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if req.SSHConfigContent == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("ssh_config_content is required")) + return + } + + // Use the orchestrator to discover hosts via the daemon + results, err := s.orchestrator.DiscoverSourceHosts(r.Context(), org.ID, req.SSHConfigContent) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("discovery failed")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "hosts": results, + "count": len(results), + }) +} + +func (s *Server) handleConfirmSourceHosts(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + var req confirmSourceHostsRequest + if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { + serverError.RespondError(w, http.StatusBadRequest, err) + return + } + + if len(req.Hosts) == 0 { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("at least one host is required")) + return + } + + created := make([]*store.SourceHost, 0, len(req.Hosts)) + for _, h := range req.Hosts { + hostType := h.Type + if hostType == "" { + hostType = "libvirt" + } + port := h.SSHPort + if port == 0 { + port = 22 + } + + sh := &store.SourceHost{ + ID: uuid.New().String(), + OrgID: org.ID, + Name: h.Name, + Hostname: h.Hostname, + Type: hostType, + SSHUser: h.SSHUser, + SSHPort: port, + SSHIdentityFile: h.SSHIdentityFile, + ProxmoxHost: h.ProxmoxHost, + ProxmoxTokenID: h.ProxmoxTokenID, + ProxmoxSecret: h.ProxmoxSecret, + ProxmoxNode: h.ProxmoxNode, + 
ProxmoxVerifySSL: h.ProxmoxVerifySSL, + VMs: h.VMs, + } + + if err := s.store.CreateSourceHost(r.Context(), sh); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to save source host")) + return + } + created = append(created, sh) + } + + _ = serverJSON.RespondJSON(w, http.StatusCreated, map[string]any{ + "source_hosts": created, + "count": len(created), + }) +} + +func (s *Server) handleListSourceHosts(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + hosts, err := s.store.ListSourceHostsByOrg(r.Context(), org.ID) + if err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("list source hosts failed")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "source_hosts": hosts, + "count": len(hosts), + }) +} + +func (s *Server) handleDeleteSourceHost(w http.ResponseWriter, r *http.Request) { + org, _, ok := s.resolveOrgMembership(w, r) + if !ok { + return + } + + id := chi.URLParam(r, "sourceHostID") + if id == "" { + serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("sourceHostID is required")) + return + } + + // Verify ownership + host, err := s.store.GetSourceHost(r.Context(), id) + if err != nil { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("source host not found")) + return + } + if host.OrgID != org.ID { + serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("source host not found")) + return + } + + if err := s.store.DeleteSourceHost(r.Context(), id); err != nil { + serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to delete source host")) + return + } + + _ = serverJSON.RespondJSON(w, http.StatusOK, map[string]any{ + "deleted": true, + }) +} diff --git a/api/internal/rest/source_host_handlers_test.go b/api/internal/rest/source_host_handlers_test.go new file mode 100644 index 00000000..2320ed9b --- /dev/null +++ 
b/api/internal/rest/source_host_handlers_test.go @@ -0,0 +1,72 @@ +package rest + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +func TestHandleListSourceHosts(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + ms.ListSourceHostsByOrgFn = func(_ context.Context, orgID string) ([]*store.SourceHost, error) { + if orgID != testOrg.ID { + t.Fatalf("unexpected orgID: %s", orgID) + } + return []*store.SourceHost{ + {ID: "sh-1", OrgID: testOrg.ID, Name: "host-1", Hostname: "192.168.1.10", Type: "libvirt"}, + {ID: "sh-2", OrgID: testOrg.ID, Name: "host-2", Hostname: "192.168.1.11", Type: "proxmox"}, + }, nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "GET", "/v1/orgs/test-org/source-hosts", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + resp := parseJSONResponse(rr) + if resp["count"] != float64(2) { + t.Fatalf("expected count=2, got %v", resp["count"]) + } + hosts, ok := resp["source_hosts"].([]any) + if !ok || len(hosts) != 2 { + t.Fatalf("expected 2 source hosts, got %v", resp["source_hosts"]) + } +} + +func TestHandleDeleteSourceHost(t *testing.T) { + ms := &mockStore{} + setupOrgMembership(ms) + deleted := false + ms.GetSourceHostFn = func(_ context.Context, id string) (*store.SourceHost, error) { + return &store.SourceHost{ID: id, OrgID: testOrg.ID, Name: "host-1", Hostname: "192.168.1.10", Type: "libvirt"}, nil + } + ms.DeleteSourceHostFn = func(_ context.Context, id string) error { + if id == "sh-1" { + deleted = true + } + return nil + } + + s := newTestServer(ms, nil) + rr := httptest.NewRecorder() + req := authenticatedRequest(ms, "DELETE", "/v1/orgs/test-org/source-hosts/sh-1", nil) + s.Router.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected 200, got %d: %s", rr.Code, rr.Body.String()) + } + if 
!deleted { + t.Fatal("expected DeleteSourceHost to be called with id sh-1") + } + resp := parseJSONResponse(rr) + if resp["deleted"] != true { + t.Fatalf("expected deleted=true, got %v", resp["deleted"]) + } +} diff --git a/api/internal/rest/testhelpers_test.go b/api/internal/rest/testhelpers_test.go new file mode 100644 index 00000000..f8ad0893 --- /dev/null +++ b/api/internal/rest/testhelpers_test.go @@ -0,0 +1,883 @@ +package rest + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "time" + + "github.com/aspectrr/fluid.sh/api/internal/auth" + "github.com/aspectrr/fluid.sh/api/internal/config" + "github.com/aspectrr/fluid.sh/api/internal/orchestrator" + "github.com/aspectrr/fluid.sh/api/internal/registry" + "github.com/aspectrr/fluid.sh/api/internal/store" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" +) + +// --------------------------------------------------------------------------- +// mockStore implements store.Store with function-field delegation +// --------------------------------------------------------------------------- + +type mockStore struct { + // User + CreateUserFn func(ctx context.Context, u *store.User) error + GetUserFn func(ctx context.Context, id string) (*store.User, error) + GetUserByEmailFn func(ctx context.Context, email string) (*store.User, error) + UpdateUserFn func(ctx context.Context, u *store.User) error + + // OAuth + CreateOAuthAccountFn func(ctx context.Context, oa *store.OAuthAccount) error + GetOAuthAccountFn func(ctx context.Context, provider, providerID string) (*store.OAuthAccount, error) + GetOAuthAccountsByUserFn func(ctx context.Context, userID string) ([]*store.OAuthAccount, error) + + // Session + CreateSessionFn func(ctx context.Context, s *store.Session) error + GetSessionFn func(ctx context.Context, id string) (*store.Session, error) + DeleteSessionFn func(ctx context.Context, id string) error + DeleteExpiredSessionsFn func(ctx context.Context) error + + // 
Organization + CreateOrganizationFn func(ctx context.Context, org *store.Organization) error + GetOrganizationFn func(ctx context.Context, id string) (*store.Organization, error) + GetOrganizationBySlugFn func(ctx context.Context, slug string) (*store.Organization, error) + ListOrganizationsByUserFn func(ctx context.Context, userID string) ([]*store.Organization, error) + UpdateOrganizationFn func(ctx context.Context, org *store.Organization) error + DeleteOrganizationFn func(ctx context.Context, id string) error + + // OrgMember + CreateOrgMemberFn func(ctx context.Context, m *store.OrgMember) error + GetOrgMemberFn func(ctx context.Context, orgID, userID string) (*store.OrgMember, error) + GetOrgMemberByIDFn func(ctx context.Context, orgID, memberID string) (*store.OrgMember, error) + ListOrgMembersFn func(ctx context.Context, orgID string) ([]*store.OrgMember, error) + DeleteOrgMemberFn func(ctx context.Context, orgID, id string) error + + // Subscription + CreateSubscriptionFn func(ctx context.Context, sub *store.Subscription) error + GetSubscriptionByOrgFn func(ctx context.Context, orgID string) (*store.Subscription, error) + UpdateSubscriptionFn func(ctx context.Context, sub *store.Subscription) error + + // Usage + CreateUsageRecordFn func(ctx context.Context, rec *store.UsageRecord) error + ListUsageRecordsFn func(ctx context.Context, orgID string, from, to time.Time) ([]*store.UsageRecord, error) + + // Host + CreateHostFn func(ctx context.Context, host *store.Host) error + GetHostFn func(ctx context.Context, hostID string) (*store.Host, error) + ListHostsFn func(ctx context.Context) ([]store.Host, error) + UpdateHostFn func(ctx context.Context, host *store.Host) error + UpdateHostHeartbeatFn func(ctx context.Context, hostID string, availCPUs int32, availMemMB int64, availDiskMB int64) error + + // Sandbox + CreateSandboxFn func(ctx context.Context, sandbox *store.Sandbox) error + GetSandboxFn func(ctx context.Context, sandboxID string) (*store.Sandbox, 
error) + ListSandboxesFn func(ctx context.Context) ([]store.Sandbox, error) + ListSandboxesByOrgFn func(ctx context.Context, orgID string) ([]store.Sandbox, error) + UpdateSandboxFn func(ctx context.Context, sandbox *store.Sandbox) error + DeleteSandboxFn func(ctx context.Context, sandboxID string) error + GetSandboxesByHostIDFn func(ctx context.Context, hostID string) ([]store.Sandbox, error) + ListExpiredSandboxesFn func(ctx context.Context, defaultTTL time.Duration) ([]store.Sandbox, error) + + // Command + CreateCommandFn func(ctx context.Context, cmd *store.Command) error + ListSandboxCommandsFn func(ctx context.Context, sandboxID string) ([]store.Command, error) + + // SourceHost + CreateSourceHostFn func(ctx context.Context, sh *store.SourceHost) error + GetSourceHostFn func(ctx context.Context, id string) (*store.SourceHost, error) + ListSourceHostsByOrgFn func(ctx context.Context, orgID string) ([]*store.SourceHost, error) + DeleteSourceHostFn func(ctx context.Context, id string) error + + // HostToken + CreateHostTokenFn func(ctx context.Context, token *store.HostToken) error + GetHostTokenByHashFn func(ctx context.Context, hash string) (*store.HostToken, error) + ListHostTokensByOrgFn func(ctx context.Context, orgID string) ([]store.HostToken, error) + DeleteHostTokenFn func(ctx context.Context, orgID, id string) error + + // Agent Conversations, Messages, Playbooks, Tasks - commented out + // (types are commented out in store.go) + + // Billing helpers + GetOrganizationByStripeCustomerIDFn func(ctx context.Context, customerID string) (*store.Organization, error) + GetModelMeterFn func(ctx context.Context, modelID string) (*store.ModelMeter, error) + CreateModelMeterFn func(ctx context.Context, m *store.ModelMeter) error + GetOrgModelSubscriptionFn func(ctx context.Context, orgID, modelID string) (*store.OrgModelSubscription, error) + CreateOrgModelSubscriptionFn func(ctx context.Context, s *store.OrgModelSubscription) error + SumTokenUsageFn func(ctx 
context.Context, orgID string, from, to time.Time) (float64, error) + ListActiveSubscriptionsFn func(ctx context.Context) ([]*store.Subscription, error) + + // WithTx + WithTxFn func(ctx context.Context, fn func(tx store.DataStore) error) error +} + +func (m *mockStore) call(name string) { panic(fmt.Sprintf("mockStore.%s not configured", name)) } + +// Store interface +func (m *mockStore) Config() store.Config { return store.Config{} } +func (m *mockStore) Ping(context.Context) error { return nil } +func (m *mockStore) Close() error { return nil } + +func (m *mockStore) WithTx(ctx context.Context, fn func(tx store.DataStore) error) error { + if m.WithTxFn != nil { + return m.WithTxFn(ctx, fn) + } + return fn(m) +} + +// User +func (m *mockStore) CreateUser(ctx context.Context, u *store.User) error { + if m.CreateUserFn != nil { + return m.CreateUserFn(ctx, u) + } + m.call("CreateUser") + return nil +} +func (m *mockStore) GetUser(ctx context.Context, id string) (*store.User, error) { + if m.GetUserFn != nil { + return m.GetUserFn(ctx, id) + } + m.call("GetUser") + return nil, nil +} +func (m *mockStore) GetUserByEmail(ctx context.Context, email string) (*store.User, error) { + if m.GetUserByEmailFn != nil { + return m.GetUserByEmailFn(ctx, email) + } + m.call("GetUserByEmail") + return nil, nil +} +func (m *mockStore) UpdateUser(ctx context.Context, u *store.User) error { + if m.UpdateUserFn != nil { + return m.UpdateUserFn(ctx, u) + } + m.call("UpdateUser") + return nil +} + +// OAuth +func (m *mockStore) CreateOAuthAccount(ctx context.Context, oa *store.OAuthAccount) error { + if m.CreateOAuthAccountFn != nil { + return m.CreateOAuthAccountFn(ctx, oa) + } + m.call("CreateOAuthAccount") + return nil +} +func (m *mockStore) GetOAuthAccount(ctx context.Context, provider, providerID string) (*store.OAuthAccount, error) { + if m.GetOAuthAccountFn != nil { + return m.GetOAuthAccountFn(ctx, provider, providerID) + } + m.call("GetOAuthAccount") + return nil, nil +} +func 
(m *mockStore) GetOAuthAccountsByUser(ctx context.Context, userID string) ([]*store.OAuthAccount, error) { + if m.GetOAuthAccountsByUserFn != nil { + return m.GetOAuthAccountsByUserFn(ctx, userID) + } + m.call("GetOAuthAccountsByUser") + return nil, nil +} + +// Session +func (m *mockStore) CreateSession(ctx context.Context, s *store.Session) error { + if m.CreateSessionFn != nil { + return m.CreateSessionFn(ctx, s) + } + m.call("CreateSession") + return nil +} +func (m *mockStore) GetSession(ctx context.Context, id string) (*store.Session, error) { + if m.GetSessionFn != nil { + return m.GetSessionFn(ctx, id) + } + m.call("GetSession") + return nil, nil +} +func (m *mockStore) DeleteSession(ctx context.Context, id string) error { + if m.DeleteSessionFn != nil { + return m.DeleteSessionFn(ctx, id) + } + m.call("DeleteSession") + return nil +} +func (m *mockStore) DeleteExpiredSessions(ctx context.Context) error { + if m.DeleteExpiredSessionsFn != nil { + return m.DeleteExpiredSessionsFn(ctx) + } + m.call("DeleteExpiredSessions") + return nil +} + +// Organization +func (m *mockStore) CreateOrganization(ctx context.Context, org *store.Organization) error { + if m.CreateOrganizationFn != nil { + return m.CreateOrganizationFn(ctx, org) + } + m.call("CreateOrganization") + return nil +} +func (m *mockStore) GetOrganization(ctx context.Context, id string) (*store.Organization, error) { + if m.GetOrganizationFn != nil { + return m.GetOrganizationFn(ctx, id) + } + m.call("GetOrganization") + return nil, nil +} +func (m *mockStore) GetOrganizationBySlug(ctx context.Context, slug string) (*store.Organization, error) { + if m.GetOrganizationBySlugFn != nil { + return m.GetOrganizationBySlugFn(ctx, slug) + } + m.call("GetOrganizationBySlug") + return nil, nil +} +func (m *mockStore) ListOrganizationsByUser(ctx context.Context, userID string) ([]*store.Organization, error) { + if m.ListOrganizationsByUserFn != nil { + return m.ListOrganizationsByUserFn(ctx, userID) + } + 
m.call("ListOrganizationsByUser") + return nil, nil +} +func (m *mockStore) UpdateOrganization(ctx context.Context, org *store.Organization) error { + if m.UpdateOrganizationFn != nil { + return m.UpdateOrganizationFn(ctx, org) + } + m.call("UpdateOrganization") + return nil +} +func (m *mockStore) DeleteOrganization(ctx context.Context, id string) error { + if m.DeleteOrganizationFn != nil { + return m.DeleteOrganizationFn(ctx, id) + } + m.call("DeleteOrganization") + return nil +} + +// OrgMember +func (m *mockStore) CreateOrgMember(ctx context.Context, mem *store.OrgMember) error { + if m.CreateOrgMemberFn != nil { + return m.CreateOrgMemberFn(ctx, mem) + } + m.call("CreateOrgMember") + return nil +} +func (m *mockStore) GetOrgMember(ctx context.Context, orgID, userID string) (*store.OrgMember, error) { + if m.GetOrgMemberFn != nil { + return m.GetOrgMemberFn(ctx, orgID, userID) + } + m.call("GetOrgMember") + return nil, nil +} +func (m *mockStore) GetOrgMemberByID(ctx context.Context, orgID, memberID string) (*store.OrgMember, error) { + if m.GetOrgMemberByIDFn != nil { + return m.GetOrgMemberByIDFn(ctx, orgID, memberID) + } + m.call("GetOrgMemberByID") + return nil, nil +} +func (m *mockStore) ListOrgMembers(ctx context.Context, orgID string) ([]*store.OrgMember, error) { + if m.ListOrgMembersFn != nil { + return m.ListOrgMembersFn(ctx, orgID) + } + m.call("ListOrgMembers") + return nil, nil +} +func (m *mockStore) DeleteOrgMember(ctx context.Context, orgID, id string) error { + if m.DeleteOrgMemberFn != nil { + return m.DeleteOrgMemberFn(ctx, orgID, id) + } + m.call("DeleteOrgMember") + return nil +} + +// Subscription +func (m *mockStore) CreateSubscription(ctx context.Context, sub *store.Subscription) error { + if m.CreateSubscriptionFn != nil { + return m.CreateSubscriptionFn(ctx, sub) + } + m.call("CreateSubscription") + return nil +} +func (m *mockStore) GetSubscriptionByOrg(ctx context.Context, orgID string) (*store.Subscription, error) { + if 
m.GetSubscriptionByOrgFn != nil { + return m.GetSubscriptionByOrgFn(ctx, orgID) + } + m.call("GetSubscriptionByOrg") + return nil, nil +} +func (m *mockStore) UpdateSubscription(ctx context.Context, sub *store.Subscription) error { + if m.UpdateSubscriptionFn != nil { + return m.UpdateSubscriptionFn(ctx, sub) + } + m.call("UpdateSubscription") + return nil +} + +// Usage +func (m *mockStore) CreateUsageRecord(ctx context.Context, rec *store.UsageRecord) error { + if m.CreateUsageRecordFn != nil { + return m.CreateUsageRecordFn(ctx, rec) + } + m.call("CreateUsageRecord") + return nil +} +func (m *mockStore) ListUsageRecords(ctx context.Context, orgID string, from, to time.Time) ([]*store.UsageRecord, error) { + if m.ListUsageRecordsFn != nil { + return m.ListUsageRecordsFn(ctx, orgID, from, to) + } + m.call("ListUsageRecords") + return nil, nil +} + +// Host +func (m *mockStore) CreateHost(ctx context.Context, host *store.Host) error { + if m.CreateHostFn != nil { + return m.CreateHostFn(ctx, host) + } + m.call("CreateHost") + return nil +} +func (m *mockStore) GetHost(ctx context.Context, hostID string) (*store.Host, error) { + if m.GetHostFn != nil { + return m.GetHostFn(ctx, hostID) + } + m.call("GetHost") + return nil, nil +} +func (m *mockStore) ListHosts(ctx context.Context) ([]store.Host, error) { + if m.ListHostsFn != nil { + return m.ListHostsFn(ctx) + } + m.call("ListHosts") + return nil, nil +} +func (m *mockStore) ListHostsByOrg(_ context.Context, _ string) ([]store.Host, error) { + return nil, nil +} +func (m *mockStore) UpdateHost(ctx context.Context, host *store.Host) error { + if m.UpdateHostFn != nil { + return m.UpdateHostFn(ctx, host) + } + m.call("UpdateHost") + return nil +} +func (m *mockStore) UpdateHostHeartbeat(ctx context.Context, hostID string, availCPUs int32, availMemMB int64, availDiskMB int64) error { + if m.UpdateHostHeartbeatFn != nil { + return m.UpdateHostHeartbeatFn(ctx, hostID, availCPUs, availMemMB, availDiskMB) + } + 
m.call("UpdateHostHeartbeat") + return nil +} + +// Sandbox +func (m *mockStore) CreateSandbox(ctx context.Context, sandbox *store.Sandbox) error { + if m.CreateSandboxFn != nil { + return m.CreateSandboxFn(ctx, sandbox) + } + m.call("CreateSandbox") + return nil +} +func (m *mockStore) GetSandbox(ctx context.Context, sandboxID string) (*store.Sandbox, error) { + if m.GetSandboxFn != nil { + return m.GetSandboxFn(ctx, sandboxID) + } + m.call("GetSandbox") + return nil, nil +} +func (m *mockStore) GetSandboxByOrg(ctx context.Context, orgID, sandboxID string) (*store.Sandbox, error) { + if m.GetSandboxFn != nil { + sb, err := m.GetSandboxFn(ctx, sandboxID) + if err != nil { + return nil, err + } + if sb.OrgID != orgID { + return nil, store.ErrNotFound + } + return sb, nil + } + m.call("GetSandboxByOrg") + return nil, nil +} +func (m *mockStore) ListSandboxes(ctx context.Context) ([]store.Sandbox, error) { + if m.ListSandboxesFn != nil { + return m.ListSandboxesFn(ctx) + } + m.call("ListSandboxes") + return nil, nil +} +func (m *mockStore) ListSandboxesByOrg(ctx context.Context, orgID string) ([]store.Sandbox, error) { + if m.ListSandboxesByOrgFn != nil { + return m.ListSandboxesByOrgFn(ctx, orgID) + } + m.call("ListSandboxesByOrg") + return nil, nil +} +func (m *mockStore) UpdateSandbox(ctx context.Context, sandbox *store.Sandbox) error { + if m.UpdateSandboxFn != nil { + return m.UpdateSandboxFn(ctx, sandbox) + } + m.call("UpdateSandbox") + return nil +} +func (m *mockStore) DeleteSandbox(ctx context.Context, sandboxID string) error { + if m.DeleteSandboxFn != nil { + return m.DeleteSandboxFn(ctx, sandboxID) + } + m.call("DeleteSandbox") + return nil +} +func (m *mockStore) GetSandboxesByHostID(ctx context.Context, hostID string) ([]store.Sandbox, error) { + if m.GetSandboxesByHostIDFn != nil { + return m.GetSandboxesByHostIDFn(ctx, hostID) + } + m.call("GetSandboxesByHostID") + return nil, nil +} +func (m *mockStore) CountSandboxesByHostIDs(_ context.Context, _ 
[]string) (map[string]int, error) { + return map[string]int{}, nil +} +func (m *mockStore) ListExpiredSandboxes(ctx context.Context, defaultTTL time.Duration) ([]store.Sandbox, error) { + if m.ListExpiredSandboxesFn != nil { + return m.ListExpiredSandboxesFn(ctx, defaultTTL) + } + m.call("ListExpiredSandboxes") + return nil, nil +} + +// Command +func (m *mockStore) CreateCommand(ctx context.Context, cmd *store.Command) error { + if m.CreateCommandFn != nil { + return m.CreateCommandFn(ctx, cmd) + } + m.call("CreateCommand") + return nil +} +func (m *mockStore) ListSandboxCommands(ctx context.Context, sandboxID string) ([]store.Command, error) { + if m.ListSandboxCommandsFn != nil { + return m.ListSandboxCommandsFn(ctx, sandboxID) + } + m.call("ListSandboxCommands") + return nil, nil +} + +// SourceHost +func (m *mockStore) CreateSourceHost(ctx context.Context, sh *store.SourceHost) error { + if m.CreateSourceHostFn != nil { + return m.CreateSourceHostFn(ctx, sh) + } + m.call("CreateSourceHost") + return nil +} +func (m *mockStore) GetSourceHost(ctx context.Context, id string) (*store.SourceHost, error) { + if m.GetSourceHostFn != nil { + return m.GetSourceHostFn(ctx, id) + } + m.call("GetSourceHost") + return nil, nil +} +func (m *mockStore) ListSourceHostsByOrg(ctx context.Context, orgID string) ([]*store.SourceHost, error) { + if m.ListSourceHostsByOrgFn != nil { + return m.ListSourceHostsByOrgFn(ctx, orgID) + } + m.call("ListSourceHostsByOrg") + return nil, nil +} +func (m *mockStore) DeleteSourceHost(ctx context.Context, id string) error { + if m.DeleteSourceHostFn != nil { + return m.DeleteSourceHostFn(ctx, id) + } + m.call("DeleteSourceHost") + return nil +} + +// HostToken +func (m *mockStore) CreateHostToken(ctx context.Context, token *store.HostToken) error { + if m.CreateHostTokenFn != nil { + return m.CreateHostTokenFn(ctx, token) + } + m.call("CreateHostToken") + return nil +} +func (m *mockStore) GetHostTokenByHash(ctx context.Context, hash string) 
(*store.HostToken, error) { + if m.GetHostTokenByHashFn != nil { + return m.GetHostTokenByHashFn(ctx, hash) + } + m.call("GetHostTokenByHash") + return nil, nil +} +func (m *mockStore) ListHostTokensByOrg(ctx context.Context, orgID string) ([]store.HostToken, error) { + if m.ListHostTokensByOrgFn != nil { + return m.ListHostTokensByOrgFn(ctx, orgID) + } + m.call("ListHostTokensByOrg") + return nil, nil +} +func (m *mockStore) DeleteHostToken(ctx context.Context, orgID, id string) error { + if m.DeleteHostTokenFn != nil { + return m.DeleteHostTokenFn(ctx, orgID, id) + } + m.call("DeleteHostToken") + return nil +} + +// Agent Conversations, Messages, Playbooks, Tasks mock methods - commented out +/* +func (m *mockStore) CreateAgentConversation(ctx context.Context, conv *store.AgentConversation) error { + if m.CreateAgentConversationFn != nil { + return m.CreateAgentConversationFn(ctx, conv) + } + m.call("CreateAgentConversation") + return nil +} +func (m *mockStore) GetAgentConversation(ctx context.Context, id string) (*store.AgentConversation, error) { + if m.GetAgentConversationFn != nil { + return m.GetAgentConversationFn(ctx, id) + } + m.call("GetAgentConversation") + return nil, nil +} +func (m *mockStore) ListAgentConversationsByOrg(ctx context.Context, orgID string) ([]*store.AgentConversation, error) { + if m.ListAgentConversationsByOrgFn != nil { + return m.ListAgentConversationsByOrgFn(ctx, orgID) + } + m.call("ListAgentConversationsByOrg") + return nil, nil +} +func (m *mockStore) DeleteAgentConversation(ctx context.Context, id string) error { + if m.DeleteAgentConversationFn != nil { + return m.DeleteAgentConversationFn(ctx, id) + } + m.call("DeleteAgentConversation") + return nil +} + +func (m *mockStore) CreateAgentMessage(ctx context.Context, msg *store.AgentMessage) error { + if m.CreateAgentMessageFn != nil { + return m.CreateAgentMessageFn(ctx, msg) + } + m.call("CreateAgentMessage") + return nil +} +func (m *mockStore) ListAgentMessages(ctx 
context.Context, conversationID string) ([]*store.AgentMessage, error) { + if m.ListAgentMessagesFn != nil { + return m.ListAgentMessagesFn(ctx, conversationID) + } + m.call("ListAgentMessages") + return nil, nil +} + +func (m *mockStore) CreatePlaybook(ctx context.Context, pb *store.Playbook) error { + if m.CreatePlaybookFn != nil { + return m.CreatePlaybookFn(ctx, pb) + } + m.call("CreatePlaybook") + return nil +} +func (m *mockStore) GetPlaybook(ctx context.Context, id string) (*store.Playbook, error) { + if m.GetPlaybookFn != nil { + return m.GetPlaybookFn(ctx, id) + } + m.call("GetPlaybook") + return nil, nil +} +func (m *mockStore) ListPlaybooksByOrg(ctx context.Context, orgID string) ([]*store.Playbook, error) { + if m.ListPlaybooksByOrgFn != nil { + return m.ListPlaybooksByOrgFn(ctx, orgID) + } + m.call("ListPlaybooksByOrg") + return nil, nil +} +func (m *mockStore) UpdatePlaybook(ctx context.Context, pb *store.Playbook) error { + if m.UpdatePlaybookFn != nil { + return m.UpdatePlaybookFn(ctx, pb) + } + m.call("UpdatePlaybook") + return nil +} +func (m *mockStore) DeletePlaybook(ctx context.Context, id string) error { + if m.DeletePlaybookFn != nil { + return m.DeletePlaybookFn(ctx, id) + } + m.call("DeletePlaybook") + return nil +} + +func (m *mockStore) CreatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { + if m.CreatePlaybookTaskFn != nil { + return m.CreatePlaybookTaskFn(ctx, task) + } + m.call("CreatePlaybookTask") + return nil +} +func (m *mockStore) GetPlaybookTask(ctx context.Context, id string) (*store.PlaybookTask, error) { + if m.GetPlaybookTaskFn != nil { + return m.GetPlaybookTaskFn(ctx, id) + } + m.call("GetPlaybookTask") + return nil, nil +} +func (m *mockStore) ListPlaybookTasks(ctx context.Context, playbookID string) ([]*store.PlaybookTask, error) { + if m.ListPlaybookTasksFn != nil { + return m.ListPlaybookTasksFn(ctx, playbookID) + } + m.call("ListPlaybookTasks") + return nil, nil +} +func (m *mockStore) 
UpdatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { + if m.UpdatePlaybookTaskFn != nil { + return m.UpdatePlaybookTaskFn(ctx, task) + } + m.call("UpdatePlaybookTask") + return nil +} +func (m *mockStore) DeletePlaybookTask(ctx context.Context, id string) error { + if m.DeletePlaybookTaskFn != nil { + return m.DeletePlaybookTaskFn(ctx, id) + } + m.call("DeletePlaybookTask") + return nil +} +func (m *mockStore) ReorderPlaybookTasks(ctx context.Context, playbookID string, taskIDs []string) error { + if m.ReorderPlaybookTasksFn != nil { + return m.ReorderPlaybookTasksFn(ctx, playbookID, taskIDs) + } + m.call("ReorderPlaybookTasks") + return nil +} +*/ + +// Billing helpers +func (m *mockStore) GetOrganizationByStripeCustomerID(ctx context.Context, customerID string) (*store.Organization, error) { + if m.GetOrganizationByStripeCustomerIDFn != nil { + return m.GetOrganizationByStripeCustomerIDFn(ctx, customerID) + } + m.call("GetOrganizationByStripeCustomerID") + return nil, nil +} +func (m *mockStore) GetModelMeter(ctx context.Context, modelID string) (*store.ModelMeter, error) { + if m.GetModelMeterFn != nil { + return m.GetModelMeterFn(ctx, modelID) + } + return nil, store.ErrNotFound +} +func (m *mockStore) CreateModelMeter(ctx context.Context, mm *store.ModelMeter) error { + if m.CreateModelMeterFn != nil { + return m.CreateModelMeterFn(ctx, mm) + } + return nil +} +func (m *mockStore) GetOrgModelSubscription(ctx context.Context, orgID, modelID string) (*store.OrgModelSubscription, error) { + if m.GetOrgModelSubscriptionFn != nil { + return m.GetOrgModelSubscriptionFn(ctx, orgID, modelID) + } + return nil, store.ErrNotFound +} +func (m *mockStore) CreateOrgModelSubscription(ctx context.Context, s *store.OrgModelSubscription) error { + if m.CreateOrgModelSubscriptionFn != nil { + return m.CreateOrgModelSubscriptionFn(ctx, s) + } + return nil +} +func (m *mockStore) SumTokenUsage(ctx context.Context, orgID string, from, to time.Time) (float64, 
error) { + if m.SumTokenUsageFn != nil { + return m.SumTokenUsageFn(ctx, orgID, from, to) + } + return 0, nil +} +func (m *mockStore) ListActiveSubscriptions(ctx context.Context) ([]*store.Subscription, error) { + if m.ListActiveSubscriptionsFn != nil { + return m.ListActiveSubscriptionsFn(ctx) + } + return nil, nil +} +func (m *mockStore) GetSubscriptionByStripeID(_ context.Context, _ string) (*store.Subscription, error) { + return nil, nil +} +func (m *mockStore) AcquireAdvisoryLock(_ context.Context, _ int64) error { return nil } +func (m *mockStore) ReleaseAdvisoryLock(_ context.Context, _ int64) error { return nil } + +// --------------------------------------------------------------------------- +// mockHostSender implements orchestrator.HostSender +// --------------------------------------------------------------------------- + +type mockHostSender struct { + SendAndWaitFn func(ctx context.Context, hostID string, msg *fluidv1.ControlMessage, timeout time.Duration) (*fluidv1.HostMessage, error) +} + +func (m *mockHostSender) SendAndWait(ctx context.Context, hostID string, msg *fluidv1.ControlMessage, timeout time.Duration) (*fluidv1.HostMessage, error) { + if m.SendAndWaitFn != nil { + return m.SendAndWaitFn(ctx, hostID, msg, timeout) + } + return nil, fmt.Errorf("mockHostSender.SendAndWait not configured") +} + +// --------------------------------------------------------------------------- +// testConfig returns a Config with safe defaults for testing +// --------------------------------------------------------------------------- + +func testConfig() *config.Config { + return &config.Config{ + API: config.APIConfig{ + Addr: ":0", + ReadTimeout: 60 * time.Second, + WriteTimeout: 120 * time.Second, + IdleTimeout: 120 * time.Second, + ShutdownTimeout: 5 * time.Second, + }, + Frontend: config.FrontendConfig{ + URL: "http://localhost:5173", + }, + Auth: config.AuthConfig{ + SessionTTL: 720 * time.Hour, + }, + Billing: config.BillingConfig{ + Prices: 
config.PriceConfig{ + SandboxMonthlyCents: 5000, + SourceVMMonthly: 500, + AgentHostMonthly: 1000, + }, + FreeTier: config.FreeTierConfig{ + MaxConcurrentSandboxes: 1, + MaxSourceVMs: 3, + MaxAgentHosts: 1, + }, + }, + // Agent config - commented out, not yet ready for integration. + // Agent: config.AgentConfig{ + // DefaultModel: "anthropic/claude-sonnet-4", + // MaxTokensPerRequest: 8192, + // FreeTokensPerMonth: 100000, + // }, + Orchestrator: config.OrchestratorConfig{ + HeartbeatTimeout: 90 * time.Second, + DefaultTTL: 24 * time.Hour, + }, + } +} + +// --------------------------------------------------------------------------- +// Test fixtures +// --------------------------------------------------------------------------- + +var ( + testUser = &store.User{ + ID: "USR-test1234", + Email: "test@example.com", + DisplayName: "Test User", + } + + testOrg = &store.Organization{ + ID: "ORG-test1234", + Name: "Test Org", + Slug: "test-org", + OwnerID: testUser.ID, + } + + testMember = &store.OrgMember{ + ID: "MBR-test1234", + OrgID: testOrg.ID, + UserID: testUser.ID, + Role: store.OrgRoleOwner, + } + + testSessionToken = "test-session-token-value" +) + +// --------------------------------------------------------------------------- +// newTestServer creates a Server wired up with the given mock store +// --------------------------------------------------------------------------- + +func newTestServer(ms *mockStore, cfg *config.Config) *Server { + if cfg == nil { + cfg = testConfig() + } + reg := registry.New() + sender := &mockHostSender{} + orch := orchestrator.New(reg, ms, sender, nil, cfg.Orchestrator.DefaultTTL, cfg.Orchestrator.HeartbeatTimeout) + return NewServer(ms, cfg, orch, nil, nil) +} + +// newTestServerWithSender creates a Server with a custom HostSender +func newTestServerWithSender(ms *mockStore, sender *mockHostSender, cfg *config.Config) *Server { + if cfg == nil { + cfg = testConfig() + } + reg := registry.New() + orch := orchestrator.New(reg, ms, 
sender, nil, cfg.Orchestrator.DefaultTTL, cfg.Orchestrator.HeartbeatTimeout) + return NewServer(ms, cfg, orch, nil, nil) +} + +// --------------------------------------------------------------------------- +// authenticatedRequest creates an http.Request with a valid session cookie +// and configures the mock store to authenticate the test user. +// The auth middleware passes cookie.Value directly to GetSession. +// --------------------------------------------------------------------------- + +func authenticatedRequest(ms *mockStore, method, path string, body *http.Request) *http.Request { + var req *http.Request + if body != nil { + req = body + req.Method = method + req.RequestURI = path + } else { + req = httptest.NewRequest(method, path, nil) + } + + req.AddCookie(&http.Cookie{ + Name: auth.SessionCookieName, + Value: testSessionToken, + }) + + if ms.GetSessionFn == nil { + hashedToken := auth.HashSessionToken(testSessionToken) + ms.GetSessionFn = func(_ context.Context, id string) (*store.Session, error) { + if id == hashedToken { + return &store.Session{ + ID: hashedToken, + UserID: testUser.ID, + ExpiresAt: time.Now().Add(24 * time.Hour), + }, nil + } + return nil, store.ErrNotFound + } + } + + if ms.GetUserFn == nil { + ms.GetUserFn = func(_ context.Context, id string) (*store.User, error) { + if id == testUser.ID { + return testUser, nil + } + return nil, store.ErrNotFound + } + } + + return req +} + +// setupOrgMembership configures the mock store to resolve org by slug and membership +func setupOrgMembership(ms *mockStore) { + if ms.GetOrganizationBySlugFn == nil { + ms.GetOrganizationBySlugFn = func(_ context.Context, slug string) (*store.Organization, error) { + if slug == testOrg.Slug { + return testOrg, nil + } + return nil, store.ErrNotFound + } + } + + if ms.GetOrgMemberFn == nil { + ms.GetOrgMemberFn = func(_ context.Context, orgID, userID string) (*store.OrgMember, error) { + if orgID == testOrg.ID && userID == testUser.ID { + return testMember, 
nil + } + return nil, store.ErrNotFound + } + } +} + +// parseJSONResponse reads body into a map +func parseJSONResponse(rr *httptest.ResponseRecorder) map[string]any { + var result map[string]any + _ = json.Unmarshal(rr.Body.Bytes(), &result) + return result +} diff --git a/api/internal/store/postgres/postgres.go b/api/internal/store/postgres/postgres.go new file mode 100644 index 00000000..3ce83241 --- /dev/null +++ b/api/internal/store/postgres/postgres.go @@ -0,0 +1,1963 @@ +package postgres + +import ( + "context" + "errors" + "fmt" + "log/slog" + "time" + + "github.com/jackc/pgconn" + "gorm.io/driver/postgres" + "gorm.io/gorm" + "gorm.io/gorm/logger" + + "github.com/aspectrr/fluid.sh/api/internal/crypto" + "github.com/aspectrr/fluid.sh/api/internal/store" +) + +var ( + _ store.Store = (*postgresStore)(nil) + _ store.DataStore = (*postgresStore)(nil) +) + +type postgresStore struct { + db *gorm.DB + conf store.Config + encryptionKey []byte +} + +// GORM models + +type UserModel struct { + ID string `gorm:"column:id;primaryKey"` + Email string `gorm:"column:email;uniqueIndex:idx_users_email_not_deleted,where:deleted_at IS NULL"` + DisplayName string `gorm:"column:display_name"` + AvatarURL string `gorm:"column:avatar_url"` + PasswordHash string `gorm:"column:password_hash"` + EmailVerified bool `gorm:"column:email_verified;default:false"` + CreatedAt time.Time `gorm:"column:created_at"` + UpdatedAt time.Time `gorm:"column:updated_at"` + DeletedAt *time.Time `gorm:"column:deleted_at;index"` +} + +func (UserModel) TableName() string { return "users" } + +type OAuthAccountModel struct { + ID string `gorm:"column:id;primaryKey"` + UserID string `gorm:"column:user_id;index"` + Provider string `gorm:"column:provider"` + ProviderID string `gorm:"column:provider_id;uniqueIndex:idx_oauth_provider_id,composite:provider"` + Email string `gorm:"column:email"` + AccessToken string `gorm:"column:access_token"` + RefreshToken string `gorm:"column:refresh_token"` + TokenExpiry 
time.Time `gorm:"column:token_expiry"` + CreatedAt time.Time `gorm:"column:created_at"` +} + +func (OAuthAccountModel) TableName() string { return "oauth_accounts" } + +type SessionModel struct { + ID string `gorm:"column:id;primaryKey"` + UserID string `gorm:"column:user_id;index"` + IPAddress string `gorm:"column:ip_address"` + UserAgent string `gorm:"column:user_agent"` + ExpiresAt time.Time `gorm:"column:expires_at;index"` + CreatedAt time.Time `gorm:"column:created_at"` +} + +func (SessionModel) TableName() string { return "sessions" } + +type OrganizationModel struct { + ID string `gorm:"column:id;primaryKey"` + Name string `gorm:"column:name"` + Slug string `gorm:"column:slug;uniqueIndex:idx_orgs_slug_not_deleted,where:deleted_at IS NULL"` + OwnerID string `gorm:"column:owner_id;index"` + StripeCustomerID string `gorm:"column:stripe_customer_id"` + CreatedAt time.Time `gorm:"column:created_at"` + UpdatedAt time.Time `gorm:"column:updated_at"` + DeletedAt *time.Time `gorm:"column:deleted_at;index"` +} + +func (OrganizationModel) TableName() string { return "organizations" } + +type OrgMemberModel struct { + ID string `gorm:"column:id;primaryKey"` + OrgID string `gorm:"column:org_id;uniqueIndex:idx_org_members_unique,composite:org_user"` + UserID string `gorm:"column:user_id;uniqueIndex:idx_org_members_unique,composite:org_user"` + Role string `gorm:"column:role"` + CreatedAt time.Time `gorm:"column:created_at"` +} + +func (OrgMemberModel) TableName() string { return "org_members" } + +type SubscriptionModel struct { + ID string `gorm:"column:id;primaryKey"` + OrgID string `gorm:"column:org_id;index"` + Plan string `gorm:"column:plan"` + StripeSubscriptionID string `gorm:"column:stripe_subscription_id;uniqueIndex"` + StripePriceID string `gorm:"column:stripe_price_id"` + Status string `gorm:"column:status"` + CurrentPeriodStart time.Time `gorm:"column:current_period_start"` + CurrentPeriodEnd time.Time `gorm:"column:current_period_end"` + CreatedAt time.Time 
`gorm:"column:created_at"` + UpdatedAt time.Time `gorm:"column:updated_at"` +} + +func (SubscriptionModel) TableName() string { return "subscriptions" } + +type UsageRecordModel struct { + ID string `gorm:"column:id;primaryKey"` + OrgID string `gorm:"column:org_id;index:idx_usage_org_recorded,priority:1"` + ResourceType string `gorm:"column:resource_type"` + Quantity float64 `gorm:"column:quantity"` + RecordedAt time.Time `gorm:"column:recorded_at;index:idx_usage_org_recorded,priority:2"` + MetadataJSON string `gorm:"column:metadata_json"` +} + +func (UsageRecordModel) TableName() string { return "usage_records" } + +// --- Infrastructure GORM models --- + +type HostModel struct { + ID string `gorm:"column:id;primaryKey"` + OrgID string `gorm:"column:org_id;not null;index"` + Hostname string `gorm:"column:hostname;not null"` + Version string `gorm:"column:version"` + TotalCPUs int32 `gorm:"column:total_cpus;not null;default:0"` + TotalMemoryMB int64 `gorm:"column:total_memory_mb;not null;default:0"` + TotalDiskMB int64 `gorm:"column:total_disk_mb;not null;default:0"` + AvailableCPUs int32 `gorm:"column:available_cpus;not null;default:0"` + AvailableMemoryMB int64 `gorm:"column:available_memory_mb;not null;default:0"` + AvailableDiskMB int64 `gorm:"column:available_disk_mb;not null;default:0"` + BaseImages store.StringSlice `gorm:"column:base_images;type:jsonb;default:'[]'"` + SourceVMs store.SourceVMSlice `gorm:"column:source_vms;type:jsonb;default:'[]'"` + Bridges store.BridgeSlice `gorm:"column:bridges;type:jsonb;default:'[]'"` + Status string `gorm:"column:status;not null;default:'OFFLINE'"` + LastHeartbeat time.Time `gorm:"column:last_heartbeat;not null"` + CreatedAt time.Time `gorm:"column:created_at"` + UpdatedAt time.Time `gorm:"column:updated_at"` +} + +func (HostModel) TableName() string { return "hosts" } + +type SandboxModel struct { + ID string `gorm:"column:id;primaryKey"` + OrgID string `gorm:"column:org_id;not null;index"` + HostID string 
`gorm:"column:host_id;not null;index"` + Name string `gorm:"column:name;not null"` + AgentID string `gorm:"column:agent_id"` + BaseImage string `gorm:"column:base_image"` + Bridge string `gorm:"column:bridge"` + TAPDevice string `gorm:"column:tap_device"` + MACAddress string `gorm:"column:mac_address"` + IPAddress string `gorm:"column:ip_address"` + State string `gorm:"column:state;not null;default:'CREATING'"` + VCPUs int32 `gorm:"column:vcpus;not null;default:1"` + MemoryMB int32 `gorm:"column:memory_mb;not null;default:512"` + TTLSeconds int32 `gorm:"column:ttl_seconds;not null;default:0"` + SourceVM string `gorm:"column:source_vm"` + CreatedAt time.Time `gorm:"column:created_at"` + UpdatedAt time.Time `gorm:"column:updated_at"` + DeletedAt *time.Time `gorm:"column:deleted_at;index"` +} + +func (SandboxModel) TableName() string { return "sandboxes" } + +type CommandModel struct { + ID string `gorm:"column:id;primaryKey"` + SandboxID string `gorm:"column:sandbox_id;not null;index:idx_commands_sandbox_started,priority:1"` + Command string `gorm:"column:command;not null"` + Stdout string `gorm:"column:stdout"` + Stderr string `gorm:"column:stderr"` + ExitCode int32 `gorm:"column:exit_code;not null;default:0"` + DurationMS int64 `gorm:"column:duration_ms;not null;default:0"` + StartedAt time.Time `gorm:"column:started_at;not null;index:idx_commands_sandbox_started,priority:2"` + EndedAt time.Time `gorm:"column:ended_at"` +} + +func (CommandModel) TableName() string { return "commands" } + +type HostTokenModel struct { + ID string `gorm:"column:id;primaryKey"` + OrgID string `gorm:"column:org_id;not null;index"` + Name string `gorm:"column:name;not null"` + TokenHash string `gorm:"column:token_hash;not null;uniqueIndex"` + ExpiresAt *time.Time `gorm:"column:expires_at"` + CreatedAt time.Time `gorm:"column:created_at"` +} + +func (HostTokenModel) TableName() string { return "host_tokens" } + +/* +type AgentConversationModel struct { + ID string 
`gorm:"column:id;primaryKey"` + OrgID string `gorm:"column:org_id;not null;index"` + UserID string `gorm:"column:user_id;not null;index"` + Title string `gorm:"column:title;not null"` + Model string `gorm:"column:model"` + CreatedAt time.Time `gorm:"column:created_at"` + UpdatedAt time.Time `gorm:"column:updated_at"` +} + +func (AgentConversationModel) TableName() string { return "agent_conversations" } + +type AgentMessageModel struct { + ID string `gorm:"column:id;primaryKey"` + ConversationID string `gorm:"column:conversation_id;not null;index"` + Role string `gorm:"column:role;not null"` + Content string `gorm:"column:content"` + ToolCalls string `gorm:"column:tool_calls"` + ToolCallID string `gorm:"column:tool_call_id"` + TokensInput int `gorm:"column:tokens_input;default:0"` + TokensOutput int `gorm:"column:tokens_output;default:0"` + Model string `gorm:"column:model"` + CreatedAt time.Time `gorm:"column:created_at"` +} + +func (AgentMessageModel) TableName() string { return "agent_messages" } + +type PlaybookModel struct { + ID string `gorm:"column:id;primaryKey"` + OrgID string `gorm:"column:org_id;not null;index"` + Name string `gorm:"column:name;not null"` + Description string `gorm:"column:description"` + CreatedAt time.Time `gorm:"column:created_at"` + UpdatedAt time.Time `gorm:"column:updated_at"` +} + +func (PlaybookModel) TableName() string { return "playbooks" } + +type PlaybookTaskModel struct { + ID string `gorm:"column:id;primaryKey"` + PlaybookID string `gorm:"column:playbook_id;not null;index"` + SortOrder int `gorm:"column:sort_order;not null;default:0"` + Name string `gorm:"column:name;not null"` + Module string `gorm:"column:module;not null"` + Params string `gorm:"column:params;type:jsonb;default:'{}'"` + CreatedAt time.Time `gorm:"column:created_at"` + UpdatedAt time.Time `gorm:"column:updated_at"` +} + +func (PlaybookTaskModel) TableName() string { return "playbook_tasks" } +*/ + +type SourceHostModel struct { + ID string 
`gorm:"column:id;primaryKey"` + OrgID string `gorm:"column:org_id;not null;index"` + Name string `gorm:"column:name;not null"` + Hostname string `gorm:"column:hostname;not null"` + Type string `gorm:"column:type;not null;default:'libvirt'"` + SSHUser string `gorm:"column:ssh_user"` + SSHPort int `gorm:"column:ssh_port;not null;default:22"` + SSHIdentityFile string `gorm:"column:ssh_identity_file"` + ProxmoxHost string `gorm:"column:proxmox_host"` + ProxmoxTokenID string `gorm:"column:proxmox_token_id"` + ProxmoxSecret string `gorm:"column:proxmox_secret"` + ProxmoxNode string `gorm:"column:proxmox_node"` + ProxmoxVerifySSL bool `gorm:"column:proxmox_verify_ssl;default:false"` + VMs store.StringSlice `gorm:"column:vms;type:jsonb;default:'[]'"` + CreatedAt time.Time `gorm:"column:created_at"` + UpdatedAt time.Time `gorm:"column:updated_at"` +} + +func (SourceHostModel) TableName() string { return "source_hosts" } + +type ModelMeterModel struct { + ID string `gorm:"column:id;primaryKey"` + ModelID string `gorm:"column:model_id;uniqueIndex"` + StripeProductID string `gorm:"column:stripe_product_id"` + StripeInputMeterID string `gorm:"column:stripe_input_meter_id"` + StripeOutputMeterID string `gorm:"column:stripe_output_meter_id"` + StripeInputPriceID string `gorm:"column:stripe_input_price_id"` + StripeOutputPriceID string `gorm:"column:stripe_output_price_id"` + InputEventName string `gorm:"column:input_event_name"` + OutputEventName string `gorm:"column:output_event_name"` + InputCostPerToken float64 `gorm:"column:input_cost_per_token"` + OutputCostPerToken float64 `gorm:"column:output_cost_per_token"` + CreatedAt time.Time `gorm:"column:created_at"` +} + +func (ModelMeterModel) TableName() string { return "model_meters" } + +type OrgModelSubscriptionModel struct { + ID string `gorm:"column:id;primaryKey"` + OrgID string `gorm:"column:org_id;uniqueIndex:idx_org_model_sub,composite:org_model"` + ModelID string 
`gorm:"column:model_id;uniqueIndex:idx_org_model_sub,composite:org_model"` + StripeInputSubItemID string `gorm:"column:stripe_input_sub_item_id"` + StripeOutputSubItemID string `gorm:"column:stripe_output_sub_item_id"` + CreatedAt time.Time `gorm:"column:created_at"` +} + +func (OrgModelSubscriptionModel) TableName() string { return "org_model_subscriptions" } + +// New creates a Store backed by Postgres + GORM. +func New(ctx context.Context, cfg store.Config) (store.Store, error) { + if cfg.DatabaseURL == "" { + return nil, fmt.Errorf("postgres: missing DatabaseURL") + } + + db, err := gorm.Open( + postgres.Open(cfg.DatabaseURL), + &gorm.Config{ + NowFunc: func() time.Time { return time.Now().UTC() }, + Logger: logger.Default.LogMode(logger.Silent), + }, + ) + if err != nil { + return nil, fmt.Errorf("postgres: open: %w", err) + } + + sqlDB, err := db.DB() + if err != nil { + return nil, fmt.Errorf("postgres: sql.DB handle: %w", err) + } + + if cfg.MaxOpenConns > 0 { + sqlDB.SetMaxOpenConns(cfg.MaxOpenConns) + } + if cfg.MaxIdleConns > 0 { + sqlDB.SetMaxIdleConns(cfg.MaxIdleConns) + } + if cfg.ConnMaxLifetime > 0 { + sqlDB.SetConnMaxLifetime(cfg.ConnMaxLifetime) + } + + var encKey []byte + if cfg.EncryptionKey != "" { + encKey = crypto.DeriveKey(cfg.EncryptionKey) + } + pg := &postgresStore{db: db.WithContext(ctx), conf: cfg, encryptionKey: encKey} + + if cfg.AutoMigrate { + if err := pg.autoMigrate(ctx); err != nil { + _ = sqlDB.Close() + return nil, err + } + } + + if err := pg.Ping(ctx); err != nil { + _ = sqlDB.Close() + return nil, err + } + + return pg, nil +} + +func (s *postgresStore) autoMigrate(_ context.Context) error { + return s.db.AutoMigrate( + &UserModel{}, + &OAuthAccountModel{}, + &SessionModel{}, + &OrganizationModel{}, + &OrgMemberModel{}, + &SubscriptionModel{}, + &UsageRecordModel{}, + &HostModel{}, + &SandboxModel{}, + &CommandModel{}, + &HostTokenModel{}, + // &AgentConversationModel{}, + // &AgentMessageModel{}, + // &PlaybookModel{}, + // 
&PlaybookTaskModel{}, + &SourceHostModel{}, + &ModelMeterModel{}, + &OrgModelSubscriptionModel{}, + ) +} + +func (s *postgresStore) Config() store.Config { return s.conf } + +func (s *postgresStore) Close() error { + sqlDB, err := s.db.DB() + if err != nil { + return err + } + return sqlDB.Close() +} + +func (s *postgresStore) Ping(ctx context.Context) error { + sqlDB, err := s.db.DB() + if err != nil { + return err + } + return sqlDB.PingContext(ctx) +} + +func (s *postgresStore) WithTx(ctx context.Context, fn func(tx store.DataStore) error) error { + return s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + return fn(&postgresStore{db: tx, conf: s.conf, encryptionKey: s.encryptionKey}) + }) +} + +// --- Model converters --- + +func userToModel(u *store.User) *UserModel { + return &UserModel{ + ID: u.ID, + Email: u.Email, + DisplayName: u.DisplayName, + AvatarURL: u.AvatarURL, + PasswordHash: u.PasswordHash, + EmailVerified: u.EmailVerified, + CreatedAt: u.CreatedAt, + UpdatedAt: u.UpdatedAt, + DeletedAt: u.DeletedAt, + } +} + +func userFromModel(m *UserModel) *store.User { + return &store.User{ + ID: m.ID, + Email: m.Email, + DisplayName: m.DisplayName, + AvatarURL: m.AvatarURL, + PasswordHash: m.PasswordHash, + EmailVerified: m.EmailVerified, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + DeletedAt: m.DeletedAt, + } +} + +func (s *postgresStore) oauthToModel(oa *store.OAuthAccount) *OAuthAccountModel { + m := &OAuthAccountModel{ + ID: oa.ID, + UserID: oa.UserID, + Provider: oa.Provider, + ProviderID: oa.ProviderID, + Email: oa.Email, + AccessToken: oa.AccessToken, + RefreshToken: oa.RefreshToken, + TokenExpiry: oa.TokenExpiry, + CreatedAt: oa.CreatedAt, + } + if len(s.encryptionKey) > 0 { + if enc, err := crypto.Encrypt(s.encryptionKey, oa.AccessToken); err == nil { + m.AccessToken = enc + } else { + slog.Warn("failed to encrypt access token", "error", err) + } + if enc, err := crypto.Encrypt(s.encryptionKey, oa.RefreshToken); err == nil { + 
m.RefreshToken = enc + } else { + slog.Warn("failed to encrypt refresh token", "error", err) + } + } + return m +} + +func (s *postgresStore) oauthFromModel(m *OAuthAccountModel) *store.OAuthAccount { + oa := &store.OAuthAccount{ + ID: m.ID, + UserID: m.UserID, + Provider: m.Provider, + ProviderID: m.ProviderID, + Email: m.Email, + AccessToken: m.AccessToken, + RefreshToken: m.RefreshToken, + TokenExpiry: m.TokenExpiry, + CreatedAt: m.CreatedAt, + } + if len(s.encryptionKey) > 0 { + if dec, err := crypto.Decrypt(s.encryptionKey, m.AccessToken); err == nil { + oa.AccessToken = dec + } + if dec, err := crypto.Decrypt(s.encryptionKey, m.RefreshToken); err == nil { + oa.RefreshToken = dec + } + } + return oa +} + +func sessionToModel(s *store.Session) *SessionModel { + return &SessionModel{ + ID: s.ID, + UserID: s.UserID, + IPAddress: s.IPAddress, + UserAgent: s.UserAgent, + ExpiresAt: s.ExpiresAt, + CreatedAt: s.CreatedAt, + } +} + +func sessionFromModel(m *SessionModel) *store.Session { + return &store.Session{ + ID: m.ID, + UserID: m.UserID, + IPAddress: m.IPAddress, + UserAgent: m.UserAgent, + ExpiresAt: m.ExpiresAt, + CreatedAt: m.CreatedAt, + } +} + +func orgToModel(o *store.Organization) *OrganizationModel { + return &OrganizationModel{ + ID: o.ID, + Name: o.Name, + Slug: o.Slug, + OwnerID: o.OwnerID, + StripeCustomerID: o.StripeCustomerID, + CreatedAt: o.CreatedAt, + UpdatedAt: o.UpdatedAt, + DeletedAt: o.DeletedAt, + } +} + +func orgFromModel(m *OrganizationModel) *store.Organization { + return &store.Organization{ + ID: m.ID, + Name: m.Name, + Slug: m.Slug, + OwnerID: m.OwnerID, + StripeCustomerID: m.StripeCustomerID, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + DeletedAt: m.DeletedAt, + } +} + +func memberToModel(m *store.OrgMember) *OrgMemberModel { + return &OrgMemberModel{ + ID: m.ID, + OrgID: m.OrgID, + UserID: m.UserID, + Role: string(m.Role), + CreatedAt: m.CreatedAt, + } +} + +func memberFromModel(m *OrgMemberModel) *store.OrgMember { + 
return &store.OrgMember{ + ID: m.ID, + OrgID: m.OrgID, + UserID: m.UserID, + Role: store.OrgRole(m.Role), + CreatedAt: m.CreatedAt, + } +} + +func subToModel(s *store.Subscription) *SubscriptionModel { + return &SubscriptionModel{ + ID: s.ID, + OrgID: s.OrgID, + Plan: string(s.Plan), + StripeSubscriptionID: s.StripeSubscriptionID, + StripePriceID: s.StripePriceID, + Status: string(s.Status), + CurrentPeriodStart: s.CurrentPeriodStart, + CurrentPeriodEnd: s.CurrentPeriodEnd, + CreatedAt: s.CreatedAt, + UpdatedAt: s.UpdatedAt, + } +} + +func subFromModel(m *SubscriptionModel) *store.Subscription { + return &store.Subscription{ + ID: m.ID, + OrgID: m.OrgID, + Plan: store.SubscriptionPlan(m.Plan), + StripeSubscriptionID: m.StripeSubscriptionID, + StripePriceID: m.StripePriceID, + Status: store.SubscriptionStatus(m.Status), + CurrentPeriodStart: m.CurrentPeriodStart, + CurrentPeriodEnd: m.CurrentPeriodEnd, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + } +} + +func usageToModel(r *store.UsageRecord) *UsageRecordModel { + return &UsageRecordModel{ + ID: r.ID, + OrgID: r.OrgID, + ResourceType: r.ResourceType, + Quantity: r.Quantity, + RecordedAt: r.RecordedAt, + MetadataJSON: r.MetadataJSON, + } +} + +func usageFromModel(m *UsageRecordModel) *store.UsageRecord { + return &store.UsageRecord{ + ID: m.ID, + OrgID: m.OrgID, + ResourceType: m.ResourceType, + Quantity: m.Quantity, + RecordedAt: m.RecordedAt, + MetadataJSON: m.MetadataJSON, + } +} + +// mapDBError converts GORM/Postgres errors to sentinel errors. 
+func mapDBError(err error) error { + if err == nil { + return nil + } + if errors.Is(err, gorm.ErrRecordNotFound) { + return store.ErrNotFound + } + if errors.Is(err, gorm.ErrDuplicatedKey) { + return store.ErrAlreadyExists + } + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) { + switch pgErr.Code { + case "23505": + return store.ErrAlreadyExists + case "23503": + return store.ErrInvalid + } + } + return err +} + +// --- User CRUD --- + +func (s *postgresStore) CreateUser(ctx context.Context, u *store.User) error { + now := time.Now().UTC() + u.CreatedAt = now + u.UpdatedAt = now + if err := s.db.WithContext(ctx).Create(userToModel(u)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetUser(ctx context.Context, id string) (*store.User, error) { + var model UserModel + if err := s.db.WithContext(ctx).Where("id = ? AND deleted_at IS NULL", id).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return userFromModel(&model), nil +} + +func (s *postgresStore) GetUserByEmail(ctx context.Context, email string) (*store.User, error) { + var model UserModel + if err := s.db.WithContext(ctx).Where("email = ? AND deleted_at IS NULL", email).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return userFromModel(&model), nil +} + +func (s *postgresStore) UpdateUser(ctx context.Context, u *store.User) error { + u.UpdatedAt = time.Now().UTC() + res := s.db.WithContext(ctx).Model(&UserModel{}).Where("id = ? AND deleted_at IS NULL", u.ID). 
+ Updates(map[string]any{ + "email": u.Email, + "display_name": u.DisplayName, + "avatar_url": u.AvatarURL, + "password_hash": u.PasswordHash, + "email_verified": u.EmailVerified, + "updated_at": u.UpdatedAt, + }) + if err := mapDBError(res.Error); err != nil { + return err + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +// --- OAuth CRUD --- + +func (s *postgresStore) CreateOAuthAccount(ctx context.Context, oa *store.OAuthAccount) error { + oa.CreatedAt = time.Now().UTC() + if err := s.db.WithContext(ctx).Create(s.oauthToModel(oa)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetOAuthAccount(ctx context.Context, provider, providerID string) (*store.OAuthAccount, error) { + var model OAuthAccountModel + if err := s.db.WithContext(ctx).Where("provider = ? AND provider_id = ?", provider, providerID).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return s.oauthFromModel(&model), nil +} + +func (s *postgresStore) GetOAuthAccountsByUser(ctx context.Context, userID string) ([]*store.OAuthAccount, error) { + var models []OAuthAccountModel + if err := s.db.WithContext(ctx).Where("user_id = ?", userID).Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]*store.OAuthAccount, 0, len(models)) + for i := range models { + out = append(out, s.oauthFromModel(&models[i])) + } + return out, nil +} + +// --- Session CRUD --- + +func (s *postgresStore) CreateSession(ctx context.Context, sess *store.Session) error { + sess.CreatedAt = time.Now().UTC() + if err := s.db.WithContext(ctx).Create(sessionToModel(sess)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetSession(ctx context.Context, id string) (*store.Session, error) { + var model SessionModel + if err := s.db.WithContext(ctx).Where("id = ? 
AND expires_at > ?", id, time.Now().UTC()).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return sessionFromModel(&model), nil +} + +func (s *postgresStore) DeleteSession(ctx context.Context, id string) error { + res := s.db.WithContext(ctx).Where("id = ?", id).Delete(&SessionModel{}) + if res.Error != nil { + return mapDBError(res.Error) + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +func (s *postgresStore) DeleteExpiredSessions(ctx context.Context) error { + return s.db.WithContext(ctx).Where("expires_at <= ?", time.Now().UTC()).Delete(&SessionModel{}).Error +} + +// --- Organization CRUD --- + +func (s *postgresStore) CreateOrganization(ctx context.Context, org *store.Organization) error { + now := time.Now().UTC() + org.CreatedAt = now + org.UpdatedAt = now + if err := s.db.WithContext(ctx).Create(orgToModel(org)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetOrganization(ctx context.Context, id string) (*store.Organization, error) { + var model OrganizationModel + if err := s.db.WithContext(ctx).Where("id = ? AND deleted_at IS NULL", id).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return orgFromModel(&model), nil +} + +func (s *postgresStore) GetOrganizationBySlug(ctx context.Context, slug string) (*store.Organization, error) { + var model OrganizationModel + if err := s.db.WithContext(ctx).Where("slug = ? AND deleted_at IS NULL", slug).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return orgFromModel(&model), nil +} + +func (s *postgresStore) ListOrganizationsByUser(ctx context.Context, userID string) ([]*store.Organization, error) { + var models []OrganizationModel + if err := s.db.WithContext(ctx). + Joins("JOIN org_members ON org_members.org_id = organizations.id"). + Where("org_members.user_id = ? AND organizations.deleted_at IS NULL", userID). 
+ Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]*store.Organization, 0, len(models)) + for i := range models { + out = append(out, orgFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) UpdateOrganization(ctx context.Context, org *store.Organization) error { + org.UpdatedAt = time.Now().UTC() + res := s.db.WithContext(ctx).Model(&OrganizationModel{}).Where("id = ? AND deleted_at IS NULL", org.ID). + Updates(map[string]any{ + "name": org.Name, + "slug": org.Slug, + "stripe_customer_id": org.StripeCustomerID, + "updated_at": org.UpdatedAt, + }) + if err := mapDBError(res.Error); err != nil { + return err + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +func (s *postgresStore) DeleteOrganization(ctx context.Context, id string) error { + now := time.Now().UTC() + res := s.db.WithContext(ctx).Model(&OrganizationModel{}).Where("id = ? AND deleted_at IS NULL", id). + Updates(map[string]any{ + "deleted_at": &now, + "updated_at": now, + }) + if err := mapDBError(res.Error); err != nil { + return err + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +// --- OrgMember CRUD --- + +func (s *postgresStore) CreateOrgMember(ctx context.Context, m *store.OrgMember) error { + m.CreatedAt = time.Now().UTC() + if err := s.db.WithContext(ctx).Create(memberToModel(m)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetOrgMember(ctx context.Context, orgID, userID string) (*store.OrgMember, error) { + var model OrgMemberModel + if err := s.db.WithContext(ctx).Where("org_id = ? AND user_id = ?", orgID, userID).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return memberFromModel(&model), nil +} + +func (s *postgresStore) GetOrgMemberByID(ctx context.Context, orgID, memberID string) (*store.OrgMember, error) { + var model OrgMemberModel + if err := s.db.WithContext(ctx).Where("id = ? 
AND org_id = ?", memberID, orgID).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return memberFromModel(&model), nil +} + +func (s *postgresStore) ListOrgMembers(ctx context.Context, orgID string) ([]*store.OrgMember, error) { + var models []OrgMemberModel + if err := s.db.WithContext(ctx).Where("org_id = ?", orgID).Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]*store.OrgMember, 0, len(models)) + for i := range models { + out = append(out, memberFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) DeleteOrgMember(ctx context.Context, orgID, id string) error { + res := s.db.WithContext(ctx).Where("id = ? AND org_id = ?", id, orgID).Delete(&OrgMemberModel{}) + if res.Error != nil { + return mapDBError(res.Error) + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +// --- Subscription CRUD --- + +func (s *postgresStore) CreateSubscription(ctx context.Context, sub *store.Subscription) error { + now := time.Now().UTC() + sub.CreatedAt = now + sub.UpdatedAt = now + if err := s.db.WithContext(ctx).Create(subToModel(sub)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetSubscriptionByOrg(ctx context.Context, orgID string) (*store.Subscription, error) { + var model SubscriptionModel + if err := s.db.WithContext(ctx).Where("org_id = ?", orgID).Order("created_at DESC").First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return subFromModel(&model), nil +} + +func (s *postgresStore) UpdateSubscription(ctx context.Context, sub *store.Subscription) error { + sub.UpdatedAt = time.Now().UTC() + res := s.db.WithContext(ctx).Model(&SubscriptionModel{}).Where("id = ?", sub.ID). 
+ Updates(map[string]any{ + "plan": string(sub.Plan), + "stripe_subscription_id": sub.StripeSubscriptionID, + "stripe_price_id": sub.StripePriceID, + "status": string(sub.Status), + "current_period_start": sub.CurrentPeriodStart, + "current_period_end": sub.CurrentPeriodEnd, + "updated_at": sub.UpdatedAt, + }) + if err := mapDBError(res.Error); err != nil { + return err + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +func (s *postgresStore) GetSubscriptionByStripeID(ctx context.Context, stripeSubID string) (*store.Subscription, error) { + var model SubscriptionModel + if err := s.db.WithContext(ctx).Where("stripe_subscription_id = ?", stripeSubID).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return subFromModel(&model), nil +} + +// --- UsageRecord CRUD --- + +func (s *postgresStore) CreateUsageRecord(ctx context.Context, rec *store.UsageRecord) error { + rec.RecordedAt = time.Now().UTC() + if err := s.db.WithContext(ctx).Create(usageToModel(rec)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) ListUsageRecords(ctx context.Context, orgID string, from, to time.Time) ([]*store.UsageRecord, error) { + var models []UsageRecordModel + if err := s.db.WithContext(ctx). + Where("org_id = ? AND recorded_at >= ? AND recorded_at <= ?", orgID, from, to). + Order("recorded_at DESC"). 
+ Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]*store.UsageRecord, 0, len(models)) + for i := range models { + out = append(out, usageFromModel(&models[i])) + } + return out, nil +} + +// --- Infrastructure model converters --- + +func hostToModel(h *store.Host) *HostModel { + return &HostModel{ + ID: h.ID, + OrgID: h.OrgID, + Hostname: h.Hostname, + Version: h.Version, + TotalCPUs: h.TotalCPUs, + TotalMemoryMB: h.TotalMemoryMB, + TotalDiskMB: h.TotalDiskMB, + AvailableCPUs: h.AvailableCPUs, + AvailableMemoryMB: h.AvailableMemoryMB, + AvailableDiskMB: h.AvailableDiskMB, + BaseImages: h.BaseImages, + SourceVMs: h.SourceVMs, + Bridges: h.Bridges, + Status: string(h.Status), + LastHeartbeat: h.LastHeartbeat, + CreatedAt: h.CreatedAt, + UpdatedAt: h.UpdatedAt, + } +} + +func hostFromModel(m *HostModel) *store.Host { + return &store.Host{ + ID: m.ID, + OrgID: m.OrgID, + Hostname: m.Hostname, + Version: m.Version, + TotalCPUs: m.TotalCPUs, + TotalMemoryMB: m.TotalMemoryMB, + TotalDiskMB: m.TotalDiskMB, + AvailableCPUs: m.AvailableCPUs, + AvailableMemoryMB: m.AvailableMemoryMB, + AvailableDiskMB: m.AvailableDiskMB, + BaseImages: m.BaseImages, + SourceVMs: m.SourceVMs, + Bridges: m.Bridges, + Status: store.HostStatus(m.Status), + LastHeartbeat: m.LastHeartbeat, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + } +} + +func sandboxToModel(s *store.Sandbox) *SandboxModel { + return &SandboxModel{ + ID: s.ID, + OrgID: s.OrgID, + HostID: s.HostID, + Name: s.Name, + AgentID: s.AgentID, + BaseImage: s.BaseImage, + Bridge: s.Bridge, + TAPDevice: s.TAPDevice, + MACAddress: s.MACAddress, + IPAddress: s.IPAddress, + State: string(s.State), + VCPUs: s.VCPUs, + MemoryMB: s.MemoryMB, + TTLSeconds: s.TTLSeconds, + SourceVM: s.SourceVM, + CreatedAt: s.CreatedAt, + UpdatedAt: s.UpdatedAt, + DeletedAt: s.DeletedAt, + } +} + +func sandboxFromModel(m *SandboxModel) *store.Sandbox { + return &store.Sandbox{ + ID: m.ID, + OrgID: m.OrgID, + HostID: 
m.HostID, + Name: m.Name, + AgentID: m.AgentID, + BaseImage: m.BaseImage, + Bridge: m.Bridge, + TAPDevice: m.TAPDevice, + MACAddress: m.MACAddress, + IPAddress: m.IPAddress, + State: store.SandboxState(m.State), + VCPUs: m.VCPUs, + MemoryMB: m.MemoryMB, + TTLSeconds: m.TTLSeconds, + SourceVM: m.SourceVM, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + DeletedAt: m.DeletedAt, + } +} + +func commandToModel(c *store.Command) *CommandModel { + return &CommandModel{ + ID: c.ID, + SandboxID: c.SandboxID, + Command: c.Command, + Stdout: c.Stdout, + Stderr: c.Stderr, + ExitCode: c.ExitCode, + DurationMS: c.DurationMS, + StartedAt: c.StartedAt, + EndedAt: c.EndedAt, + } +} + +func commandFromModel(m *CommandModel) *store.Command { + return &store.Command{ + ID: m.ID, + SandboxID: m.SandboxID, + Command: m.Command, + Stdout: m.Stdout, + Stderr: m.Stderr, + ExitCode: m.ExitCode, + DurationMS: m.DurationMS, + StartedAt: m.StartedAt, + EndedAt: m.EndedAt, + } +} + +func hostTokenToModel(t *store.HostToken) *HostTokenModel { + return &HostTokenModel{ + ID: t.ID, + OrgID: t.OrgID, + Name: t.Name, + TokenHash: t.TokenHash, + ExpiresAt: t.ExpiresAt, + CreatedAt: t.CreatedAt, + } +} + +func hostTokenFromModel(m *HostTokenModel) *store.HostToken { + return &store.HostToken{ + ID: m.ID, + OrgID: m.OrgID, + Name: m.Name, + TokenHash: m.TokenHash, + ExpiresAt: m.ExpiresAt, + CreatedAt: m.CreatedAt, + } +} + +// --- Host CRUD --- + +func (s *postgresStore) CreateHost(ctx context.Context, host *store.Host) error { + now := time.Now().UTC() + host.CreatedAt = now + host.UpdatedAt = now + if err := s.db.WithContext(ctx).Create(hostToModel(host)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetHost(ctx context.Context, hostID string) (*store.Host, error) { + var model HostModel + if err := s.db.WithContext(ctx).Where("id = ?", hostID).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return hostFromModel(&model), nil +} 
+ +func (s *postgresStore) ListHosts(ctx context.Context) ([]store.Host, error) { + var models []HostModel + if err := s.db.WithContext(ctx).Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]store.Host, 0, len(models)) + for i := range models { + out = append(out, *hostFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) ListHostsByOrg(ctx context.Context, orgID string) ([]store.Host, error) { + var models []HostModel + if err := s.db.WithContext(ctx).Where("org_id = ?", orgID).Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]store.Host, 0, len(models)) + for i := range models { + out = append(out, *hostFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) UpdateHost(ctx context.Context, host *store.Host) error { + host.UpdatedAt = time.Now().UTC() + res := s.db.WithContext(ctx).Model(&HostModel{}).Where("id = ?", host.ID). + Updates(map[string]any{ + "org_id": host.OrgID, + "hostname": host.Hostname, + "version": host.Version, + "total_cpus": host.TotalCPUs, + "total_memory_mb": host.TotalMemoryMB, + "total_disk_mb": host.TotalDiskMB, + "available_cpus": host.AvailableCPUs, + "available_memory_mb": host.AvailableMemoryMB, + "available_disk_mb": host.AvailableDiskMB, + "base_images": host.BaseImages, + "source_vms": host.SourceVMs, + "bridges": host.Bridges, + "status": string(host.Status), + "last_heartbeat": host.LastHeartbeat, + "updated_at": host.UpdatedAt, + }) + if err := mapDBError(res.Error); err != nil { + return err + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +func (s *postgresStore) UpdateHostHeartbeat(ctx context.Context, hostID string, availCPUs int32, availMemMB int64, availDiskMB int64) error { + res := s.db.WithContext(ctx). + Model(&HostModel{}). + Where("id = ?", hostID). 
+ Updates(map[string]any{ + "available_cpus": availCPUs, + "available_memory_mb": availMemMB, + "available_disk_mb": availDiskMB, + "status": string(store.HostStatusOnline), + "last_heartbeat": time.Now().UTC(), + "updated_at": time.Now().UTC(), + }) + if err := mapDBError(res.Error); err != nil { + return err + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +// --- Sandbox CRUD --- + +func (s *postgresStore) CreateSandbox(ctx context.Context, sandbox *store.Sandbox) error { + now := time.Now().UTC() + sandbox.CreatedAt = now + sandbox.UpdatedAt = now + if err := s.db.WithContext(ctx).Create(sandboxToModel(sandbox)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetSandbox(ctx context.Context, sandboxID string) (*store.Sandbox, error) { + var model SandboxModel + if err := s.db.WithContext(ctx).Where("id = ? AND deleted_at IS NULL", sandboxID).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return sandboxFromModel(&model), nil +} + +func (s *postgresStore) GetSandboxByOrg(ctx context.Context, orgID, sandboxID string) (*store.Sandbox, error) { + var model SandboxModel + if err := s.db.WithContext(ctx).Where("id = ? AND org_id = ? AND deleted_at IS NULL", sandboxID, orgID).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return sandboxFromModel(&model), nil +} + +func (s *postgresStore) ListSandboxes(ctx context.Context) ([]store.Sandbox, error) { + var models []SandboxModel + if err := s.db.WithContext(ctx).Where("deleted_at IS NULL").Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]store.Sandbox, 0, len(models)) + for i := range models { + out = append(out, *sandboxFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) ListSandboxesByOrg(ctx context.Context, orgID string) ([]store.Sandbox, error) { + var models []SandboxModel + if err := s.db.WithContext(ctx).Where("org_id = ? 
AND deleted_at IS NULL", orgID).Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]store.Sandbox, 0, len(models)) + for i := range models { + out = append(out, *sandboxFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) UpdateSandbox(ctx context.Context, sandbox *store.Sandbox) error { + sandbox.UpdatedAt = time.Now().UTC() + res := s.db.WithContext(ctx).Model(&SandboxModel{}).Where("id = ? AND deleted_at IS NULL", sandbox.ID). + Updates(map[string]any{ + "state": string(sandbox.State), + "ip_address": sandbox.IPAddress, + "tap_device": sandbox.TAPDevice, + "mac_address": sandbox.MACAddress, + "bridge": sandbox.Bridge, + "updated_at": sandbox.UpdatedAt, + }) + if err := mapDBError(res.Error); err != nil { + return err + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +func (s *postgresStore) DeleteSandbox(ctx context.Context, sandboxID string) error { + now := time.Now().UTC() + res := s.db.WithContext(ctx). + Model(&SandboxModel{}). + Where("id = ? AND deleted_at IS NULL", sandboxID). + Updates(map[string]any{ + "deleted_at": &now, + "state": string(store.SandboxStateDestroyed), + "updated_at": now, + }) + if err := mapDBError(res.Error); err != nil { + return err + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +func (s *postgresStore) GetSandboxesByHostID(ctx context.Context, hostID string) ([]store.Sandbox, error) { + var models []SandboxModel + if err := s.db.WithContext(ctx).Where("host_id = ? 
AND deleted_at IS NULL", hostID).Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]store.Sandbox, 0, len(models)) + for i := range models { + out = append(out, *sandboxFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) CountSandboxesByHostIDs(ctx context.Context, hostIDs []string) (map[string]int, error) { + if len(hostIDs) == 0 { + return map[string]int{}, nil + } + type row struct { + HostID string + Count int + } + var rows []row + err := s.db.WithContext(ctx). + Model(&SandboxModel{}). + Select("host_id, COUNT(*) as count"). + Where("host_id IN ? AND deleted_at IS NULL", hostIDs). + Group("host_id"). + Find(&rows).Error + if err != nil { + return nil, mapDBError(err) + } + result := make(map[string]int, len(rows)) + for _, r := range rows { + result[r.HostID] = r.Count + } + return result, nil +} + +func (s *postgresStore) ListExpiredSandboxes(ctx context.Context, defaultTTL time.Duration) ([]store.Sandbox, error) { + now := time.Now().UTC() + query := s.db.WithContext(ctx). + Where("deleted_at IS NULL"). + Where("state IN ?", []string{string(store.SandboxStateRunning), string(store.SandboxStateStopped)}) + + if defaultTTL > 0 { + defaultTTLSeconds := int32(defaultTTL.Seconds()) + query = query.Where( + "(ttl_seconds > 0 AND created_at + (ttl_seconds || ' seconds')::interval < ?) "+ + "OR (ttl_seconds = 0 AND created_at + (? 
|| ' seconds')::interval < ?)", + now, defaultTTLSeconds, now, + ) + } else { + query = query.Where( + "ttl_seconds > 0 AND created_at + (ttl_seconds || ' seconds')::interval < ?", + now, + ) + } + + var models []SandboxModel + if err := query.Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]store.Sandbox, 0, len(models)) + for i := range models { + out = append(out, *sandboxFromModel(&models[i])) + } + return out, nil +} + +// --- Command CRUD --- + +func (s *postgresStore) CreateCommand(ctx context.Context, cmd *store.Command) error { + if err := s.db.WithContext(ctx).Create(commandToModel(cmd)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) ListSandboxCommands(ctx context.Context, sandboxID string) ([]store.Command, error) { + var models []CommandModel + if err := s.db.WithContext(ctx). + Where("sandbox_id = ?", sandboxID). + Order("started_at ASC"). + Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]store.Command, 0, len(models)) + for i := range models { + out = append(out, *commandFromModel(&models[i])) + } + return out, nil +} + +// --- SourceHost CRUD --- + +func (s *postgresStore) CreateSourceHost(ctx context.Context, sh *store.SourceHost) error { + sh.CreatedAt = time.Now().UTC() + sh.UpdatedAt = sh.CreatedAt + if err := s.db.WithContext(ctx).Create(s.sourceHostToModel(sh)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetSourceHost(ctx context.Context, id string) (*store.SourceHost, error) { + var model SourceHostModel + if err := s.db.WithContext(ctx).Where("id = ?", id).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return s.sourceHostFromModel(&model), nil +} + +func (s *postgresStore) ListSourceHostsByOrg(ctx context.Context, orgID string) ([]*store.SourceHost, error) { + var models []SourceHostModel + if err := s.db.WithContext(ctx).Where("org_id = ?", 
orgID).Order("created_at DESC").Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]*store.SourceHost, 0, len(models)) + for i := range models { + out = append(out, s.sourceHostFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) DeleteSourceHost(ctx context.Context, id string) error { + res := s.db.WithContext(ctx).Where("id = ?", id).Delete(&SourceHostModel{}) + if res.Error != nil { + return mapDBError(res.Error) + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +func (s *postgresStore) sourceHostToModel(sh *store.SourceHost) *SourceHostModel { + m := &SourceHostModel{ + ID: sh.ID, + OrgID: sh.OrgID, + Name: sh.Name, + Hostname: sh.Hostname, + Type: sh.Type, + SSHUser: sh.SSHUser, + SSHPort: sh.SSHPort, + SSHIdentityFile: sh.SSHIdentityFile, + ProxmoxHost: sh.ProxmoxHost, + ProxmoxTokenID: sh.ProxmoxTokenID, + ProxmoxSecret: sh.ProxmoxSecret, + ProxmoxNode: sh.ProxmoxNode, + ProxmoxVerifySSL: sh.ProxmoxVerifySSL, + VMs: sh.VMs, + CreatedAt: sh.CreatedAt, + UpdatedAt: sh.UpdatedAt, + } + if len(s.encryptionKey) > 0 { + if enc, err := crypto.Encrypt(s.encryptionKey, sh.ProxmoxTokenID); err == nil { + m.ProxmoxTokenID = enc + } + if enc, err := crypto.Encrypt(s.encryptionKey, sh.ProxmoxSecret); err == nil { + m.ProxmoxSecret = enc + } + } + return m +} + +func (s *postgresStore) sourceHostFromModel(m *SourceHostModel) *store.SourceHost { + sh := &store.SourceHost{ + ID: m.ID, + OrgID: m.OrgID, + Name: m.Name, + Hostname: m.Hostname, + Type: m.Type, + SSHUser: m.SSHUser, + SSHPort: m.SSHPort, + SSHIdentityFile: m.SSHIdentityFile, + ProxmoxHost: m.ProxmoxHost, + ProxmoxTokenID: m.ProxmoxTokenID, + ProxmoxSecret: m.ProxmoxSecret, + ProxmoxNode: m.ProxmoxNode, + ProxmoxVerifySSL: m.ProxmoxVerifySSL, + VMs: m.VMs, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + } + if len(s.encryptionKey) > 0 { + if dec, err := crypto.Decrypt(s.encryptionKey, m.ProxmoxTokenID); err == nil { + 
sh.ProxmoxTokenID = dec + } + if dec, err := crypto.Decrypt(s.encryptionKey, m.ProxmoxSecret); err == nil { + sh.ProxmoxSecret = dec + } + } + return sh +} + +// --- HostToken CRUD --- + +func (s *postgresStore) CreateHostToken(ctx context.Context, token *store.HostToken) error { + token.CreatedAt = time.Now().UTC() + if err := s.db.WithContext(ctx).Create(hostTokenToModel(token)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetHostTokenByHash(ctx context.Context, hash string) (*store.HostToken, error) { + var model HostTokenModel + if err := s.db.WithContext(ctx).Where("token_hash = ? AND (expires_at IS NULL OR expires_at > ?)", hash, time.Now()).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return hostTokenFromModel(&model), nil +} + +func (s *postgresStore) ListHostTokensByOrg(ctx context.Context, orgID string) ([]store.HostToken, error) { + var models []HostTokenModel + if err := s.db.WithContext(ctx).Where("org_id = ?", orgID).Order("created_at DESC").Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]store.HostToken, 0, len(models)) + for i := range models { + out = append(out, *hostTokenFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) DeleteHostToken(ctx context.Context, orgID, id string) error { + res := s.db.WithContext(ctx).Where("id = ? 
AND org_id = ?", id, orgID).Delete(&HostTokenModel{}) + if res.Error != nil { + return mapDBError(res.Error) + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +/* +// --- Agent Conversation converters --- + +func convToModel(c *store.AgentConversation) *AgentConversationModel { + return &AgentConversationModel{ + ID: c.ID, + OrgID: c.OrgID, + UserID: c.UserID, + Title: c.Title, + Model: c.Model, + CreatedAt: c.CreatedAt, + UpdatedAt: c.UpdatedAt, + } +} + +func convFromModel(m *AgentConversationModel) *store.AgentConversation { + return &store.AgentConversation{ + ID: m.ID, + OrgID: m.OrgID, + UserID: m.UserID, + Title: m.Title, + Model: m.Model, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + } +} + +func msgToModel(m *store.AgentMessage) *AgentMessageModel { + return &AgentMessageModel{ + ID: m.ID, + ConversationID: m.ConversationID, + Role: string(m.Role), + Content: m.Content, + ToolCalls: m.ToolCalls, + ToolCallID: m.ToolCallID, + TokensInput: m.TokensInput, + TokensOutput: m.TokensOutput, + Model: m.Model, + CreatedAt: m.CreatedAt, + } +} + +func msgFromModel(m *AgentMessageModel) *store.AgentMessage { + return &store.AgentMessage{ + ID: m.ID, + ConversationID: m.ConversationID, + Role: store.MessageRole(m.Role), + Content: m.Content, + ToolCalls: m.ToolCalls, + ToolCallID: m.ToolCallID, + TokensInput: m.TokensInput, + TokensOutput: m.TokensOutput, + Model: m.Model, + CreatedAt: m.CreatedAt, + } +} + +func pbToModel(p *store.Playbook) *PlaybookModel { + return &PlaybookModel{ + ID: p.ID, + OrgID: p.OrgID, + Name: p.Name, + Description: p.Description, + CreatedAt: p.CreatedAt, + UpdatedAt: p.UpdatedAt, + } +} + +func pbFromModel(m *PlaybookModel) *store.Playbook { + return &store.Playbook{ + ID: m.ID, + OrgID: m.OrgID, + Name: m.Name, + Description: m.Description, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + } +} + +func taskToModel(t *store.PlaybookTask) *PlaybookTaskModel { + return &PlaybookTaskModel{ + ID: 
t.ID, + PlaybookID: t.PlaybookID, + SortOrder: t.SortOrder, + Name: t.Name, + Module: t.Module, + Params: t.Params, + CreatedAt: t.CreatedAt, + UpdatedAt: t.UpdatedAt, + } +} + +func taskFromModel(m *PlaybookTaskModel) *store.PlaybookTask { + return &store.PlaybookTask{ + ID: m.ID, + PlaybookID: m.PlaybookID, + SortOrder: m.SortOrder, + Name: m.Name, + Module: m.Module, + Params: m.Params, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + } +} +*/ + +/* +// --- Agent Conversation CRUD --- + +func (s *postgresStore) CreateAgentConversation(ctx context.Context, conv *store.AgentConversation) error { + now := time.Now().UTC() + conv.CreatedAt = now + conv.UpdatedAt = now + if err := s.db.WithContext(ctx).Create(convToModel(conv)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetAgentConversation(ctx context.Context, id string) (*store.AgentConversation, error) { + var model AgentConversationModel + if err := s.db.WithContext(ctx).Where("id = ?", id).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return convFromModel(&model), nil +} + +func (s *postgresStore) ListAgentConversationsByOrg(ctx context.Context, orgID string) ([]*store.AgentConversation, error) { + var models []AgentConversationModel + if err := s.db.WithContext(ctx).Where("org_id = ?", orgID).Order("updated_at DESC").Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]*store.AgentConversation, 0, len(models)) + for i := range models { + out = append(out, convFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) DeleteAgentConversation(ctx context.Context, id string) error { + return s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + if err := tx.Where("conversation_id = ?", id).Delete(&AgentMessageModel{}).Error; err != nil { + return mapDBError(err) + } + res := tx.Where("id = ?", id).Delete(&AgentConversationModel{}) + if res.Error != nil { + return 
mapDBError(res.Error) + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil + }) +} + +// --- Agent Message CRUD --- + +func (s *postgresStore) CreateAgentMessage(ctx context.Context, msg *store.AgentMessage) error { + msg.CreatedAt = time.Now().UTC() + if err := s.db.WithContext(ctx).Create(msgToModel(msg)).Error; err != nil { + return mapDBError(err) + } + // Update conversation updated_at + if err := s.db.WithContext(ctx).Model(&AgentConversationModel{}). + Where("id = ?", msg.ConversationID). + Update("updated_at", time.Now().UTC()).Error; err != nil { + slog.Warn("failed to update conversation updated_at", + "conversation_id", msg.ConversationID, "error", err) + } + return nil +} + +func (s *postgresStore) ListAgentMessages(ctx context.Context, conversationID string) ([]*store.AgentMessage, error) { + var models []AgentMessageModel + if err := s.db.WithContext(ctx).Where("conversation_id = ?", conversationID).Order("created_at ASC").Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]*store.AgentMessage, 0, len(models)) + for i := range models { + out = append(out, msgFromModel(&models[i])) + } + return out, nil +} + +// --- Playbook CRUD --- + +func (s *postgresStore) CreatePlaybook(ctx context.Context, pb *store.Playbook) error { + now := time.Now().UTC() + pb.CreatedAt = now + pb.UpdatedAt = now + if err := s.db.WithContext(ctx).Create(pbToModel(pb)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetPlaybook(ctx context.Context, id string) (*store.Playbook, error) { + var model PlaybookModel + if err := s.db.WithContext(ctx).Where("id = ?", id).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return pbFromModel(&model), nil +} + +func (s *postgresStore) ListPlaybooksByOrg(ctx context.Context, orgID string) ([]*store.Playbook, error) { + var models []PlaybookModel + if err := s.db.WithContext(ctx).Where("org_id = ?", 
orgID).Order("created_at DESC").Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]*store.Playbook, 0, len(models)) + for i := range models { + out = append(out, pbFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) UpdatePlaybook(ctx context.Context, pb *store.Playbook) error { + pb.UpdatedAt = time.Now().UTC() + res := s.db.WithContext(ctx).Model(&PlaybookModel{}).Where("id = ?", pb.ID). + Updates(map[string]any{ + "name": pb.Name, + "description": pb.Description, + "updated_at": pb.UpdatedAt, + }) + if err := mapDBError(res.Error); err != nil { + return err + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +func (s *postgresStore) DeletePlaybook(ctx context.Context, id string) error { + return s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + if err := tx.Where("playbook_id = ?", id).Delete(&PlaybookTaskModel{}).Error; err != nil { + return mapDBError(err) + } + res := tx.Where("id = ?", id).Delete(&PlaybookModel{}) + if res.Error != nil { + return mapDBError(res.Error) + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil + }) +} + +// --- Playbook Task CRUD --- + +func (s *postgresStore) CreatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { + now := time.Now().UTC() + task.CreatedAt = now + task.UpdatedAt = now + + return s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + var maxOrder *int + if err := tx.Model(&PlaybookTaskModel{}). + Where("playbook_id = ?", task.PlaybookID). + Select("MAX(sort_order)"). 
+ Scan(&maxOrder).Error; err != nil { + return mapDBError(err) + } + if maxOrder != nil { + task.SortOrder = *maxOrder + 1 + } else { + task.SortOrder = 0 + } + if err := tx.Create(taskToModel(task)).Error; err != nil { + return mapDBError(err) + } + return nil + }) +} + +func (s *postgresStore) GetPlaybookTask(ctx context.Context, id string) (*store.PlaybookTask, error) { + var model PlaybookTaskModel + if err := s.db.WithContext(ctx).Where("id = ?", id).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return taskFromModel(&model), nil +} + +func (s *postgresStore) ListPlaybookTasks(ctx context.Context, playbookID string) ([]*store.PlaybookTask, error) { + var models []PlaybookTaskModel + if err := s.db.WithContext(ctx).Where("playbook_id = ?", playbookID).Order("sort_order ASC").Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]*store.PlaybookTask, 0, len(models)) + for i := range models { + out = append(out, taskFromModel(&models[i])) + } + return out, nil +} + +func (s *postgresStore) UpdatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { + task.UpdatedAt = time.Now().UTC() + res := s.db.WithContext(ctx).Model(&PlaybookTaskModel{}).Where("id = ?", task.ID). 
+ Updates(map[string]any{ + "name": task.Name, + "module": task.Module, + "params": task.Params, + "sort_order": task.SortOrder, + "updated_at": task.UpdatedAt, + }) + if err := mapDBError(res.Error); err != nil { + return err + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +func (s *postgresStore) DeletePlaybookTask(ctx context.Context, id string) error { + res := s.db.WithContext(ctx).Where("id = ?", id).Delete(&PlaybookTaskModel{}) + if res.Error != nil { + return mapDBError(res.Error) + } + if res.RowsAffected == 0 { + return store.ErrNotFound + } + return nil +} + +func (s *postgresStore) ReorderPlaybookTasks(ctx context.Context, playbookID string, taskIDs []string) error { + return s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { + for i, id := range taskIDs { + res := tx.Model(&PlaybookTaskModel{}).Where("id = ? AND playbook_id = ?", id, playbookID). + Update("sort_order", i) + if res.Error != nil { + return mapDBError(res.Error) + } + } + return nil + }) +} +*/ + +// --- Billing helpers --- + +func (s *postgresStore) GetOrganizationByStripeCustomerID(ctx context.Context, customerID string) (*store.Organization, error) { + var model OrganizationModel + if err := s.db.WithContext(ctx).Where("stripe_customer_id = ? 
AND deleted_at IS NULL", customerID).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return orgFromModel(&model), nil +} + +// --- Model Meter / Billing --- + +func modelMeterToModel(m *store.ModelMeter) *ModelMeterModel { + return &ModelMeterModel{ + ID: m.ID, + ModelID: m.ModelID, + StripeProductID: m.StripeProductID, + StripeInputMeterID: m.StripeInputMeterID, + StripeOutputMeterID: m.StripeOutputMeterID, + StripeInputPriceID: m.StripeInputPriceID, + StripeOutputPriceID: m.StripeOutputPriceID, + InputEventName: m.InputEventName, + OutputEventName: m.OutputEventName, + InputCostPerToken: m.InputCostPerToken, + OutputCostPerToken: m.OutputCostPerToken, + CreatedAt: m.CreatedAt, + } +} + +func modelMeterFromModel(m *ModelMeterModel) *store.ModelMeter { + return &store.ModelMeter{ + ID: m.ID, + ModelID: m.ModelID, + StripeProductID: m.StripeProductID, + StripeInputMeterID: m.StripeInputMeterID, + StripeOutputMeterID: m.StripeOutputMeterID, + StripeInputPriceID: m.StripeInputPriceID, + StripeOutputPriceID: m.StripeOutputPriceID, + InputEventName: m.InputEventName, + OutputEventName: m.OutputEventName, + InputCostPerToken: m.InputCostPerToken, + OutputCostPerToken: m.OutputCostPerToken, + CreatedAt: m.CreatedAt, + } +} + +func orgModelSubToModel(s *store.OrgModelSubscription) *OrgModelSubscriptionModel { + return &OrgModelSubscriptionModel{ + ID: s.ID, + OrgID: s.OrgID, + ModelID: s.ModelID, + StripeInputSubItemID: s.StripeInputSubItemID, + StripeOutputSubItemID: s.StripeOutputSubItemID, + CreatedAt: s.CreatedAt, + } +} + +func orgModelSubFromModel(m *OrgModelSubscriptionModel) *store.OrgModelSubscription { + return &store.OrgModelSubscription{ + ID: m.ID, + OrgID: m.OrgID, + ModelID: m.ModelID, + StripeInputSubItemID: m.StripeInputSubItemID, + StripeOutputSubItemID: m.StripeOutputSubItemID, + CreatedAt: m.CreatedAt, + } +} + +func (s *postgresStore) GetModelMeter(ctx context.Context, modelID string) (*store.ModelMeter, error) { + var model 
ModelMeterModel + if err := s.db.WithContext(ctx).Where("model_id = ?", modelID).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return modelMeterFromModel(&model), nil +} + +func (s *postgresStore) CreateModelMeter(ctx context.Context, m *store.ModelMeter) error { + m.CreatedAt = time.Now().UTC() + if err := s.db.WithContext(ctx).Create(modelMeterToModel(m)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) GetOrgModelSubscription(ctx context.Context, orgID, modelID string) (*store.OrgModelSubscription, error) { + var model OrgModelSubscriptionModel + if err := s.db.WithContext(ctx).Where("org_id = ? AND model_id = ?", orgID, modelID).First(&model).Error; err != nil { + return nil, mapDBError(err) + } + return orgModelSubFromModel(&model), nil +} + +func (s *postgresStore) CreateOrgModelSubscription(ctx context.Context, sub *store.OrgModelSubscription) error { + sub.CreatedAt = time.Now().UTC() + if err := s.db.WithContext(ctx).Create(orgModelSubToModel(sub)).Error; err != nil { + return mapDBError(err) + } + return nil +} + +func (s *postgresStore) SumTokenUsage(ctx context.Context, orgID string, from, to time.Time) (float64, error) { + var total float64 + err := s.db.WithContext(ctx). + Model(&UsageRecordModel{}). + Where("org_id = ? AND recorded_at >= ? AND recorded_at <= ? AND resource_type = ?", orgID, from, to, "llm_token"). + Select("COALESCE(SUM(quantity), 0)"). 
+ Scan(&total).Error + if err != nil { + return 0, mapDBError(err) + } + return total, nil +} + +func (s *postgresStore) ListActiveSubscriptions(ctx context.Context) ([]*store.Subscription, error) { + var models []SubscriptionModel + if err := s.db.WithContext(ctx).Where("status = ?", string(store.SubStatusActive)).Find(&models).Error; err != nil { + return nil, mapDBError(err) + } + out := make([]*store.Subscription, 0, len(models)) + for i := range models { + out = append(out, subFromModel(&models[i])) + } + return out, nil +} + +// --- Advisory Locks --- + +func (s *postgresStore) AcquireAdvisoryLock(ctx context.Context, key int64) error { + return s.db.WithContext(ctx).Exec("SELECT pg_advisory_lock(?)", key).Error +} + +func (s *postgresStore) ReleaseAdvisoryLock(ctx context.Context, key int64) error { + return s.db.WithContext(ctx).Exec("SELECT pg_advisory_unlock(?)", key).Error +} diff --git a/api/internal/store/store.go b/api/internal/store/store.go new file mode 100644 index 00000000..326ace2d --- /dev/null +++ b/api/internal/store/store.go @@ -0,0 +1,571 @@ +package store + +import ( + "context" + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + "time" +) + +// Sentinel errors for store implementations. +var ( + ErrNotFound = errors.New("store: not found") + ErrAlreadyExists = errors.New("store: already exists") + ErrConflict = errors.New("store: conflict") + ErrInvalid = errors.New("store: invalid data") +) + +type Config struct { + DatabaseURL string `json:"database_url"` + MaxOpenConns int `json:"max_open_conns"` + MaxIdleConns int `json:"max_idle_conns"` + ConnMaxLifetime time.Duration `json:"conn_max_lifetime"` + AutoMigrate bool `json:"auto_migrate"` + EncryptionKey string `json:"-"` +} + +type ListOptions struct { + Limit int + Offset int + OrderBy string + Asc bool +} + +// OrgRole enumerates roles within an organization. 
+type OrgRole string + +const ( + OrgRoleOwner OrgRole = "owner" + OrgRoleAdmin OrgRole = "admin" + OrgRoleMember OrgRole = "member" +) + +// SubscriptionPlan enumerates billing plans. +type SubscriptionPlan string + +const ( + PlanFree SubscriptionPlan = "free" + PlanUsageBased SubscriptionPlan = "usage_based" +) + +// SubscriptionStatus enumerates subscription statuses. +type SubscriptionStatus string + +const ( + SubStatusActive SubscriptionStatus = "active" + SubStatusPastDue SubscriptionStatus = "past_due" + SubStatusCancelled SubscriptionStatus = "cancelled" +) + +// User represents a registered user. +type User struct { + ID string `json:"id"` + Email string `json:"email"` + DisplayName string `json:"display_name"` + AvatarURL string `json:"avatar_url,omitempty"` + PasswordHash string `json:"-"` + EmailVerified bool `json:"email_verified"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DeletedAt *time.Time `json:"deleted_at,omitempty"` +} + +// OAuthAccount links an external OAuth provider identity to a user. +type OAuthAccount struct { + ID string `json:"id"` + UserID string `json:"user_id"` + Provider string `json:"provider"` + ProviderID string `json:"provider_id"` + Email string `json:"email"` + AccessToken string `json:"-"` + RefreshToken string `json:"-"` + TokenExpiry time.Time `json:"token_expiry"` + CreatedAt time.Time `json:"created_at"` +} + +// Session represents an active user session. +type Session struct { + ID string `json:"id"` + UserID string `json:"user_id"` + IPAddress string `json:"ip_address"` + UserAgent string `json:"user_agent"` + ExpiresAt time.Time `json:"expires_at"` + CreatedAt time.Time `json:"created_at"` +} + +// Organization represents a team/org that owns resources. 
+type Organization struct { + ID string `json:"id"` + Name string `json:"name"` + Slug string `json:"slug"` + OwnerID string `json:"owner_id"` + StripeCustomerID string `json:"stripe_customer_id,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DeletedAt *time.Time `json:"deleted_at,omitempty"` +} + +// OrgMember represents a user's membership in an organization. +type OrgMember struct { + ID string `json:"id"` + OrgID string `json:"org_id"` + UserID string `json:"user_id"` + Role OrgRole `json:"role"` + CreatedAt time.Time `json:"created_at"` +} + +// Subscription tracks an org's billing plan. +type Subscription struct { + ID string `json:"id"` + OrgID string `json:"org_id"` + Plan SubscriptionPlan `json:"plan"` + StripeSubscriptionID string `json:"stripe_subscription_id,omitempty"` + StripePriceID string `json:"stripe_price_id,omitempty"` + Status SubscriptionStatus `json:"status"` + CurrentPeriodStart time.Time `json:"current_period_start"` + CurrentPeriodEnd time.Time `json:"current_period_end"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// UsageRecord tracks resource consumption. +type UsageRecord struct { + ID string `json:"id"` + OrgID string `json:"org_id"` + ResourceType string `json:"resource_type"` + Quantity float64 `json:"quantity"` + RecordedAt time.Time `json:"recorded_at"` + MetadataJSON string `json:"metadata_json,omitempty"` +} + +// --------------------------------------------------------------------------- +// Infrastructure types (from control-plane) +// --------------------------------------------------------------------------- + +// HostStatus represents the connectivity status of a sandbox host. +type HostStatus string + +const ( + HostStatusOnline HostStatus = "ONLINE" + HostStatusOffline HostStatus = "OFFLINE" +) + +// SandboxState represents the lifecycle state of a sandbox. 
+type SandboxState string + +const ( + SandboxStateCreating SandboxState = "CREATING" + SandboxStateRunning SandboxState = "RUNNING" + SandboxStateStopped SandboxState = "STOPPED" + SandboxStateDestroyed SandboxState = "DESTROYED" + SandboxStateError SandboxState = "ERROR" +) + +// StringSlice is a JSON-serialized []string for use as a GORM column type. +type StringSlice []string + +func (s StringSlice) Value() (driver.Value, error) { + if s == nil { + return "[]", nil + } + b, err := json.Marshal(s) + if err != nil { + return nil, fmt.Errorf("marshal StringSlice: %w", err) + } + return string(b), nil +} + +func (s *StringSlice) Scan(value interface{}) error { + if value == nil { + *s = StringSlice{} + return nil + } + var bytes []byte + switch v := value.(type) { + case string: + bytes = []byte(v) + case []byte: + bytes = v + default: + return fmt.Errorf("unsupported type for StringSlice: %T", value) + } + return json.Unmarshal(bytes, s) +} + +// SourceVMJSON represents a source VM entry stored as JSON in the host record. +type SourceVMJSON struct { + Name string `json:"name"` + State string `json:"state"` + IPAddress string `json:"ip_address"` + Prepared bool `json:"prepared"` +} + +// SourceVMSlice is a JSON-serialized []SourceVMJSON for use as a GORM column type. +type SourceVMSlice []SourceVMJSON + +func (s SourceVMSlice) Value() (driver.Value, error) { + if s == nil { + return "[]", nil + } + b, err := json.Marshal(s) + if err != nil { + return nil, fmt.Errorf("marshal SourceVMSlice: %w", err) + } + return string(b), nil +} + +func (s *SourceVMSlice) Scan(value interface{}) error { + if value == nil { + *s = SourceVMSlice{} + return nil + } + var bytes []byte + switch v := value.(type) { + case string: + bytes = []byte(v) + case []byte: + bytes = v + default: + return fmt.Errorf("unsupported type for SourceVMSlice: %T", value) + } + return json.Unmarshal(bytes, s) +} + +// BridgeJSON represents a network bridge entry stored as JSON in the host record. 
+type BridgeJSON struct { + Name string `json:"name"` + Subnet string `json:"subnet"` +} + +// BridgeSlice is a JSON-serialized []BridgeJSON for use as a GORM column type. +type BridgeSlice []BridgeJSON + +func (s BridgeSlice) Value() (driver.Value, error) { + if s == nil { + return "[]", nil + } + b, err := json.Marshal(s) + if err != nil { + return nil, fmt.Errorf("marshal BridgeSlice: %w", err) + } + return string(b), nil +} + +func (s *BridgeSlice) Scan(value interface{}) error { + if value == nil { + *s = BridgeSlice{} + return nil + } + var bytes []byte + switch v := value.(type) { + case string: + bytes = []byte(v) + case []byte: + bytes = v + default: + return fmt.Errorf("unsupported type for BridgeSlice: %T", value) + } + return json.Unmarshal(bytes, s) +} + +// Host represents a sandbox host machine registered with the control plane. +type Host struct { + ID string `json:"id"` + OrgID string `json:"org_id"` + Hostname string `json:"hostname"` + Version string `json:"version"` + TotalCPUs int32 `json:"total_cpus"` + TotalMemoryMB int64 `json:"total_memory_mb"` + TotalDiskMB int64 `json:"total_disk_mb"` + AvailableCPUs int32 `json:"available_cpus"` + AvailableMemoryMB int64 `json:"available_memory_mb"` + AvailableDiskMB int64 `json:"available_disk_mb"` + BaseImages StringSlice `json:"base_images"` + SourceVMs SourceVMSlice `json:"source_vms"` + Bridges BridgeSlice `json:"bridges"` + Status HostStatus `json:"status"` + LastHeartbeat time.Time `json:"last_heartbeat"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Sandbox represents a VM sandbox managed by the control plane. 
+type Sandbox struct { + ID string `json:"id"` + OrgID string `json:"org_id"` + HostID string `json:"host_id"` + Name string `json:"name"` + AgentID string `json:"agent_id"` + BaseImage string `json:"base_image"` + Bridge string `json:"bridge"` + TAPDevice string `json:"tap_device"` + MACAddress string `json:"mac_address"` + IPAddress string `json:"ip_address"` + State SandboxState `json:"state"` + VCPUs int32 `json:"vcpus"` + MemoryMB int32 `json:"memory_mb"` + TTLSeconds int32 `json:"ttl_seconds"` + SourceVM string `json:"source_vm"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DeletedAt *time.Time `json:"deleted_at,omitempty"` +} + +// Command represents a command executed within a sandbox. +type Command struct { + ID string `json:"id"` + SandboxID string `json:"sandbox_id"` + Command string `json:"command"` + Stdout string `json:"stdout"` + Stderr string `json:"stderr"` + ExitCode int32 `json:"exit_code"` + DurationMS int64 `json:"duration_ms"` + StartedAt time.Time `json:"started_at"` + EndedAt time.Time `json:"ended_at"` +} + +// Agent conversation and playbook types - commented out, not yet ready for integration. +/* +// AgentConversation represents a chat conversation with the AI agent. +type AgentConversation struct { + ID string `json:"id"` + OrgID string `json:"org_id"` + UserID string `json:"user_id"` + Title string `json:"title"` + Model string `json:"model"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// MessageRole enumerates agent message roles. +type MessageRole string + +const ( + MessageRoleUser MessageRole = "user" + MessageRoleAssistant MessageRole = "assistant" + MessageRoleTool MessageRole = "tool" +) + +// AgentMessage represents a single message in an agent conversation. 
+type AgentMessage struct { + ID string `json:"id"` + ConversationID string `json:"conversation_id"` + Role MessageRole `json:"role"` + Content string `json:"content"` + ToolCalls string `json:"tool_calls,omitempty"` + ToolCallID string `json:"tool_call_id,omitempty"` + TokensInput int `json:"tokens_input,omitempty"` + TokensOutput int `json:"tokens_output,omitempty"` + Model string `json:"model,omitempty"` + CreatedAt time.Time `json:"created_at"` +} + +// Playbook represents an Ansible-style playbook. +type Playbook struct { + ID string `json:"id"` + OrgID string `json:"org_id"` + Name string `json:"name"` + Description string `json:"description"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// PlaybookTask represents a single task within a playbook. +type PlaybookTask struct { + ID string `json:"id"` + PlaybookID string `json:"playbook_id"` + SortOrder int `json:"sort_order"` + Name string `json:"name"` + Module string `json:"module"` + Params string `json:"params"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} +*/ + +// ModelMeter tracks Stripe meter/price objects for a specific LLM model. +type ModelMeter struct { + ID string `json:"id"` + ModelID string `json:"model_id"` + StripeProductID string `json:"stripe_product_id"` + StripeInputMeterID string `json:"stripe_input_meter_id"` + StripeOutputMeterID string `json:"stripe_output_meter_id"` + StripeInputPriceID string `json:"stripe_input_price_id"` + StripeOutputPriceID string `json:"stripe_output_price_id"` + InputEventName string `json:"input_event_name"` + OutputEventName string `json:"output_event_name"` + InputCostPerToken float64 `json:"input_cost_per_token"` + OutputCostPerToken float64 `json:"output_cost_per_token"` + CreatedAt time.Time `json:"created_at"` +} + +// OrgModelSubscription links an org's Stripe subscription items for a specific model. 
+type OrgModelSubscription struct { + ID string `json:"id"` + OrgID string `json:"org_id"` + ModelID string `json:"model_id"` + StripeInputSubItemID string `json:"stripe_input_sub_item_id"` + StripeOutputSubItemID string `json:"stripe_output_sub_item_id"` + CreatedAt time.Time `json:"created_at"` +} + +// SourceHost represents a confirmed source host that can be used for snapshot-based sandboxes. +type SourceHost struct { + ID string `json:"id"` + OrgID string `json:"org_id"` + Name string `json:"name"` + Hostname string `json:"hostname"` + Type string `json:"type"` // "libvirt" or "proxmox" + SSHUser string `json:"ssh_user"` + SSHPort int `json:"ssh_port"` + SSHIdentityFile string `json:"ssh_identity_file"` + ProxmoxHost string `json:"proxmox_host,omitempty"` + ProxmoxTokenID string `json:"proxmox_token_id,omitempty"` + ProxmoxSecret string `json:"-"` + ProxmoxNode string `json:"proxmox_node,omitempty"` + ProxmoxVerifySSL bool `json:"proxmox_verify_ssl,omitempty"` + VMs StringSlice `json:"vms"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// HostToken is a bearer token that a sandbox host uses to authenticate its +// gRPC connection. Tokens are scoped to an organization. +type HostToken struct { + ID string `json:"id"` + OrgID string `json:"org_id"` + Name string `json:"name"` + TokenHash string `json:"-"` + ExpiresAt *time.Time `json:"expires_at,omitempty"` + CreatedAt time.Time `json:"created_at"` +} + +// DataStore declares data operations. 
+type DataStore interface { + // User + CreateUser(ctx context.Context, u *User) error + GetUser(ctx context.Context, id string) (*User, error) + GetUserByEmail(ctx context.Context, email string) (*User, error) + UpdateUser(ctx context.Context, u *User) error + + // OAuth + CreateOAuthAccount(ctx context.Context, oa *OAuthAccount) error + GetOAuthAccount(ctx context.Context, provider, providerID string) (*OAuthAccount, error) + GetOAuthAccountsByUser(ctx context.Context, userID string) ([]*OAuthAccount, error) + + // Session + CreateSession(ctx context.Context, s *Session) error + GetSession(ctx context.Context, id string) (*Session, error) + DeleteSession(ctx context.Context, id string) error + DeleteExpiredSessions(ctx context.Context) error + + // Organization + CreateOrganization(ctx context.Context, org *Organization) error + GetOrganization(ctx context.Context, id string) (*Organization, error) + GetOrganizationBySlug(ctx context.Context, slug string) (*Organization, error) + ListOrganizationsByUser(ctx context.Context, userID string) ([]*Organization, error) + UpdateOrganization(ctx context.Context, org *Organization) error + DeleteOrganization(ctx context.Context, id string) error + + // OrgMember + CreateOrgMember(ctx context.Context, m *OrgMember) error + GetOrgMember(ctx context.Context, orgID, userID string) (*OrgMember, error) + GetOrgMemberByID(ctx context.Context, orgID, memberID string) (*OrgMember, error) + ListOrgMembers(ctx context.Context, orgID string) ([]*OrgMember, error) + DeleteOrgMember(ctx context.Context, orgID, id string) error + + // Subscription + CreateSubscription(ctx context.Context, sub *Subscription) error + GetSubscriptionByOrg(ctx context.Context, orgID string) (*Subscription, error) + UpdateSubscription(ctx context.Context, sub *Subscription) error + GetSubscriptionByStripeID(ctx context.Context, stripeSubID string) (*Subscription, error) + + // Usage + CreateUsageRecord(ctx context.Context, rec *UsageRecord) error + 
ListUsageRecords(ctx context.Context, orgID string, from, to time.Time) ([]*UsageRecord, error) + + // Host + CreateHost(ctx context.Context, host *Host) error + GetHost(ctx context.Context, hostID string) (*Host, error) + ListHosts(ctx context.Context) ([]Host, error) + ListHostsByOrg(ctx context.Context, orgID string) ([]Host, error) + UpdateHost(ctx context.Context, host *Host) error + UpdateHostHeartbeat(ctx context.Context, hostID string, availCPUs int32, availMemMB int64, availDiskMB int64) error + + // Sandbox + CreateSandbox(ctx context.Context, sandbox *Sandbox) error + GetSandbox(ctx context.Context, sandboxID string) (*Sandbox, error) + GetSandboxByOrg(ctx context.Context, orgID, sandboxID string) (*Sandbox, error) + ListSandboxes(ctx context.Context) ([]Sandbox, error) + ListSandboxesByOrg(ctx context.Context, orgID string) ([]Sandbox, error) + UpdateSandbox(ctx context.Context, sandbox *Sandbox) error + DeleteSandbox(ctx context.Context, sandboxID string) error + GetSandboxesByHostID(ctx context.Context, hostID string) ([]Sandbox, error) + CountSandboxesByHostIDs(ctx context.Context, hostIDs []string) (map[string]int, error) + ListExpiredSandboxes(ctx context.Context, defaultTTL time.Duration) ([]Sandbox, error) + + // Command + CreateCommand(ctx context.Context, cmd *Command) error + ListSandboxCommands(ctx context.Context, sandboxID string) ([]Command, error) + + // SourceHost + CreateSourceHost(ctx context.Context, sh *SourceHost) error + GetSourceHost(ctx context.Context, id string) (*SourceHost, error) + ListSourceHostsByOrg(ctx context.Context, orgID string) ([]*SourceHost, error) + DeleteSourceHost(ctx context.Context, id string) error + + // HostToken + CreateHostToken(ctx context.Context, token *HostToken) error + GetHostTokenByHash(ctx context.Context, hash string) (*HostToken, error) + ListHostTokensByOrg(ctx context.Context, orgID string) ([]HostToken, error) + DeleteHostToken(ctx context.Context, orgID, id string) error + + // Agent 
Conversations - commented out, not yet ready for integration + // CreateAgentConversation(ctx context.Context, conv *AgentConversation) error + // GetAgentConversation(ctx context.Context, id string) (*AgentConversation, error) + // ListAgentConversationsByOrg(ctx context.Context, orgID string) ([]*AgentConversation, error) + // DeleteAgentConversation(ctx context.Context, id string) error + + // Agent Messages - commented out, not yet ready for integration + // CreateAgentMessage(ctx context.Context, msg *AgentMessage) error + // ListAgentMessages(ctx context.Context, conversationID string) ([]*AgentMessage, error) + + // Playbooks - commented out, not yet ready for integration + // CreatePlaybook(ctx context.Context, pb *Playbook) error + // GetPlaybook(ctx context.Context, id string) (*Playbook, error) + // ListPlaybooksByOrg(ctx context.Context, orgID string) ([]*Playbook, error) + // UpdatePlaybook(ctx context.Context, pb *Playbook) error + // DeletePlaybook(ctx context.Context, id string) error + + // Playbook Tasks - commented out, not yet ready for integration + // CreatePlaybookTask(ctx context.Context, task *PlaybookTask) error + // GetPlaybookTask(ctx context.Context, id string) (*PlaybookTask, error) + // ListPlaybookTasks(ctx context.Context, playbookID string) ([]*PlaybookTask, error) + // UpdatePlaybookTask(ctx context.Context, task *PlaybookTask) error + // DeletePlaybookTask(ctx context.Context, id string) error + // ReorderPlaybookTasks(ctx context.Context, playbookID string, taskIDs []string) error + + // Billing helpers + GetOrganizationByStripeCustomerID(ctx context.Context, customerID string) (*Organization, error) + GetModelMeter(ctx context.Context, modelID string) (*ModelMeter, error) + CreateModelMeter(ctx context.Context, m *ModelMeter) error + GetOrgModelSubscription(ctx context.Context, orgID, modelID string) (*OrgModelSubscription, error) + CreateOrgModelSubscription(ctx context.Context, s *OrgModelSubscription) error + 
SumTokenUsage(ctx context.Context, orgID string, from, to time.Time) (float64, error) + ListActiveSubscriptions(ctx context.Context) ([]*Subscription, error) + + // Advisory locks (multi-instance safety) + AcquireAdvisoryLock(ctx context.Context, key int64) error + ReleaseAdvisoryLock(ctx context.Context, key int64) error +} + +// Store is the root database handle with lifecycle methods. +type Store interface { + DataStore + Config() Config + Ping(ctx context.Context) error + WithTx(ctx context.Context, fn func(tx DataStore) error) error + Close() error +} diff --git a/api/internal/store/store_test.go b/api/internal/store/store_test.go new file mode 100644 index 00000000..63b27188 --- /dev/null +++ b/api/internal/store/store_test.go @@ -0,0 +1,305 @@ +package store + +import ( + "encoding/json" + "testing" +) + +// --------------------------------------------------------------------------- +// StringSlice +// --------------------------------------------------------------------------- + +func TestStringSlice_Value_Nil(t *testing.T) { + var s StringSlice + v, err := s.Value() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if v != "[]" { + t.Errorf("expected '[]', got %v", v) + } +} + +func TestStringSlice_Value_NonNil(t *testing.T) { + s := StringSlice{"a", "b", "c"} + v, err := s.Value() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + str, ok := v.(string) + if !ok { + t.Fatalf("expected string, got %T", v) + } + + var result []string + if err := json.Unmarshal([]byte(str), &result); err != nil { + t.Fatalf("invalid JSON: %v", err) + } + if len(result) != 3 { + t.Errorf("expected 3 items, got %d", len(result)) + } + if result[0] != "a" || result[1] != "b" || result[2] != "c" { + t.Errorf("unexpected values: %v", result) + } +} + +func TestStringSlice_Value_Empty(t *testing.T) { + s := StringSlice{} + v, err := s.Value() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + str, ok := v.(string) + if !ok { + 
t.Fatalf("expected string, got %T", v) + } + if str != "[]" { + t.Errorf("expected '[]', got %q", str) + } +} + +func TestStringSlice_Scan_String(t *testing.T) { + var s StringSlice + err := s.Scan(`["x","y"]`) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(s) != 2 { + t.Fatalf("expected 2 items, got %d", len(s)) + } + if s[0] != "x" || s[1] != "y" { + t.Errorf("unexpected values: %v", s) + } +} + +func TestStringSlice_Scan_Bytes(t *testing.T) { + var s StringSlice + err := s.Scan([]byte(`["p","q"]`)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(s) != 2 { + t.Fatalf("expected 2 items, got %d", len(s)) + } + if s[0] != "p" || s[1] != "q" { + t.Errorf("unexpected values: %v", s) + } +} + +func TestStringSlice_Scan_Nil(t *testing.T) { + var s StringSlice + err := s.Scan(nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if s == nil { + t.Error("expected non-nil empty slice, got nil") + } + if len(s) != 0 { + t.Errorf("expected empty slice, got %v", s) + } +} + +func TestStringSlice_Scan_InvalidType(t *testing.T) { + var s StringSlice + err := s.Scan(12345) + if err == nil { + t.Fatal("expected error for invalid type, got nil") + } +} + +func TestStringSlice_Scan_InvalidJSON(t *testing.T) { + var s StringSlice + err := s.Scan(`{not valid}`) + if err == nil { + t.Fatal("expected error for invalid JSON, got nil") + } +} + +// --------------------------------------------------------------------------- +// SourceVMSlice +// --------------------------------------------------------------------------- + +func TestSourceVMSlice_Value_Nil(t *testing.T) { + var s SourceVMSlice + v, err := s.Value() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if v != "[]" { + t.Errorf("expected '[]', got %v", v) + } +} + +func TestSourceVMSlice_Value_NonNil(t *testing.T) { + s := SourceVMSlice{ + {Name: "vm1", State: "running", IPAddress: "10.0.0.1", Prepared: true}, + {Name: "vm2", State: "stopped", 
IPAddress: "", Prepared: false}, + } + v, err := s.Value() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + str, ok := v.(string) + if !ok { + t.Fatalf("expected string, got %T", v) + } + + var result []SourceVMJSON + if err := json.Unmarshal([]byte(str), &result); err != nil { + t.Fatalf("invalid JSON: %v", err) + } + if len(result) != 2 { + t.Errorf("expected 2 items, got %d", len(result)) + } + if result[0].Name != "vm1" || !result[0].Prepared { + t.Errorf("unexpected first item: %+v", result[0]) + } +} + +func TestSourceVMSlice_Scan_String(t *testing.T) { + var s SourceVMSlice + err := s.Scan(`[{"name":"vm1","state":"running","ip_address":"10.0.0.1","prepared":true}]`) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(s) != 1 { + t.Fatalf("expected 1 item, got %d", len(s)) + } + if s[0].Name != "vm1" { + t.Errorf("expected name 'vm1', got %q", s[0].Name) + } + if !s[0].Prepared { + t.Error("expected prepared to be true") + } +} + +func TestSourceVMSlice_Scan_Bytes(t *testing.T) { + var s SourceVMSlice + err := s.Scan([]byte(`[{"name":"vm2","state":"stopped","ip_address":"","prepared":false}]`)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(s) != 1 { + t.Fatalf("expected 1 item, got %d", len(s)) + } + if s[0].Name != "vm2" { + t.Errorf("expected name 'vm2', got %q", s[0].Name) + } +} + +func TestSourceVMSlice_Scan_Nil(t *testing.T) { + var s SourceVMSlice + err := s.Scan(nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if s == nil { + t.Error("expected non-nil empty slice, got nil") + } + if len(s) != 0 { + t.Errorf("expected empty slice, got %v", s) + } +} + +func TestSourceVMSlice_Scan_InvalidType(t *testing.T) { + var s SourceVMSlice + err := s.Scan(12345) + if err == nil { + t.Fatal("expected error for invalid type, got nil") + } +} + +// --------------------------------------------------------------------------- +// BridgeSlice +// 
--------------------------------------------------------------------------- + +func TestBridgeSlice_Value_Nil(t *testing.T) { + var s BridgeSlice + v, err := s.Value() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if v != "[]" { + t.Errorf("expected '[]', got %v", v) + } +} + +func TestBridgeSlice_Value_NonNil(t *testing.T) { + s := BridgeSlice{ + {Name: "br0", Subnet: "10.0.0.0/24"}, + {Name: "br1", Subnet: "192.168.1.0/24"}, + } + v, err := s.Value() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + str, ok := v.(string) + if !ok { + t.Fatalf("expected string, got %T", v) + } + + var result []BridgeJSON + if err := json.Unmarshal([]byte(str), &result); err != nil { + t.Fatalf("invalid JSON: %v", err) + } + if len(result) != 2 { + t.Errorf("expected 2 items, got %d", len(result)) + } + if result[0].Name != "br0" || result[0].Subnet != "10.0.0.0/24" { + t.Errorf("unexpected first item: %+v", result[0]) + } +} + +func TestBridgeSlice_Scan_String(t *testing.T) { + var s BridgeSlice + err := s.Scan(`[{"name":"br0","subnet":"10.0.0.0/24"}]`) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(s) != 1 { + t.Fatalf("expected 1 item, got %d", len(s)) + } + if s[0].Name != "br0" { + t.Errorf("expected name 'br0', got %q", s[0].Name) + } + if s[0].Subnet != "10.0.0.0/24" { + t.Errorf("expected subnet '10.0.0.0/24', got %q", s[0].Subnet) + } +} + +func TestBridgeSlice_Scan_Bytes(t *testing.T) { + var s BridgeSlice + err := s.Scan([]byte(`[{"name":"br1","subnet":"192.168.0.0/16"}]`)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(s) != 1 { + t.Fatalf("expected 1 item, got %d", len(s)) + } + if s[0].Name != "br1" { + t.Errorf("expected name 'br1', got %q", s[0].Name) + } +} + +func TestBridgeSlice_Scan_Nil(t *testing.T) { + var s BridgeSlice + err := s.Scan(nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if s == nil { + t.Error("expected non-nil empty slice, got nil") + } + if 
len(s) != 0 { + t.Errorf("expected empty slice, got %v", s) + } +} + +func TestBridgeSlice_Scan_InvalidType(t *testing.T) { + var s BridgeSlice + err := s.Scan(12345) + if err == nil { + t.Fatal("expected error for invalid type, got nil") + } +} diff --git a/api/internal/telemetry/telemetry.go b/api/internal/telemetry/telemetry.go new file mode 100644 index 00000000..9cb8e708 --- /dev/null +++ b/api/internal/telemetry/telemetry.go @@ -0,0 +1,71 @@ +package telemetry + +import ( + "github.com/posthog/posthog-go" +) + +// Service defines the interface for telemetry operations. +type Service interface { + Track(userID, event string, properties map[string]any) + GroupIdentify(orgID string, properties map[string]any) + Close() +} + +// NoopService is a telemetry service that does nothing. +type NoopService struct{} + +func (s *NoopService) Track(userID, event string, properties map[string]any) {} +func (s *NoopService) GroupIdentify(orgID string, properties map[string]any) {} +func (s *NoopService) Close() {} + +type posthogService struct { + client posthog.Client +} + +// New creates a new telemetry service. Returns NoopService if apiKey is empty. 
+func New(apiKey, endpoint string) Service { + if apiKey == "" { + return &NoopService{} + } + + if endpoint == "" { + endpoint = "https://nautilus.fluid.sh" + } + + client, err := posthog.NewWithConfig(apiKey, posthog.Config{Endpoint: endpoint}) + if err != nil { + return &NoopService{} + } + + return &posthogService{client: client} +} + +func (s *posthogService) Track(userID, event string, properties map[string]any) { + props := posthog.NewProperties() + for k, v := range properties { + props.Set(k, v) + } + + _ = s.client.Enqueue(posthog.Capture{ + DistinctId: userID, + Event: event, + Properties: props, + }) +} + +func (s *posthogService) GroupIdentify(orgID string, properties map[string]any) { + props := posthog.NewProperties() + for k, v := range properties { + props.Set(k, v) + } + + _ = s.client.Enqueue(posthog.GroupIdentify{ + Type: "organization", + Key: orgID, + Properties: props, + }) +} + +func (s *posthogService) Close() { + _ = s.client.Close() +} diff --git a/fluid-remote/scripts/generate-openapi.sh b/api/scripts/generate-openapi.sh similarity index 53% rename from fluid-remote/scripts/generate-openapi.sh rename to api/scripts/generate-openapi.sh index cfb2dbd7..09103d44 100755 --- a/fluid-remote/scripts/generate-openapi.sh +++ b/api/scripts/generate-openapi.sh @@ -4,7 +4,7 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" # Generate swagger docs -swag init --dir .,./internal/ansible,./internal/error,./internal/rest,./internal/vm,./internal/workflow --generalInfo ./cmd/api/main.go --parseDependency --parseInternal +swag init --dir cmd/server,./internal/rest,./internal/orchestrator,./internal/store,./internal/error --generalInfo main.go --parseDependency --parseInternal # Convert swagger to OpenAPI 3.0 docker run --rm \ @@ -15,4 +15,4 @@ docker run --rm \ -o /workspace/docs mv docs/openapi/openapi.yaml docs/ -rm -R docs/swagger.json docs/swagger.yaml docs/README.md docs/docs.go docs/.openapi-generator-ignore 
docs/.openapi-generator/ docs/openapi/ +rm -rf docs/docs.go docs/swagger.json docs/swagger.yaml docs/README.md docs/.openapi-generator-ignore docs/.openapi-generator/ docs/openapi/ diff --git a/docker-compose.yml b/docker-compose.yml index 8c5ac013..41ba34c2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,76 +9,6 @@ # - SSH key is mounted for libvirt authentication services: - fluid-remote: - container_name: fluid-remote - build: - context: ./fluid-remote - dockerfile: Dockerfile - restart: unless-stopped - extra_hosts: - - "host.docker.internal:host-gateway" - depends_on: - - postgres - environment: - # Logging - - LOG_FORMAT=${LOG_FORMAT:-text} - - LOG_LEVEL=${LOG_LEVEL:-info} - - # API - - API_HTTP_ADDR=${API_HTTP_ADDR:-:8080} - - # Libvirt/KVM (SSH-based connection) - # For macOS: qemu:///session - # For production: qemu+ssh://username@baremetal-host/system - - LIBVIRT_URI=${LIBVIRT_URI:-qemu:///session} - - LIBVIRT_NETWORK=${LIBVIRT_NETWORK:-default} - - BASE_IMAGE_DIR=/var/lib/libvirt/images/base - - SANDBOX_WORKDIR=/var/lib/libvirt/images/jobs - - - DATABASE_URL=${DATABASE_URL:-postgresql://fluid:fluid@postgres:5432/fluid} - - # Defaults and timeouts - - DEFAULT_VCPUS=${DEFAULT_VCPUS:-2} - - DEFAULT_MEMORY_MB=${DEFAULT_MEMORY_MB:-2048} - - COMMAND_TIMEOUT_SEC=${COMMAND_TIMEOUT_SEC:-600} - - IP_DISCOVERY_TIMEOUT_SEC=${IP_DISCOVERY_TIMEOUT_SEC:-180} - # SSH CA config - - SSH_CA_KEY_PATH=${SSH_CA_KEY_PATH:-/etc/fluid-remote/ssh_ca} - - SSH_CA_PUB_KEY_PATH=${SSH_CA_PUB_KEY_PATH:-/etc/fluid-remote/ssh_ca.pub} - - SSH_KEY_DIR=${SSH_KEY_DIR:-/tmp/sandbox-keys} - - SSH_CERT_TTL_SEC=${SSH_CERT_TTL_SEC:-300} - - ports: - - "8080:8080" - volumes: - # SSH key for libvirt connection (required for SSH-based LIBVIRT_URI) - - ~/.ssh/id_ed25519:/root/.ssh/id_rsa:ro - - # Bind base images (read-only) and job overlays (read-write) - - ${BASE_IMAGES_DIR:-/var/lib/libvirt/images/base}:/var/lib/libvirt/images/base:ro - - 
${JOBS_DIR:-/var/lib/libvirt/images/jobs}:/var/lib/libvirt/images/jobs:rw - - # SSH CA for managed credentials (run setup-ssh-ca.sh first) - - ./fluid-remote/.ssh-ca:/etc/fluid-remote:ro - - # Optionally mount SSH deploy key for GitOps (uncomment and set env GITOPS_SSH_KEY_PATH accordingly) - # - ./secrets/gitops_key:/run/secrets/gitops_key:ro - - # If using qemu-nbd inside the container, you may need these (ensure host has /dev/nbdX present): - # - /dev/nbd0:/dev/nbd0 - # - /dev/nbd1:/dev/nbd1 - - # If qemu-nbd is required to mount snapshots, uncomment the following: - # cap_add: - # - SYS_ADMIN - # privileged: false - - healthcheck: - test: ["CMD", "curl", "-fsSL", "http://127.0.0.1:8080/v1/health"] - interval: 30s - timeout: 5s - retries: 5 - postgres: image: postgres:18 container_name: fluid-postgres @@ -98,6 +28,32 @@ services: timeout: 5s retries: 5 + api: + container_name: fluid-api + build: + context: . + dockerfile: api/Dockerfile + restart: unless-stopped + depends_on: + postgres: + condition: service_healthy + env_file: + - ./api/.env + environment: + - DATABASE_URL=${DATABASE_URL:-postgresql://fluid:fluid@postgres:5432/fluid} + - API_ADDR=${API_ADDR_WEB:-:8080} + - FRONTEND_URL=${FRONTEND_URL:-http://localhost:5173} + - LOG_FORMAT=${LOG_FORMAT:-text} + - LOG_LEVEL=${LOG_LEVEL:-info} + ports: + - "8080:8080" + - "9090:9090" + healthcheck: + test: ["CMD", "wget", "-qO-", "http://127.0.0.1:8080/v1/health"] + interval: 30s + timeout: 5s + retries: 5 + web: build: context: ./web @@ -105,9 +61,13 @@ services: container_name: fluid-web restart: unless-stopped depends_on: - - fluid-remote + - api + environment: + - API_URL=http://fluid-api:8080 + - VITE_POSTHOG_KEY=${VITE_POSTHOG_KEY:-} + - VITE_POSTHOG_HOST=${VITE_POSTHOG_HOST:-} ports: - - "5173:5173" + - "80:5173" volumes: fluid-postgres-data: diff --git a/docs/agent-based-cleanup.md b/docs/agent-based-cleanup.md deleted file mode 100644 index ca523c72..00000000 --- a/docs/agent-based-cleanup.md +++ 
/dev/null @@ -1,226 +0,0 @@ - #Proposed Solution for "Agent-based Cleanup": - - To fulfill the user's request ("once this agent is done working then the sandbox expires and gets cleaned up"): - - 1. Track Created Resources: We need to modify FluidAgent in fluid/internal/tui/agent.go to maintain a list of - createdSandboxIDs []string. - 2. Implement Cleanup: Add a Close() or Cleanup() method to FluidAgent that iterates through this list and calls - vmService.DestroySandbox(). - 3. Trigger Cleanup: In fluid/cmd/fluid/main.go, update the tuiCmd.RunE or runTUI function to defer the agent's cleanup - method. - - This ensures that when the user exits the interactive session (or when the agent finishes its "work" if we view the session - as the unit of work), the resources are reaped. - - I will write the response based on this "Session-Based Cleanup" approach, as it directly addresses the user's scenario of - using the CLI tool. - - I will also mention the "Janitor" approach again as a complementary safety net, but focus on the "Session Cleanup" as the - primary answer to "once this agent is done". - - I will structure the response to: - 1. Confirm Understanding: "You want the cleanup to happen automatically when the agent finishes its session." - 2. The Solution (Session Cleanup): Explain how to modify FluidAgent to track and destroy its own messes. - 3. Code Example: Show the modifications for agent.go and main.go. - - I am ready to write the response. - - One small refinement: The user said "the sandbox expires". My "Session Cleanup" is immediate destruction. "Expiration" - usually implies time. But "expires and gets cleaned up by the AI agent" suggests an event-driven cleanup. I will clarify that - this is "Session Cleanup". - - I'll proceed. - - --- - - Response - - That makes perfect sense for a CLI tool. You want a "leave no trace" policy where the agent cleans up after itself when the - session ends. 
- - Since fluid is an interactive CLI tool, the best place to handle this is by implementing a Session Cleanup mechanism. The - agent should track every sandbox it creates and destroy them when the CLI exits. - - Step 1: Track Created Sandboxes - - Modify your FluidAgent struct in fluid/internal/tui/agent.go to keep a ledger of what it built. - -```go - 1 type FluidAgent struct { - 2 // ... existing fields - 3 - 4 // Track resources created during this session - 5 createdSandboxes []string - 6 } - ``` - - Then, update the createSandbox method to append to this list: -```go - 1 func (a *FluidAgent) createSandbox(ctx context.Context, args []string) (map[string]interface{}, error) { - 2 // ... existing logic to create sb ... - 3 - 4 // Track it! - 5 a.createdSandboxes = append(a.createdSandboxes, sb.ID) - 6 - 7 return result, nil - 8 } -``` - Step 2: Implement the Cleanup Method - - Add a Close or Cleanup method to FluidAgent that iterates through that list. -```go - 1 // Cleanup destroys all sandboxes created during this session. - 2 func (a *FluidAgent) Cleanup(ctx context.Context) error { - 3 if len(a.createdSandboxes) == 0 { - 4 return nil - 5 } - 6 - 7 a.logger.Info("cleaning up session resources", "count", len(a.createdSandboxes)) - 8 - 9 for _, id := range a.createdSandboxes { - 10 // Check if it still exists before destroying - 11 if _, err := a.vmService.GetSandbox(ctx, id); err == nil { - 12 a.logger.Info("destroying session sandbox", "id", id) - 13 if _, err := a.vmService.DestroySandbox(ctx, id); err != nil { - 14 a.logger.Error("failed to destroy sandbox", "id", id, "error", err) - 15 // Continue trying to destroy others even if one fails - 16 } - 17 } - 18 } - 19 - 20 // Clear the list - 21 a.createdSandboxes = nil - 22 return nil - 23 } -``` - Step 3: Trigger on Exit - - Finally, hook this into your application lifecycle in fluid/cmd/fluid/main.go. You want to ensure this runs even if the user - hits Ctrl+C. 
- - Update the runTUI function: -```go - 1 func runTUI() error { - 2 // ... setup config and services ... - 3 - 4 agent := tui.NewFluidAgent(cfg, dataStore, vmService, libvirtMgr) - 5 - 6 // Ensure cleanup runs when the function exits (user quits TUI) - 7 defer func() { - 8 fmt.Println("Cleaning up session resources...") - 9 ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - 10 defer cancel() - 11 if err := agent.Cleanup(ctx); err != nil { - 12 fmt.Printf("Warning: cleanup failed: %v\n", err) - 13 } - 14 }() - 15 - 16 model := tui.NewModel("fluid", "local", "vm-agent", agent, cfg, configPath) - 17 return tui.Run(model) - 18 } -``` - Summary - With this change: - 1. Usage: You run fluid tui, tell the agent "Create a sandbox and test this script." - 2. Action: The agent creates the VM, does the work, and reports back. - 3. Exit: When you quit the tool, it automatically detects the leftover VM and destroys it, ensuring your host machine stays - clean. - - - - - --- - - Yes, you are absolutely right to be worried. I have analyzed the codebase (fluid/cmd/fluid/main.go and - fluid/internal/vm/service.go) and while the intent to expire VMs is stored in the database via the TTLSeconds field, there is - no active process currently watching that field. - - If a user creates a sandbox with a 1-hour TTL, the record will just sit in the database and the VM will remain running in - Libvirt indefinitely after that hour passes. - - The Missing Piece: The "Janitor" - - To fix this, we need to introduce a background worker (a "Janitor") that runs alongside the main application. - - Proposed Architecture - - You should create a new Janitor service that runs a simple loop: - - 1. Check: Every minute, query the database for "expired" sandboxes. - * Query: WHERE state IN ('RUNNING', 'STARTING') AND (created_at + ttl_seconds * interval '1 second') < NOW() - 2. Act: For each match, call the existing vmService.DestroySandbox(id) method. 
- - Implementation Draft - - Here is how you can implement this in a new file fluid/internal/janitor/janitor.go: - ```go - 1 package janitor - 2 - 3 import ( - 4 "context" - 5 "fluid/internal/store" - 6 "fluid/internal/vm" - 7 "log/slog" - 8 "time" - 9 ) - 10 - 11 type Janitor struct { - 12 store store.Store - 13 vmService *vm.Service - 14 logger *slog.Logger - 15 } - 16 - 17 func New(st store.Store, svc *vm.Service) *Janitor { - 18 return &Janitor{ - 19 store: st, - 20 vmService: svc, - 21 logger: slog.Default().With("component", "janitor"), - 22 } - 23 } - 24 - 25 // Start runs the cleanup loop in a blocking manner. - 26 func (j *Janitor) Start(ctx context.Context, interval time.Duration) { - 27 ticker := time.NewTicker(interval) - 28 defer ticker.Stop() - 29 - 30 for { - 31 select { - 32 case <-ctx.Done(): - 33 return - 34 case <-ticker.C: - 35 j.cleanup(ctx) - 36 } - 37 } - 38 } - 39 - 40 func (j *Janitor) cleanup(ctx context.Context) { - 41 // 1. Find expired sandboxes - 42 // This requires adding a 'ListExpiredSandboxes' method to your Store interface - 43 expired, err := j.store.ListExpiredSandboxes(ctx) - 44 if err != nil { - 45 j.logger.Error("failed to list expired sandboxes", "error", err) - 46 return - 47 } - 48 - 49 for _, sb := range expired { - 50 j.logger.Info("destroying expired sandbox", "id", sb.ID, "ttl", sb.TTLSeconds) - 51 if _, err := j.vmService.DestroySandbox(ctx, sb.ID); err != nil { - 52 j.logger.Error("failed to destroy expired sandbox", "id", sb.ID, "error", err) - 53 } - 54 } - 55 } - ``` - Wiring It Up - - Then, in your fluid/cmd/fluid/main.go (or wherever your long-running server process lives, if you have one), you would - initialize and start this: - ```go - 1 // In initServices or main() - 2 janitorSvc := janitor.New(dataStore, vmService) - 3 - 4 // Run in a goroutine so it doesn't block the main process - 5 go janitorSvc.Start(ctx, 1 * time.Minute) -``` - Note: Since fluid is currently a CLI tool, this background worker assumes 
you are running a persistent server (like - fluid-remote or a local daemon). If you only run the CLI for short commands, you might need a fluid cleanup command that can - be run via system cron diff --git a/docs/agent-connection-flow.md b/docs/agent-connection-flow.md deleted file mode 100644 index 34107bf4..00000000 --- a/docs/agent-connection-flow.md +++ /dev/null @@ -1,196 +0,0 @@ -# Agent Connection Flows - -There are two ways for agents to execute commands in sandboxes: - -1. **Tool Call Flow** (Recommended) - Direct API calls to virsh-sandbox -2. **Interactive Session Flow** - tmux-client for interactive terminal access - ---- - -## Option 1: Tool Call Flow (Recommended) - -For agents using tool/function calls, virsh-sandbox handles SSH credentials internally. -No key management required by the caller. - -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Tool Call Flow (Simplified) β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ β”‚ -β”‚ 1. Agent calls virsh-sandbox API β”‚ -β”‚ POST /v1/sandboxes/{id}/run β”‚ -β”‚ { "command": "apt update && apt install -y nginx" } β”‚ -β”‚ β”‚ β”‚ -β”‚ β–Ό β”‚ -β”‚ 2. 
virsh-sandbox checks for cached credentials β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ /tmp/sandbox-keys/{sandbox_id}/ β”‚ β”‚ -β”‚ β”‚ - key (private key, 0600) β”‚ β”‚ -β”‚ β”‚ - key-cert.pub (certificate) β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ β”‚ -β”‚ β”œβ”€β”€β”€ Cached & valid? ──> Skip to step 5 β”‚ -β”‚ β”‚ β”‚ -β”‚ β–Ό β”‚ -β”‚ 3. virsh-sandbox generates ephemeral key pair (if needed) β”‚ -β”‚ - ed25519 keypair β”‚ -β”‚ - Stored in /tmp/sandbox-keys/{sandbox_id}/ β”‚ -β”‚ β”‚ β”‚ -β”‚ β–Ό β”‚ -β”‚ 4. virsh-sandbox issues certificate internally (5 min TTL) β”‚ -β”‚ - Signs public key with SSH CA β”‚ -β”‚ - Saves certificate to key-cert.pub β”‚ -β”‚ β”‚ β”‚ -β”‚ β–Ό β”‚ -β”‚ 5. virsh-sandbox executes SSH command β”‚ -β”‚ ssh -i key -o CertificateFile=key-cert.pub \ β”‚ -β”‚ sandbox@{vm_ip} -- "apt update && apt install -y nginx" β”‚ -β”‚ β”‚ β”‚ -β”‚ β–Ό β”‚ -β”‚ 6. 
Returns command result to agent β”‚ -β”‚ { β”‚ -β”‚ "command": { β”‚ -β”‚ "stdout": "...", β”‚ -β”‚ "stderr": "...", β”‚ -β”‚ "exit_code": 0 β”‚ -β”‚ } β”‚ -β”‚ } β”‚ -β”‚ β”‚ -β”‚ Cleanup: Keys deleted when sandbox is destroyed β”‚ -β”‚ DELETE /v1/sandboxes/{id} β”‚ -β”‚ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -### API Example - -```bash -# Create sandbox -curl -X POST http://localhost:8080/v1/sandboxes \ - -H "Content-Type: application/json" \ - -d '{"source_vm_name": "ubuntu-base", "agent_id": "my-agent"}' - -# Start sandbox -curl -X POST http://localhost:8080/v1/sandboxes/SBX-abc123/start \ - -H "Content-Type: application/json" \ - -d '{"wait_for_ip": true}' - -# Run commands (no credentials needed!) -curl -X POST http://localhost:8080/v1/sandboxes/SBX-abc123/run \ - -H "Content-Type: application/json" \ - -d '{"command": "whoami"}' - -# Response: -# {"command": {"stdout": "sandbox\n", "exit_code": 0, ...}} - -# Destroy when done (cleans up keys automatically) -curl -X DELETE http://localhost:8080/v1/sandboxes/SBX-abc123 -``` - -### Configuration - -Set these environment variables on the virsh-sandbox API server: - -| Variable | Default | Description | -|----------|---------|-------------| -| `SSH_CA_KEY_PATH` | `/etc/virsh-sandbox/ssh_ca` | SSH CA private key | -| `SSH_CA_PUB_KEY_PATH` | `/etc/virsh-sandbox/ssh_ca.pub` | SSH CA public key | -| `SSH_KEY_DIR` | `/tmp/sandbox-keys` | Ephemeral key storage | -| `SSH_CERT_TTL_SEC` | `300` | Certificate TTL (5 min) | - -### VM Setup - -VMs must trust the SSH CA: - -```bash -# Copy CA public key to VM image -cp /etc/virsh-sandbox/ssh_ca.pub /etc/ssh/ssh_ca.pub - -# Add to /etc/ssh/sshd_config -echo "TrustedUserCAKeys /etc/ssh/ssh_ca.pub" >> /etc/ssh/sshd_config - -# Ensure 'sandbox' user exists -useradd -m -s 
/bin/bash sandbox -``` - ---- - -## Option 2: Interactive Session Flow (tmux-client) - -For interactive terminal access, use the tmux-client which manages its own -SSH sessions and provides a persistent terminal interface. - -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Interactive Session Flow (tmux-client) β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ β”‚ -β”‚ 1. Agent calls tmux-client API β”‚ -β”‚ POST /v1/sandbox/sessions/create β”‚ -β”‚ { "sandbox_id": "SBX-abc123" } β”‚ -β”‚ β”‚ β”‚ -β”‚ β–Ό β”‚ -β”‚ 2. tmux-client generates ephemeral key pair β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ /tmp/sandbox-keys/sandbox_SBX-abc123_... β”‚ β”‚ -β”‚ β”‚ - Private key (ed25519) β”‚ β”‚ -β”‚ β”‚ - Public key β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ β”‚ -β”‚ β–Ό β”‚ -β”‚ 3. tmux-client calls virsh-sandbox API β”‚ -β”‚ POST /v1/access/request β”‚ -β”‚ { "sandbox_id": "...", "public_key": "ssh-ed25519 ..." } β”‚ -β”‚ β”‚ β”‚ -β”‚ β–Ό β”‚ -β”‚ 4. virsh-sandbox issues certificate (5 min TTL) β”‚ -β”‚ Returns: { "certificate": "ssh-ed25519-cert-v01...", β”‚ -β”‚ "vm_ip_address": "192.168.122.10" } β”‚ -β”‚ β”‚ β”‚ -β”‚ β–Ό β”‚ -β”‚ 5. tmux-client saves certificate β”‚ -β”‚ /tmp/sandbox-keys/sandbox_SBX-abc123_...-cert.pub β”‚ -β”‚ β”‚ β”‚ -β”‚ β–Ό β”‚ -β”‚ 6. 
tmux-client creates tmux session with SSH command β”‚ -β”‚ tmux new-session -d -s sandbox_SBX-abc123 \ β”‚ -β”‚ "ssh -i /tmp/.../key -o CertificateFile=/tmp/.../key-cert.pub \ β”‚ -β”‚ sandbox@192.168.122.10" β”‚ -β”‚ β”‚ β”‚ -β”‚ β–Ό β”‚ -β”‚ 7. Agent is now in a tmux session connected to the sandbox VM β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ sandbox@vm:~$ β”‚ β”‚ -β”‚ β”‚ (tmux session - no shell escape) β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ 8. When done: DELETE /v1/sandbox/sessions/sandbox_SBX-abc123 β”‚ -β”‚ - Kills tmux session β”‚ -β”‚ - Deletes ephemeral keys β”‚ -β”‚ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - ---- - -## Comparison - -| Feature | Tool Call Flow | Interactive Session Flow | -|---------|---------------|-------------------------| -| Use case | Automated tool/function calls | Interactive terminal | -| Credential management | Automatic (virsh-sandbox) | Automatic (tmux-client) | -| Session type | Stateless per-command | Persistent tmux session | -| Best for | AI agents with tool calling | Human debugging, complex workflows | -| API | virsh-sandbox `/run` endpoint | tmux-client sessions API | - ---- - -## Security Properties - -Both flows share these security characteristics: - -1. **Short-lived certificates** - 5 minute TTL by default -2. **Ephemeral keys** - Generated per-sandbox, deleted on destroy -3. **Certificate-based auth** - No passwords, no authorized_keys management -4. **Audit trail** - All certificates logged by the SSH CA -5. 
**Isolation** - Each sandbox has its own keypair diff --git a/docs/agent-run-command-requiring-password.md b/docs/agent-run-command-requiring-password.md deleted file mode 100644 index a347fd66..00000000 --- a/docs/agent-run-command-requiring-password.md +++ /dev/null @@ -1,5 +0,0 @@ -# Agent Run Command Requiring Password - -## Overview - -If you are running into the run command causing the virsh-sandbox API to ask about a password, the reason is that the base-VM doesn't have the base CA used in the virsh-sandbox API. You will need to regenerate the SSH CA with `./virsh-sandbox/scripts/setup-ssh-ca.sh [ssh-ca-dir]` and `./virsh-sandbox/scripts/reset-libvirt-macos.sh [vmname] [ca-pub-path] [ca-key-path]` to get this to work. It will regenerate all the certs and rebuild the test base VM. diff --git a/docs/cloud-init-cloning-fix.md b/docs/cloud-init-cloning-fix.md deleted file mode 100644 index 36788088..00000000 --- a/docs/cloud-init-cloning-fix.md +++ /dev/null @@ -1,812 +0,0 @@ -# Cloud-Init Cloning Fix for Sandbox VMs - -## Overview - -This document describes a critical bug fix for sandbox VM networking that was causing `run_command` API calls to timeout. The fix ensures that cloned VMs properly initialize their network interfaces through cloud-init. - -**Date**: January 2026 -**Affected Components**: `virsh-sandbox/internal/libvirt/virsh.go` -**Symptom**: `run_command` times out with "IP discovery timeout" error - ---- - -## Problem Description - -### Symptoms - -When creating a sandbox from a base VM and calling `run_command`, the API would: -1. Wait for approximately 2 minutes (the IP discovery timeout) -2. Return a 500 error with message: "IP discovery timeout" -3. Leave the sandbox VM in a running state but with no network connectivity - -### Root Cause Analysis - -The issue had two contributing factors: - -#### 1. 
Cloud-Init Instance-ID Collision (Primary Cause) - -When cloning a VM using qcow2 overlay disks, the clone inherits the base VM's disk state, which includes cloud-init's record of having already run for a specific `instance-id`. - -**The problem flow:** -1. Base VM `test-vm-arm64` boots with cloud-init ISO containing `instance-id: test-vm-arm64` -2. Cloud-init runs, configures networking for MAC address `52:54:00:14:74:62`, and records completion -3. Sandbox `sbx-abc123` is created as a linked clone (qcow2 overlay on base disk) -4. Sandbox boots with the **same** cloud-init ISO (`instance-id: test-vm-arm64`) -5. Cloud-init checks instance-id β†’ matches stored value β†’ **skips initialization** -6. Sandbox has a **different** MAC address (`52:54:00:xx:xx:xx`) but no network config for it -7. No DHCP request is sent β†’ No IP address is obtained - -**Evidence observed:** -```bash -# VM interface statistics showing zero TX packets after 2+ minutes -$ virsh domifstat sbx-abc123 vnet15 -vnet15 rx_bytes 180 -vnet15 rx_packets 2 -vnet15 tx_bytes 0 # No outgoing traffic! -vnet15 tx_packets 0 -``` - -#### 2. Slow ARM64 Emulation (Secondary Factor) - -VMs running under QEMU TCG emulation (when KVM is unavailable, e.g., ARM64 on x86 host) take significantly longer to boot: -- Expected boot time: 15-30 seconds -- Actual boot time under TCG: 150+ seconds -- Default IP discovery timeout: 120 seconds - -This meant that even if networking was properly configured, the timeout would expire before the VM could boot and obtain an IP. - ---- - -## Solution - -### Fix Implementation - -The fix generates a **unique cloud-init ISO for each sandbox** with a new `instance-id`, forcing cloud-init to re-run network initialization. - -#### Changes Made - -**1. 
Added `CloudInitISO` field to domain XML parameters:** -```go -type domainXMLParams struct { - Name string - MemoryMB int - VCPUs int - DiskPath string - CloudInitISO string // NEW: Optional path to cloud-init ISO - Network string - // ... -} -``` - -**2. Updated domain XML template to include CDROM device:** -```xml -{{- if .CloudInitISO }} - - - - - - - -{{- end }} -``` - -**3. Added `buildCloudInitSeedForClone()` function:** - -This function creates a minimal cloud-init seed that: -- Uses the sandbox name as a unique `instance-id` -- Includes a netplan configuration that enables DHCP on virtio interfaces -- Preserves existing user accounts from the base image - -```go -func (m *VirshManager) buildCloudInitSeedForClone(ctx context.Context, vmName, outISO string) error { - userData := `#cloud-config -network: - version: 2 - ethernets: - id0: - match: - driver: virtio* - dhcp4: true -` - metaData := fmt.Sprintf(`instance-id: %s -local-hostname: %s -`, vmName, vmName) - - // Create ISO using genisoimage or cloud-localds - // ... -} -``` - -**4. Modified `CloneFromVM()` to generate unique cloud-init ISO:** - -```go -func (m *VirshManager) CloneFromVM(...) (DomainRef, error) { - // ... existing code ... - - // Detect if source VM has cloud-init - if sourceCloudInitISO != "" { - cloudInitISO = filepath.Join(jobDir, "cloud-init.iso") - if err := m.buildCloudInitSeedForClone(ctx, newVMName, cloudInitISO); err != nil { - log.Printf("WARNING: failed to build cloud-init seed: %v", err) - cloudInitISO = sourceCloudInitISO // Fallback - } - } - - // Include in domain XML - xml, err := renderDomainXML(domainXMLParams{ - CloudInitISO: cloudInitISO, - // ... - }) -} -``` - -### How the Fix Works - -1. When `CloneFromVM()` is called, it detects if the source VM has a cloud-init CDROM -2. If yes, it generates a new cloud-init ISO at `/var/lib/libvirt/images/jobs//cloud-init.iso` -3. 
The ISO contains: - - `meta-data` with `instance-id: ` (unique per sandbox) - - `user-data` with network configuration for DHCP -4. When the sandbox boots, cloud-init sees a **different** instance-id -5. Cloud-init re-runs initialization, including network configuration -6. The netplan config enables DHCP on the virtio network interface -7. The VM obtains an IP address via DHCP - ---- - -## Verification - -### Test Results - -After the fix, sandbox VMs successfully obtain IP addresses: - -```bash -# Create sandbox with auto_start -$ curl -X POST http://localhost:8080/v1/sandboxes \ - -H "Content-Type: application/json" \ - -d '{"source_vm_name": "test-vm-arm64", "agent_id": "test", "auto_start": true}' - -# After ~150 seconds (ARM64 boot time), check for IP -$ virsh domifaddr sbx-28a48bc8 --source lease - Name MAC address Protocol Address -------------------------------------------------------------------------------- - vnet18 52:54:00:b8:09:c3 ipv4 192.168.122.228/24 - -# Verify connectivity -$ ping -c 3 192.168.122.228 -64 bytes from 192.168.122.228: icmp_seq=1 ttl=64 time=12.2 ms -``` - -### Unit Tests Added - -New tests in `virsh-sandbox/internal/libvirt/virsh_test.go`: - -- `TestRenderDomainXML_WithCloudInitISO` - Verifies CDROM is included in XML -- `TestRenderDomainXML_WithoutCloudInitISO` - Verifies no CDROM when ISO is empty -- `TestCloudInitSeedForClone_UniqueInstanceID` - Verifies unique ISO paths per sandbox - ---- - -## Configuration - -### Environment Variables - -| Variable | Default | Description | -|----------|---------|-------------| -| `IP_DISCOVERY_TIMEOUT` | `2m` | Maximum time to wait for VM to obtain IP address | - -For environments with slow VM boot times (e.g., ARM64 under TCG emulation), consider increasing this value: - -```bash -export IP_DISCOVERY_TIMEOUT=5m -``` - -### Base VM Requirements - -For optimal cloning behavior, base VMs should: - -1. **Have a cloud-init CDROM attached** with a valid NoCloud seed -2. 
**Use virtio network interfaces** (for the netplan match rule to work) -3. **Have cloud-init installed** in the guest OS - ---- - -## Next Steps & Recommendations - -### Short-Term Improvements - -1. **Increase default IP discovery timeout for known slow environments** - - Detect ARM64 under TCG and automatically extend timeout - - Add configuration option per base VM for expected boot time - -2. **Add IP polling endpoint** - - Allow clients to create sandbox with `wait_for_ip: false` - - Provide endpoint to check/poll for IP address separately - - Reduces API timeout issues for slow-booting VMs - -3. **Improve error messages** - - When IP discovery times out, include diagnostic info: - - VM state (running/paused/etc) - - Network interface statistics (TX/RX packets) - - Suggestion to check cloud-init logs - -### Medium-Term Improvements - -1. **Cloud-init status detection** - - Use qemu-guest-agent to query cloud-init status inside VM - - Detect if cloud-init is still running vs. failed vs. completed - - Provide more accurate progress feedback to clients - -2. **Network configuration options** - - Allow specifying static IP for sandboxes - - Support custom netplan configurations - - Enable IPv6 DHCP option - -3. **Base VM validation** - - Add pre-flight check when registering base VMs - - Verify cloud-init is installed and configured - - Warn if expected boot time exceeds IP discovery timeout - -### Long-Term Improvements - -1. **Alternative network initialization methods** - - Support cloud-init "ConfigDrive" in addition to NoCloud - - Consider QEMU guest agent for network config injection - - Explore using cloud-init's "clean" command instead of new ISO - -2. **VM boot optimization** - - Profile boot process to identify slow components - - Consider using pre-booted VM snapshots for faster startup - - Evaluate alternative emulation options (e.g., Rosetta on macOS) - -3. 
**Monitoring & Observability** - - Add metrics for VM boot time, IP discovery time - - Track cloud-init success/failure rates - - Alert on sandboxes that fail to obtain IP - ---- - -## Troubleshooting Guide - -This section provides comprehensive debugging steps for diagnosing VM networking issues. - ---- - -### Quick Diagnostic Commands - -Run these first to get an overview of the system state: - -```bash -# 1. Check DHCP leases -cat /var/lib/libvirt/dnsmasq/default.leases - -# 2. Check network config -virsh net-dumpxml default | grep -A5 dhcp - -# 3. List all VM MACs and IPs -for vm in $(virsh list --name); do - echo "=== $vm ===" - virsh domifaddr "$vm" --source lease -done - -# 4. Check for duplicate MACs (causes IP conflicts) -virsh list --name | xargs -I{} virsh domiflist {} 2>/dev/null | grep -E "^[a-z]" | awk '{print $5}' | sort | uniq -d -``` - ---- - -### Pre-Flight Validation (fluid CLI) - -Before creating sandboxes, use the built-in validation: - -```bash -# Validate source VM and host resources -fluid validate - -# Example output showing warnings -{ - "source_vm": "test-vm-1", - "valid": true, - "vm_state": "running", - "has_network": true, - "mac_address": "52:54:00:12:34:56", - "warnings": [ - "Source VM is running but has no IP address assigned", - "This may indicate cloud-init or DHCP issues - cloned sandboxes may also fail to get IPs" - ] -} -``` - ---- - -### Source VM Has No IP Address - -If `virsh domifaddr --source lease` returns empty results, investigate: - -#### 1. Check if libvirt network is running - -```bash -# List networks -virsh net-list --all - -# Expected output: -# Name State Autostart Persistent -# -------------------------------------------- -# default active yes yes - -# If not active, start it: -virsh net-start default -virsh net-autostart default -``` - -#### 2. 
Verify DHCP is enabled on the network - -```bash -virsh net-dumpxml default | grep -A10 '<dhcp>' - -# Expected output should include: -# <dhcp> -# <range start='192.168.122.2' end='192.168.122.254'/> -# </dhcp> -``` - -If DHCP is missing, edit the network: -```bash -virsh net-edit default -# Add inside <ip> block: -# <dhcp> -# <range start='192.168.122.2' end='192.168.122.254'/> -# </dhcp> - -# Restart network -virsh net-destroy default -virsh net-start default -``` - -#### 3. Check VM has a network interface - -```bash -virsh domiflist <vm-name> - -# Expected output: -# Interface Type Source Model MAC -# ------------------------------------------------------------- -# vnet0 network default virtio 52:54:00:xx:xx:xx -``` - -If empty, the VM XML is missing network configuration. - -#### 4. Check dnsmasq is running (provides DHCP) - -```bash -# Check process -ps aux | grep dnsmasq - -# Check dnsmasq logs -journalctl -u libvirtd | grep dnsmasq - -# Or check syslog -grep dnsmasq /var/log/syslog | tail -20 -``` - -#### 5. Verify cloud-init is installed in the VM - -Access VM console and check: -```bash -virsh console <vm-name> - -# Inside VM: -cloud-init --version -systemctl status cloud-init - -# Check cloud-init data directory exists -ls -la /var/lib/cloud/ -``` - -#### 6. Check cloud-init status inside VM - -```bash -# Inside VM: -cloud-init status -# Should show: status: done - -# If status shows error, check logs: -cat /var/log/cloud-init.log | grep -i error -cat /var/log/cloud-init-output.log -``` - -#### 7. Check network configuration inside VM - -```bash -# Inside VM: -ip addr show -ip route show - -# Check if interface is up but has no IP -# This indicates DHCP client issue - -# Check DHCP client logs -journalctl -u systemd-networkd | tail -50 -# or -journalctl -u NetworkManager | tail -50 -``` - ---- - -### Sandbox Has No IP After Expected Boot Time - -#### 1. Check VM is running - -```bash -virsh list --all | grep <sandbox-name> - -# State should be "running" -``` - -#### 2. 
Check network interface statistics - -```bash -# Get interface name first -virsh domiflist <sandbox-name> - -# Then check stats -virsh domifstat <sandbox-name> <interface> - -# Example output: -# vnet15 rx_bytes 180 -# vnet15 rx_packets 2 -# vnet15 tx_bytes 0 # Zero = no outgoing traffic! -# vnet15 tx_packets 0 -``` - -**Interpretation:** -- `tx_packets = 0`: VM isn't sending any traffic (cloud-init issue or VM not booted) -- `rx_packets > 0, tx_packets = 0`: VM receives broadcasts but doesn't respond -- Both non-zero: Network is working, check DHCP server - -#### 3. Check cloud-init ISO is attached - -```bash -virsh dumpxml <sandbox-name> | grep -A5 cdrom - -# Should show: -# <disk type='file' device='cdrom'> -# <source file='/var/lib/libvirt/images/sandboxes/<sandbox-id>/cloud-init.iso'/> -# ... -# </disk> -``` - -#### 4. Verify cloud-init seed content - -```bash -# Check meta-data (instance-id must be unique per sandbox) -cat /var/lib/libvirt/images/sandboxes/<sandbox-id>/meta-data - -# Should show: -# instance-id: <sandbox-name> -# local-hostname: <sandbox-name> - -# Check user-data (should have network config) -cat /var/lib/libvirt/images/sandboxes/<sandbox-id>/user-data - -# Should contain: -# network: -# version: 2 -# ethernets: -# id0: -# match: -# driver: virtio* -# dhcp4: true -``` - -#### 5. Check DHCP server has leases available - -```bash -virsh net-dhcp-leases default - -# Check lease file directly -cat /var/lib/libvirt/dnsmasq/default.leases -``` - -#### 6. Check for MAC address collision - -```bash -# Get sandbox MAC -virsh domiflist <sandbox-name> - -# Compare with source VM MAC -virsh domiflist <source-vm-name> - -# They MUST be different! If same, the clone process failed to generate new MAC. -``` - -#### 7. 
Access VM console for debugging - -```bash -# Serial console (if configured) -virsh console -# Press Enter, login with cloud-init credentials - -# VNC display -virsh vncdisplay -# Connect with VNC viewer to localhost: - -# If neither works, check VM has console configured: -virsh dumpxml | grep -A3 ' --details - -# Look for cdrom device - if missing, source VM doesn't use cloud-init -``` - ---- - -### Cloud-Init Runs But Network Fails - -If cloud-init runs but network still fails, check inside the VM: - -#### 1. Check cloud-init network config was applied - -```bash -# Inside VM: -cat /etc/netplan/*.yaml -# or -cat /etc/network/interfaces -# or -nmcli device status -``` - -#### 2. Check for conflicting network configs - -```bash -# Inside VM: -ls -la /etc/netplan/ - -# Multiple files can conflict - cloud-init creates 50-cloud-init.yaml -# Other files (00-installer-config.yaml) may override it -``` - -#### 3. Force cloud-init to re-run (for debugging) - -```bash -# Inside VM: -sudo cloud-init clean --logs -sudo cloud-init init --local -sudo cloud-init init -sudo cloud-init modules --mode=config -sudo cloud-init modules --mode=final - -# Check status -cloud-init status --long -``` - -#### 4. Check instance-id matches expectation - -```bash -# Inside VM: -cat /var/lib/cloud/data/instance-id - -# This should match the sandbox name -# If it matches the source VM name, cloud-init didn't re-run -``` - ---- - -### MAC Address Issues - -#### 1. Check MAC was generated correctly - -```bash -# Sandbox MAC should start with 52:54:00 (QEMU prefix) -virsh domiflist - -# Verify it's different from source VM -virsh domiflist -``` - -#### 2. 
Check for MAC collision across all VMs - -```bash -# List all MACs -for vm in $(virsh list --all --name); do - echo -n "$vm: " - virsh domiflist "$vm" 2>/dev/null | grep -oE '([0-9a-f]{2}:){5}[0-9a-f]{2}' | head -1 -done | sort -t: -k2 - -# Check for duplicates -virsh list --all --name | xargs -I{} virsh domiflist {} 2>/dev/null | \ - grep -oE '([0-9a-f]{2}:){5}[0-9a-f]{2}' | sort | uniq -d -``` - -#### 3. Manually fix MAC if needed - -```bash -# Stop VM -virsh destroy - -# Edit XML -virsh edit -# Find and change to unique value - -# Start VM -virsh start -``` - ---- - -### Host Resource Issues - -#### 1. Check available memory - -```bash -# System memory -free -h - -# Libvirt view -virsh nodememstats - -# Memory used by VMs -virsh list --all --name | xargs -I{} virsh dominfo {} 2>/dev/null | grep -E "^(Name|Max memory|Used memory)" -``` - -#### 2. Check disk space - -```bash -df -h /var/lib/libvirt/images/ - -# Check individual sandbox sizes -du -sh /var/lib/libvirt/images/sandboxes/* -``` - -#### 3. Check for resource exhaustion - -```bash -# Too many VMs? -virsh list --all | wc -l - -# CPU overcommit? -virsh nodeinfo | grep "CPU(s)" -virsh list --all --name | xargs -I{} virsh vcpucount {} 2>/dev/null | grep current | awk '{sum+=$2} END {print "Total vCPUs: " sum}' -``` - ---- - -### Performance Issues (Slow Boot) - -#### 1. Check if using KVM acceleration - -```bash -# Inside VM or from host: -virsh dumpxml | grep -i kvm - -# Check host supports KVM -ls -la /dev/kvm -# If missing, VMs run in slow TCG emulation mode -``` - -#### 2. Check VM architecture matches host - -```bash -# Host architecture -uname -m - -# VM architecture -virsh dumpxml | grep -i arch - -# ARM64 VMs on x86 hosts use TCG emulation (very slow) -``` - -#### 3. 
Increase IP discovery timeout for slow VMs - -```bash -# Set environment variable -export IP_DISCOVERY_TIMEOUT=5m - -# Or in config file -# vm: -# ip_discovery_timeout: 5m -``` - ---- - -### Debugging Checklist - -Use this checklist when sandboxes fail to get IPs: - -- [ ] Source VM exists and is defined in libvirt -- [ ] Source VM has network interface with MAC address -- [ ] Source VM (if running) has IP address -- [ ] Libvirt network is active (`virsh net-list`) -- [ ] DHCP is enabled on network (`virsh net-dumpxml default`) -- [ ] dnsmasq process is running -- [ ] Sandbox was created successfully -- [ ] Sandbox has unique MAC (different from source) -- [ ] Cloud-init ISO was created in sandbox directory -- [ ] Cloud-init ISO has unique instance-id -- [ ] Sandbox is in "running" state -- [ ] Sandbox network interface shows TX packets > 0 -- [ ] No duplicate MACs across VMs -- [ ] Sufficient host memory available -- [ ] Sufficient disk space in work directory - ---- - -### Getting Help - -If issues persist after following this guide: - -1. Collect diagnostic info: - ```bash - fluid validate > validation.json - virsh dumpxml > sandbox.xml - virsh net-dumpxml default > network.xml - cat /var/lib/libvirt/images/sandboxes//meta-data > meta-data.txt - cat /var/lib/libvirt/images/sandboxes//user-data > user-data.txt - ``` - -2. Check cloud-init logs from inside VM if accessible - -3. 
File an issue with the collected diagnostic files - ---- - -## References - -- [Cloud-Init NoCloud Data Source](https://cloudinit.readthedocs.io/en/latest/reference/datasources/nocloud.html) -- [Netplan Configuration](https://netplan.readthedocs.io/) -- [Libvirt Domain XML Format](https://libvirt.org/formatdomain.html) -- [QEMU Disk Images](https://qemu.readthedocs.io/en/latest/system/images.html) diff --git a/docs/future-plan.md b/docs/future-plan.md deleted file mode 100644 index 50366c4b..00000000 --- a/docs/future-plan.md +++ /dev/null @@ -1,55 +0,0 @@ -# Future Development Plan - -This document outlines proposed features and architectural improvements for the `virsh-sandbox` project. - -## Support for `cloud-init` User-Data on Sandbox Creation - -### Problem - -When cloning customer-provided VMs (e.g., CentOS, RHEL), the resulting sandbox often fails to acquire a network connection. This is because the base image's network configuration scripts (e.g., `/etc/sysconfig/network-scripts/ifcfg-eth0`) contain a hardcoded MAC address (`HWADDR`). Since a cloned VM gets a new, unique MAC address, the network configuration is not applied, and the interface is not brought up. - -As we cannot modify the customer's base image, we need a way to fix the network configuration of the sandbox *after* it has been cloned but *before* it is used. - -### Proposed Solution - -Enhance the `virsh-sandbox` API to accept `cloud-init` `user-data` during sandbox creation. This allows the agent creating the sandbox to provide a generic network configuration that will override the faulty one in the base image. - -This is the standard, "cloud native" way to handle per-instance customization and is more robust and portable than attempting to mount and modify the disk image manually. - -### Implementation Details - -1. **Modify API Endpoint:** The `POST /v1/sandboxes` endpoint will be updated to accept an optional `user_data` field in its JSON request body. 
- ```json - { - "source_vm_name": "centos-base-image", - "agent_id": "my-agent", - "user_data": "#cloud-config\nnetwork:\n version: 1\n..." - } - ``` - -2. **Update Service Layer:** The `vm.Service.CreateSandbox` function will be updated to accept the `user_data` string. - -3. **Enhance Libvirt Manager:** The `libvirt.Manager.CloneFromVM` function will receive the `user_data` and perform the following steps: - * If `user_data` is provided, create a temporary `cloud-init.iso` file containing the user-data and default meta-data. - * When defining the new cloned VM, add a CD-ROM device to the libvirt XML that points to this temporary ISO. - * Ensure the temporary ISO file is deleted after the VM has been successfully defined. - -### Example: Fixing a CentOS Clone - -An agent wanting to create a sandbox from a CentOS base image would provide the following `user_data` to correctly configure the primary network interface for DHCP: - -```yaml -#cloud-config - -# This user-data configures the network for a RHEL/CentOS based system. -# It will generate a new ifcfg-eth0 file without a HWADDR, ensuring -# the network is configured correctly on the cloned VM. - -network: - version: 1 - config: - - type: physical - name: eth0 - subnets: - - type: dhcp -``` diff --git a/docs/macos-libvirt-setup.md b/docs/macos-libvirt-setup.md deleted file mode 100644 index c740f2b2..00000000 --- a/docs/macos-libvirt-setup.md +++ /dev/null @@ -1,176 +0,0 @@ -# macOS Libvirt Setup Guide - -This guide explains how to set up libvirt on macOS for use with virsh-sandbox. - -## Overview - -The virsh-sandbox container connects to libvirt via SSH (`qemu+ssh://`). This approach: -- Works the same for local Mac development and production bare metal servers -- Requires no special libvirt configuration -- Is secure (SSH encrypted) - -## Prerequisites - -- macOS 11+ (Big Sur or later) -- Homebrew installed -- Docker Desktop installed - -## Installation - -### 1. 
Install libvirt and QEMU - -```bash -brew install libvirt qemu cdrtools -brew services start libvirt -``` - -### 2. Enable Remote Login (SSH) - -The container connects to your Mac via SSH. Enable it: - -1. Open **System Settings** -2. Go to **General > Sharing** -3. Enable **Remote Login** -4. Note your username (shown in the Remote Login panel) - -### 3. Create image directories - -```bash -sudo mkdir -p /var/lib/libvirt/images/base -sudo mkdir -p /var/lib/libvirt/images/jobs -sudo chmod 777 /var/lib/libvirt/images/base -sudo chmod 777 /var/lib/libvirt/images/jobs -``` - -### 4. Verify SSH works - -```bash -# Test SSH to localhost -ssh $(whoami)@localhost - -# Test libvirt over SSH -virsh -c qemu+ssh://$(whoami)@localhost/session list --all -``` - -## Configuration - -### Environment Setup - -Copy and edit the example environment file: - -```bash -cp .env.example .env -``` - -Edit `.env` and set your username: - -```env -LIBVIRT_URI=qemu+ssh://yourusername@host.docker.internal/session -``` - -### Docker Compose - -The `docker-compose.yml` mounts your SSH key for authentication: - -```yaml -volumes: - - ~/.ssh/id_ed25519:/root/.ssh/id_rsa:ro -``` - -If you use a different SSH key, update this path. - -## Creating a Test VM - -Use the provided script: - -```bash -./scripts/reset-libvirt-macos.sh -``` - -This script: -1. Deletes all existing VMs -2. Downloads Ubuntu cloud image (if needed) -3. Creates a test VM with cloud-init - -## Running the API - -```bash -# Start all services -docker-compose up -d - -# Check logs -docker-compose logs -f virsh-sandbox - -# Verify connection -curl http://localhost:8080/v1/health -curl http://localhost:8080/v1/sandboxes -``` - -## Troubleshooting - -### SSH connection refused - -1. Verify Remote Login is enabled in System Settings -2. Test SSH manually: `ssh yourusername@localhost` - -### Permission denied (publickey) - -The container needs your SSH key. 
Verify the volume mount in `docker-compose.yml`: - -```yaml -- ~/.ssh/id_ed25519:/root/.ssh/id_rsa:ro -``` - -### "Host key verification failed" - -The container needs to trust the host. Add to the container's known_hosts or use: - -```bash -# In the container -ssh-keyscan host.docker.internal >> /root/.ssh/known_hosts -``` - -### VM won't start - -Check libvirt is running: - -```bash -brew services list | grep libvirt -virsh -c qemu:///session list --all -``` - -## Quick Reference - -```bash -# Connection URI -export LIBVIRT_URI="qemu+ssh://$(whoami)@localhost/session" - -# List VMs -virsh -c $LIBVIRT_URI list --all - -# Start VM -virsh -c $LIBVIRT_URI start test-vm - -# Stop VM -virsh -c $LIBVIRT_URI destroy test-vm - -# VM console -virsh -c $LIBVIRT_URI console test-vm - -# Delete VM -virsh -c $LIBVIRT_URI undefine test-vm --nvram - -# Reset and recreate test VM -./scripts/reset-libvirt-macos.sh -``` - -## Production Setup - -For production with Foreman-managed bare metal servers: - -```env -# Point to your bare metal server -LIBVIRT_URI=qemu+ssh://virsh-user@baremetal-host.example.com/system -``` - -The same SSH-based approach works - just change the host in the URI. diff --git a/docs/plan-virtualbox.md b/docs/plan-virtualbox.md deleted file mode 100644 index 40dff91d..00000000 --- a/docs/plan-virtualbox.md +++ /dev/null @@ -1,93 +0,0 @@ -# VirtualBox Support Plan - -This plan details the steps to add VirtualBox support to `virsh-sandbox` alongside the existing KVM/libvirt implementation. - -## 1. Abstract Hypervisor Interface - -The current `VirshManager` is tightly coupled to KVM/libvirt commands and XML generation. We need to extract a generic `HypervisorManager` interface that both KVM and VirtualBox implementations will satisfy. - -**Goal:** Allow switching between `kvm` and `vbox` backends via configuration. - -### Steps: -1. **Refactor Interface:** - - Review `virsh-sandbox/internal/libvirt/virsh.go`'s `Manager` interface. 
It's already an interface, but the implementation (`VirshManager`) is specific. - - Move `Manager` interface to a shared package if necessary, or keep it in `libvirt` (renamed to `hypervisor` or `provider`) if it's generic enough. Currently it's in `internal/libvirt`, which implies KVM. - - Create a new package `internal/hypervisor` to define the common interface. - - Refactor `internal/libvirt` to implement this interface as `KVMManager` (or keep as `VirshManager`). - -2. **Define Interface Methods:** - - `CloneVM(ctx, baseImage, name, cpu, mem, network)` - - `StartVM(ctx, name)` - - `StopVM(ctx, name, force)` - - `DestroyVM(ctx, name)` - - `GetIPAddress(ctx, name)` - - `InjectSSHKey(ctx, name, user, key)` - - `CreateSnapshot(ctx, name, snapName)` - - `DiffSnapshot(ctx, name, from, to)` - *Note: might be tricky for VBox* - -## 2. Implement VirtualBox Manager - -Create a new implementation in `internal/virtualbox` that uses `VBoxManage` CLI commands. - -### Key Mappings: -- **CloneVM:** `VBoxManage clonevm --name --register` + `VBoxManage modifyvm` for specs. -- **StartVM:** `VBoxManage startvm --type headless` -- **StopVM:** `VBoxManage controlvm acpipowerbutton` (graceful) or `poweroff` (force). -- **DestroyVM:** `VBoxManage unregistervm --delete` -- **GetIPAddress:** - - *NAT Mode:* VBox doesn't easily expose guest IP. We might need `VBoxManage guestproperty get` (requires Guest Additions) or a port forwarding strategy + SSH check. - - *Bridged Mode:* ARP table lookup (similar to `socket_vmnet` logic). - - *Host-Only:* Parse `vboxnet` DHCP leases (if available). -- **InjectSSHKey:** - - *Cloud-Init:* Attach a config-drive ISO (similar to KVM implementation). - - *Guest Control:* `VBoxManage guestcontrol` (requires Guest Additions/credentials). *Cloud-init ISO is preferred for consistency.* - -## 3. Disk Image Management - -VirtualBox prefers VDI or VMDK. QCOW2 is supported but might be slower or read-only in some contexts. 
- -- **Base Images:** Users should provide VDI base images for VirtualBox. -- **Overlay/Clones:** `VBoxManage snapshot` or "Linked Clones" use differential disks naturally. -- **Conversion:** We might need `qemu-img convert` to create VDIs from QCOW2s if we want to share base images (complex, maybe out of scope for V1). *Decision: Assume native VDI base images for VBox mode.* - -## 4. Configuration & Factory - -Update `cmd/api/main.go` and `Config` to support selecting the hypervisor. - -- **Env Vars:** - - `HYPERVISOR`: `kvm` (default) or `vbox`. - - `VBOX_MANAGE_PATH`: Path to binary (default: lookup in PATH). - - `BASE_IMAGE_DIR`: Should point to a dir with VDIs for VBox. - -- **Factory Logic:** - - If `HYPERVISOR=vbox`, instantiate `VirtualBoxManager`. - - Else, instantiate `VirshManager`. - -## 5. Networking - -VirtualBox networking differs from Libvirt. - -- **Default:** NAT is easiest but isolates the VM. -- **Host-Only:** Good for local comms, requires setting up a `vboxnet0` adapter. -- **Bridged:** Good for LAN access, but requires specifying a physical interface. - -*Strategy:* Default to **NAT** with **Port Forwarding** for SSH (host random port -> guest 22). Or use **Host-Only** networking if we want direct IP access like KVM. -*Recommendation:* **Host-Only** is closest to the `virsh` "default" network experience (VM gets an IP reachable by host). - -## 6. Snapshot & Diff (Advanced) - -- **CreateSnapshot:** `VBoxManage snapshot take ` -- **Diff:** `VBoxManage clonehd` or mounting VDIs. - - *Challenge:* Mounting VDI on host requires `nbd` + `qemu-nbd` (works with VDI!) or `vbox-img`. - - We can likely reuse the `qemu-nbd` logic if we compile QEMU with VDI support (standard). - -## 7. Execution Plan - -1. **Refactor:** Extract `Manager` interface to `internal/hypervisor/manager.go`. -2. **Scaffold:** Create `internal/virtualbox/manager.go` struct. -3. **Implement Basic Lifecycle:** `Start`, `Stop`, `Destroy`. -4. 
**Implement Cloning:** `CloneVM` using `VBoxManage`. -5. **Implement Networking/IP:** Decide on Host-Only vs NAT. Implement IP retrieval. -6. **Wire Up:** Update `main.go` to use the new flags/env vars. -7. **Test:** Verify with a simple Alpine VDI. - diff --git a/docs/plan.md b/docs/plan.md deleted file mode 100644 index f78e5ed1..00000000 --- a/docs/plan.md +++ /dev/null @@ -1,16 +0,0 @@ -Local Mac development: - - Run Go API directly on Mac (not in Docker) - - Connect via local socket: qemu:///session - - No SSH needed, no network config, simplest setup - - Production: - - Run Go API anywhere (container, VM, bare metal) - - Connect via SSH: qemu+ssh://user@libvirt-host/system - - No libvirtd config changes needed on production machines - - Just needs SSH access with appropriate permissions - - The SSH approach in production is the standard way to manage remote libvirt - it's what tools like virt-manager use. Your Foreman-managed servers already have libvirt running; you just need SSH credentials to reach them. - - For multi-host support, you'd either: - 1. Run one API instance per libvirt host (simplest) - 2. Modify the API to accept a host parameter and maintain connections to multiple libvirt instances (more complex but single endpoint) diff --git a/docs/publishing-plan.md b/docs/publishing-plan.md deleted file mode 100644 index 8688ca6e..00000000 --- a/docs/publishing-plan.md +++ /dev/null @@ -1,65 +0,0 @@ -# Publishing Plan for Virsh Sandbox - -This document outlines the strategy to package and publish `virsh-sandbox` for major Linux distributions (Debian/Ubuntu, RHEL/Fedora) and Snap. - -## 1. Challenge: CGO and Libvirt - -The project uses `libvirt.org/go/libvirt`, which links against the C `libvirt` library. This means: -1. **CGO is Required:** `CGO_ENABLED=1` must be set during the build. -2. **Dependencies:** The build environment must have `libvirt-dev` (Debian/Ubuntu) or `libvirt-devel` (RHEL) headers installed. -3. 
**Cross-Compilation is Hard:** Building for `linux/arm64` on a `linux/amd64` machine requires a C cross-compiler (`gcc-aarch64-linux-gnu`) and the target architecture's libvirt libraries. - -## 2. Solution: GoReleaser with Docker - -We will use **GoReleaser**, the standard release automation tool for Go. To handle the CGO/Cross-compilation complexity, we will utilize GoReleaser's Docker-based build feature or a custom build image in CI. - -### Recommended Toolchain -* **GoReleaser:** Automates building binaries, creating packages (deb/rpm/snap), and publishing releases. -* **NFPM:** (Included in GoReleaser) Handles creating `.deb` and `.rpm` packages without needing `dpkg` or `rpmbuild` present. -* **Snapcraft:** For creating Snap packages. - -## 3. Configuration Steps - -### A. Create `goreleaser.yaml` -This file will be placed in the project root. It defines: -- **Builds:** How to compile the binary (Env vars, flags, targets). -- **Archives:** How to zip the binary for GitHub Releases (tar.gz). -- **NFPM:** Configuration for `.deb` and `.rpm` metadata (maintainer, description, dependencies). -- **Snap:** Configuration for Snap packages. - -### B. Build Environment (GitHub Actions) -Since we need `libvirt-dev`, the GitHub Actions workflow will need to install these dependencies before running GoReleaser. -* For **native builds** (amd64 on amd64), we simply `sudo apt-get install libvirt-dev`. -* For **cross-builds** (arm64), we can use `zig` as a C compiler (GoReleaser supports this) OR use a Docker container with cross-compilers pre-installed. Given the library dependency, **Zig** is often the easiest modern solution if it supports the specific C headers, otherwise a Docker build strategy is safer. - -## 4. Hosting Repositories - -Building the `.deb` and `.rpm` files is only half the battle. Users expect to run `apt-get install` or `yum install`. This requires a **Package Repository**. 
- -### Option 1: Cloudsmith (Recommended) -* **Pros:** Fully managed, free for Open Source, supports Apt, Yum, Maven, Docker, etc. in one place. -* **Setup:** Create an account, get an API key, and configure GoReleaser to push artifacts directly to Cloudsmith. -* **User Exp:** `curl -1sLf 'https://dl.cloudsmith.io/.../setup.deb.sh' | sudo bash` - -### Option 2: Gemfury -* **Pros:** Simple, supports Apt/Yum. -* **Cons:** Free tier has limits. - -### Option 3: GitHub Releases (Manual) -* **Pros:** Free, built-in. -* **Cons:** Users must manually download `.deb`/`.rpm` and install with `dpkg -i` / `rpm -i`. No automatic updates via `apt upgrade`. - -## 5. Implementation Roadmap - -1. **Install GoReleaser** locally to test config. -2. **Create `goreleaser.yaml`** (I will draft this for you). -3. **Update GitHub Action** (`.github/workflows/release.yml`) to trigger on tag creation. -4. **Verify Cross-Compilation:** Check if we can build for ARM64 using standard runners + libraries, or if we need to restrict to AMD64 for the first iteration. - -### Prerequisite Checks -Before automating, we should manually verify we can build the binary with `CGO_ENABLED=1`. - -```bash -# Verify local build works -go build -v -o virsh-sandbox-api ./cmd/api -``` diff --git a/docs/request-timeouts.md b/docs/request-timeouts.md deleted file mode 100644 index 7b939dc0..00000000 --- a/docs/request-timeouts.md +++ /dev/null @@ -1,107 +0,0 @@ -# Request Timeouts - -When creating sandboxes with `wait_for_ip=True`, the request may take longer than default HTTP timeouts allow. This document explains how to configure timeouts on both the server and client side. 
- -## Server Configuration - -The virsh-sandbox server's HTTP write timeout is automatically calculated based on the IP discovery timeout: - -```bash -# IP discovery timeout (default: 120 seconds) -export IP_DISCOVERY_TIMEOUT_SEC=120 - -# HTTP write timeout = IP_DISCOVERY_TIMEOUT_SEC + 30 seconds -# With default settings: 150 seconds -``` - -For slower VM boot times (e.g., cloud-init heavy images), increase the IP discovery timeout: - -```bash -# 5 minutes for IP discovery, 5.5 minutes HTTP timeout -export IP_DISCOVERY_TIMEOUT_SEC=300 -``` - -## SDK Configuration - -The Python SDK accepts a `request_timeout` parameter on methods that may take a long time: - -### create_sandbox - -```python -from virsh_sandbox import Client - -client = Client(host="http://localhost:8080") - -# Single timeout value (total request timeout) -sandbox = client.sandbox.create_sandbox( - source_vm_name="base-vm", - wait_for_ip=True, - request_timeout=180.0, # 180 seconds -) - -# Tuple for (connect_timeout, read_timeout) -sandbox = client.sandbox.create_sandbox( - source_vm_name="base-vm", - wait_for_ip=True, - request_timeout=(5.0, 180.0), # 5s connect, 180s read -) -``` - -### start_sandbox - -```python -# Start an existing sandbox and wait for IP -result = client.sandbox.start_sandbox( - id=sandbox_id, - wait_for_ip=True, - request_timeout=180.0, -) -``` - -### run_command - -```python -# Long-running commands may need extended timeouts -result = client.sandbox.run_command( - id=sandbox_id, - command="apt-get update && apt-get upgrade -y", - timeout_sec=600, # Server-side command timeout - request_timeout=660.0, # Client HTTP timeout (command timeout + buffer) -) -``` - -## Recommended Values - -| Operation | Recommended `request_timeout` | -|-----------|------------------------------| -| `create_sandbox` with `wait_for_ip=False` | 30s (default) | -| `create_sandbox` with `wait_for_ip=True` | 180s | -| `start_sandbox` with `wait_for_ip=True` | 180s | -| `run_command` | `timeout_sec` + 
60s buffer | - -## Alternative: Async IP Discovery - -Instead of blocking on `wait_for_ip=True`, you can poll for the IP address: - -```python -import time - -# Create without waiting -sandbox = client.sandbox.create_sandbox( - source_vm_name="base-vm", - auto_start=True, - wait_for_ip=False, -) - -sandbox_id = sandbox.sandbox.id - -# Poll for IP -while True: - result = client.sandbox.discover_ip(sandbox_id) - if result.ip_address: - print(f"IP: {result.ip_address}") - break - time.sleep(5) -``` - -This approach avoids long HTTP request timeouts and provides better visibility into the sandbox startup process. diff --git a/fluid/.gitignore b/fluid-cli/.gitignore similarity index 98% rename from fluid/.gitignore rename to fluid-cli/.gitignore index e979a548..d81c222f 100755 --- a/fluid/.gitignore +++ b/fluid-cli/.gitignore @@ -29,6 +29,7 @@ go.work.sum .env.lima .ssh-ca/ bin/ +fluid-cli .ansible/ config.yaml config.yml diff --git a/fluid-cli/AGENTS.md b/fluid-cli/AGENTS.md new file mode 100644 index 00000000..c94a802a --- /dev/null +++ b/fluid-cli/AGENTS.md @@ -0,0 +1,170 @@ +# Fluid CLI - Development Guide + +The interactive TUI agent and MCP server for fluid.sh. Connects to fluid-daemon over gRPC to manage VM sandboxes. 
+ +## Architecture + +``` +User + | + v +fluid CLI (TUI / MCP) + | + v (gRPC :9091) +fluid-daemon + | + v +libvirt/KVM +``` + +## Quick Start + +```bash +# Build the CLI +make build + +# Launch the TUI +./bin/fluid + +# Start MCP server on stdio +./bin/fluid mcp +``` + +## TUI Slash Commands + +| Command | Description | +|---------|-------------| +| `/vms` | List available VMs for cloning | +| `/sandboxes` | List active sandboxes | +| `/hosts` | List configured remote hosts | +| `/playbooks` | List generated Ansible playbooks | +| `/compact` | Summarize and compact conversation history | +| `/context` | Show current context token usage | +| `/settings` | Open configuration settings | +| `/clear` | Clear conversation history | +| `/help` | Show available commands | + +## TUI Keyboard Shortcuts + +| Key | Action | +|-----|--------| +| `Enter` | Send message | +| `Shift+Tab` | Toggle edit / read-only mode | +| `PgUp/PgDn` | Scroll conversation history | +| `Ctrl+R` | Reset conversation | +| `Ctrl+C` | Quit | + +## MCP Tools + +17 tools exposed via `fluid mcp`: + +| Tool | Parameters | Description | +|------|-----------|-------------| +| `list_sandboxes` | (none) | List all sandboxes with state and IPs | +| `create_sandbox` | `source_vm` (required), `cpu`, `memory_mb` | Create a sandbox by cloning a source VM | +| `destroy_sandbox` | `sandbox_id` (required) | Destroy a sandbox and remove storage | +| `run_command` | `sandbox_id` (required), `command` (required), `timeout_seconds` | Execute a shell command via SSH | +| `start_sandbox` | `sandbox_id` (required) | Start a stopped sandbox | +| `stop_sandbox` | `sandbox_id` (required) | Stop a running sandbox | +| `get_sandbox` | `sandbox_id` (required) | Get detailed sandbox info | +| `list_vms` | (none) | List available VMs for cloning | +| `create_snapshot` | `sandbox_id` (required), `name` | Snapshot current sandbox state | +| `create_playbook` | `name` (required), `hosts`, `become` | Create an Ansible playbook | +| 
`add_playbook_task` | `playbook_id` (required), `name` (required), `module` (required), `params` | Add a task to a playbook | +| `edit_file` | `sandbox_id` (required), `path` (required), `new_str` (required), `old_str`, `replace_all` | Edit or create a file in a sandbox | +| `read_file` | `sandbox_id` (required), `path` (required) | Read a file from a sandbox | +| `list_playbooks` | (none) | List all created playbooks | +| `get_playbook` | `playbook_id` (required) | Get playbook definition and YAML | +| `run_source_command` | `source_vm` (required), `command` (required), `timeout_seconds` | Run read-only command on a source VM | +| `read_source_file` | `source_vm` (required), `path` (required) | Read a file from a source VM | + +## Configuration + +Default config location: `~/.fluid/config.yaml` + +```yaml +libvirt: + uri: qemu:///system + network: default + base_image_dir: /var/lib/libvirt/images/base + work_dir: /var/lib/libvirt/images/sandboxes + ssh_key_inject_method: virt-customize + +vm: + default_vcpus: 2 + default_memory_mb: 2048 + command_timeout: 5m + ip_discovery_timeout: 2m + +ssh: + proxy_jump: "" + default_user: sandbox +``` + +## Development + +### Prerequisites + +- Go 1.24+ +- libvirt/KVM installed and running + +### Build + +```bash +make build # Build bin/fluid +make build-dev # Build with telemetry key +make clean # Clean build artifacts +``` + +### Testing + +```bash +make test # Run all tests +make test-coverage # Tests with coverage report +``` + +### Code Quality + +```bash +make fmt # Format with gofumpt +make vet # Run go vet +make lint # Run golangci-lint +make check # Run all checks (fmt, vet, lint) +``` + +### Dependencies + +```bash +make deps # Download dependencies +make tidy # Tidy and verify +make install-tools # Install gofumpt, golangci-lint, swag +``` + +## Makefile Targets + +| Target | Description | +|--------|-------------| +| `all` | Run fmt, vet, test, and build (default) | +| `build` | Build the fluid CLI binary | +| 
`build-dev` | Build with PostHog telemetry key | +| `run` | Build and run the CLI | +| `clean` | Clean build artifacts | +| `fmt` | Format code with gofumpt | +| `lint` | Run golangci-lint | +| `vet` | Run go vet | +| `test` | Run tests | +| `test-coverage` | Run tests with coverage | +| `check` | Run all code quality checks | +| `deps` | Download dependencies | +| `tidy` | Tidy and verify dependencies | +| `install` | Install fluid to GOPATH/bin | +| `install-tools` | Install dev tools | + +## Data Storage + +State is stored in SQLite at `~/.fluid/state.db`: +- Sandboxes, Snapshots, Commands, Diffs + +The database is auto-migrated on first run. + +If you remove a parameter from a function, don't just pass in nil/null/empty string in a different layer; make sure to remove the extra parameter from every place. diff --git a/fluid-remote/CLAUDE.md b/fluid-cli/CLAUDE.md similarity index 100% rename from fluid-remote/CLAUDE.md rename to fluid-cli/CLAUDE.md diff --git a/fluid/Makefile b/fluid-cli/Makefile similarity index 81% rename from fluid/Makefile rename to fluid-cli/Makefile index ecc69ac0..b79875a2 100644 --- a/fluid/Makefile +++ b/fluid-cli/Makefile @@ -1,6 +1,6 @@ # Fluid CLI Makefile -.PHONY: all build build-dev build-cli build-api run clean fmt vet test test-coverage check deps tidy help +.PHONY: all build build-dev run clean fmt vet test test-coverage check deps tidy help # Go parameters GOCMD=go @@ -12,13 +12,18 @@ GOMOD=$(GOCMD) mod # Binary names and paths CLI_BINARY=fluid -CLI_PATH=./cmd/fluid +CLI_PATH=./cmd/fluid-cli + +# Version injection +VERSION ?= dev +COMMIT ?= $(shell git rev-parse --short HEAD 2>/dev/null || echo "none") +DATE ?= $(shell date -u +%Y-%m-%dT%H:%M:%SZ) # PostHog key (empty by default for dev builds) POSTHOG_KEY ?= # Build flags -LDFLAGS=-s -w +LDFLAGS=-s -w -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.date=$(DATE) ifneq ($(POSTHOG_KEY),) LDFLAGS += -X 
github.com/aspectrr/fluid.sh/fluid/internal/telemetry.posthogAPIKey=$(POSTHOG_KEY) endif @@ -31,7 +36,7 @@ all: fmt vet test build ## Build targets -build: +build: ## Build the fluid CLI binary @echo "Building fluid CLI..." @rm -f bin/$(CLI_BINARY) @mkdir -p bin @@ -41,7 +46,7 @@ build-dev: POSTHOG_KEY=phc_QR3I1IKrEOqx5jIfJkBMfyznynIxRYd8kzmZM9o9fRZ build-dev: build ## Build with PostHog key -run: build-cli ## Build and run the CLI +run: build ## Build and run the CLI ./bin/$(CLI_BINARY) clean: ## Clean build artifacts @@ -58,7 +63,7 @@ fmt: ## Format code with gofumpt lint: ## Run golangci-lint @echo "Running golangci-lint..." - golangci-lint run --build-tags=$(TAGS) ./... + golangci-lint run --allow-parallel-runners --build-tags=$(TAGS) ./... vet: ## Run go vet @echo "Running go vet..." @@ -94,7 +99,7 @@ tidy: ## Tidy and verify dependencies ## Installation -install: build-cli ## Install fluid CLI to GOPATH/bin +install: build ## Install fluid CLI to GOPATH/bin @echo "Installing fluid to GOPATH/bin..." 
@cp bin/$(CLI_BINARY) $(GOPATH)/bin/$(CLI_BINARY) diff --git a/fluid/README.md b/fluid-cli/README.md similarity index 100% rename from fluid/README.md rename to fluid-cli/README.md diff --git a/fluid-cli/cmd/fluid-cli/main.go b/fluid-cli/cmd/fluid-cli/main.go new file mode 100644 index 00000000..b2555023 --- /dev/null +++ b/fluid-cli/cmd/fluid-cli/main.go @@ -0,0 +1,286 @@ +package main + +import ( + "context" + "fmt" + "io" + "log/slog" + "os" + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/aspectrr/fluid.sh/fluid/internal/config" + "github.com/aspectrr/fluid.sh/fluid/internal/doctor" + "github.com/aspectrr/fluid.sh/fluid/internal/hostexec" + fluidmcp "github.com/aspectrr/fluid.sh/fluid/internal/mcp" + "github.com/aspectrr/fluid.sh/fluid/internal/sandbox" + "github.com/aspectrr/fluid.sh/fluid/internal/store" + "github.com/aspectrr/fluid.sh/fluid/internal/store/sqlite" + "github.com/aspectrr/fluid.sh/fluid/internal/telemetry" + "github.com/aspectrr/fluid.sh/fluid/internal/tui" + "github.com/aspectrr/fluid.sh/fluid/internal/updater" +) + +var ( + version = "dev" + commit = "none" + date = "unknown" +) + +var ( + cfgFile string + cfg *config.Config +) + +func main() { + // Set TUI version from ldflags + tui.Version = version + + if err := rootCmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error()) + os.Exit(1) + } +} + +var rootCmd = &cobra.Command{ + Use: "fluid", + Short: "Fluid - Make Infrastructure Safe for AI", + Long: "Fluid is a terminal agent that lets AI manage infrastructure via sandboxed resources, audit trails and human approval.", + // Default to TUI when no subcommand is provided + RunE: func(cmd *cobra.Command, args []string) error { + if v, _ := cmd.Flags().GetBool("version"); v { + short := commit + if len(short) > 7 { + short = short[:7] + } + fmt.Printf("fluid %s (%s, %s)\n", version, short, date) + return nil + } + return runTUI() + }, +} + +var mcpCmd = &cobra.Command{ + Use: "mcp", + Short: "Start MCP server on
stdio", + Long: "Start an MCP (Model Context Protocol) server that exposes fluid tools over stdio for use with Claude Code, Cursor, and other MCP clients.", + RunE: func(cmd *cobra.Command, args []string) error { + return runMCP() + }, +} + +var doctorCmd = &cobra.Command{ + Use: "doctor", + Short: "Check daemon setup on a host", + Long: "Validate that the fluid-daemon is properly installed and configured on a sandbox host.", + RunE: func(cmd *cobra.Command, args []string) error { + hostName, _ := cmd.Flags().GetString("host") + + configPath := cfgFile + if configPath == "" { + home, _ := os.UserHomeDir() + configPath = filepath.Join(home, ".fluid", "config.yaml") + } + + loadedCfg, err := config.Load(configPath) + if err != nil { + return fmt.Errorf("load config: %w", err) + } + + ctx := context.Background() + var run hostexec.RunFunc + + if hostName == "" || hostName == "localhost" { + run = hostexec.NewLocal() + } else { + // Find host in config + var found bool + for _, h := range loadedCfg.Hosts { + if h.Name == hostName { + user := h.SSHUser + if user == "" { + user = "root" + } + port := h.SSHPort + if port == 0 { + port = 22 + } + run = hostexec.NewSSH(h.Address, user, port) + found = true + break + } + } + if !found { + return fmt.Errorf("host %q not found in config", hostName) + } + } + + useColor := os.Getenv("NO_COLOR") == "" + fmt.Println() + fmt.Println(" Checking daemon health...") + fmt.Println() + + results := doctor.RunAll(ctx, run) + allPassed := doctor.PrintResults(results, os.Stdout, useColor) + fmt.Println() + + if !allPassed { + os.Exit(1) + } + return nil + }, +} + +var updateCmd = &cobra.Command{ + Use: "update", + Aliases: []string{"upgrade"}, + Short: "Update fluid to the latest version", + RunE: func(cmd *cobra.Command, args []string) error { + latest, url, needsUpdate, err := updater.CheckLatest(version) + if err != nil { + return fmt.Errorf("check for updates: %w", err) + } + if !needsUpdate { + fmt.Printf("Already up to date (%s)\n", 
version) + return nil + } + fmt.Printf("Updating %s -> %s...\n", version, latest) + if err := updater.Update(url); err != nil { + return fmt.Errorf("update failed: %w", err) + } + fmt.Printf("Updated to %s\n", latest) + return nil + }, +} + +func init() { + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default ~/.fluid/config.yaml)") + rootCmd.Flags().BoolP("version", "v", false, "print version") + doctorCmd.Flags().String("host", "", "host name from config (default: localhost)") + rootCmd.AddCommand(mcpCmd) + rootCmd.AddCommand(updateCmd) + rootCmd.AddCommand(doctorCmd) +} + +// runMCP launches the MCP server on stdio +func runMCP() error { + configPath := cfgFile + if configPath == "" { + home, _ := os.UserHomeDir() + configPath = filepath.Join(home, ".fluid", "config.yaml") + } + + var err error + cfg, err = tui.EnsureConfigExists(configPath) + if err != nil { + return fmt.Errorf("ensure config: %w", err) + } + + // Log to file - stdout is the MCP transport + logPath := filepath.Join(filepath.Dir(configPath), "fluid-mcp.log") + logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o600) + if err != nil { + logFile = nil + } + var logger *slog.Logger + if logFile != nil { + defer func() { _ = logFile.Close() }() + logger = slog.New(slog.NewTextHandler(logFile, &slog.HandlerOptions{Level: slog.LevelDebug})) + } else { + logger = slog.New(slog.NewTextHandler(io.Discard, nil)) + } + + svc, st, tele, err := initServicesForMCPTUI(cfg, logger) + if err != nil { + return fmt.Errorf("init services: %w", err) + } + defer func() { _ = svc.Close() }() + defer func() { _ = st.Close() }() + + srv := fluidmcp.NewServer(cfg, st, svc, tele, logger) + return srv.Serve() +} + +// runTUI launches the interactive TUI +func runTUI() error { + configPath := cfgFile + if configPath == "" { + home, _ := os.UserHomeDir() + configPath = filepath.Join(home, ".fluid", "config.yaml") + } + + var err error + cfg, err = 
tui.EnsureConfigExists(configPath) + if err != nil { + return fmt.Errorf("ensure config: %w", err) + } + + // Check if onboarding is needed (first run) + if !cfg.OnboardingComplete { + updatedCfg, err := tui.RunOnboarding(cfg, configPath) + if err != nil { + return fmt.Errorf("onboarding: %w", err) + } + cfg = updatedCfg + cfg.OnboardingComplete = true + if err := cfg.Save(configPath); err != nil { + fmt.Fprintf(os.Stderr, "Warning: could not save onboarding status: %v\n", err) + } + } + + // Log to file to avoid corrupting the TUI + logPath := filepath.Join(filepath.Dir(configPath), "fluid.log") + logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o600) + if err != nil { + fmt.Fprintf(os.Stderr, "Warning: could not open log file %s: %v\n", logPath, err) + logFile = nil + } + var fileLogger *slog.Logger + if logFile != nil { + defer func() { _ = logFile.Close() }() + fileLogger = slog.New(slog.NewTextHandler(logFile, &slog.HandlerOptions{Level: slog.LevelDebug})) + } else { + fileLogger = slog.New(slog.NewTextHandler(io.Discard, nil)) + } + + svc, st, tele, err := initServicesForMCPTUI(cfg, fileLogger) + if err != nil { + return fmt.Errorf("init services: %w", err) + } + defer func() { _ = svc.Close() }() + defer func() { _ = st.Close() }() + + agent := tui.NewFluidAgent(cfg, st, svc, tele, fileLogger) + + model := tui.NewModel("fluid", "local", "vm-agent", agent, cfg, configPath) + return tui.Run(model) +} + +// initServicesForMCPTUI creates sandbox.Service, store, and telemetry for MCP/TUI modes. 
+func initServicesForMCPTUI(loadedCfg *config.Config, logger *slog.Logger) (sandbox.Service, store.Store, telemetry.Service, error) { + ctx := context.Background() + st, err := sqlite.New(ctx, store.Config{AutoMigrate: true}) + if err != nil { + return nil, nil, nil, fmt.Errorf("open store: %w", err) + } + + tele, err := telemetry.NewService(loadedCfg.Telemetry) + if err != nil { + tele = telemetry.NewNoopService() + } + + daemonAddr := loadedCfg.ControlPlane.DaemonAddress + if daemonAddr == "" { + daemonAddr = "localhost:9091" + } + + svc, err := sandbox.NewRemoteService(daemonAddr, loadedCfg.ControlPlane) + if err != nil { + _ = st.Close() + tele.Close() + return nil, nil, nil, fmt.Errorf("connect to daemon at %s: %w", daemonAddr, err) + } + + return svc, st, tele, nil +} diff --git a/fluid/go.mod b/fluid-cli/go.mod similarity index 87% rename from fluid/go.mod rename to fluid-cli/go.mod index 0bc91910..d42eaf0c 100644 --- a/fluid/go.mod +++ b/fluid-cli/go.mod @@ -6,7 +6,7 @@ toolchain go1.24.4 require ( github.com/alecthomas/chroma/v2 v2.14.0 - github.com/beevik/etree v1.4.0 + github.com/aspectrr/fluid.sh/proto/gen/go v0.0.0-00010101000000-000000000000 github.com/charmbracelet/bubbles v0.21.0 github.com/charmbracelet/bubbletea v1.3.10 github.com/charmbracelet/glamour v0.10.0 @@ -17,11 +17,13 @@ require ( github.com/posthog/posthog-go v1.9.0 github.com/spf13/cobra v1.10.2 github.com/stretchr/testify v1.11.1 + google.golang.org/grpc v1.79.1 gopkg.in/yaml.v3 v3.0.1 gorm.io/gorm v1.30.0 - libvirt.org/go/libvirt v1.11010.0 ) +replace github.com/aspectrr/fluid.sh/proto/gen/go => ../proto/gen/go + require ( github.com/atotto/clipboard v0.1.4 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect @@ -39,7 +41,6 @@ require ( github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect github.com/glebarez/go-sqlite v1.22.0 // indirect github.com/goccy/go-json v0.10.5 // indirect - github.com/google/go-cmp v0.6.0 // indirect 
github.com/gorilla/css v1.0.1 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -68,12 +69,12 @@ require ( github.com/yosida95/uritemplate/v3 v3.0.2 // indirect github.com/yuin/goldmark v1.7.8 // indirect github.com/yuin/goldmark-emoji v1.0.5 // indirect - golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.36.0 // indirect - golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.38.0 // indirect + golang.org/x/text v0.32.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/protobuf v1.36.11 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect modernc.org/libc v1.55.3 // indirect modernc.org/mathutil v1.6.0 // indirect diff --git a/fluid/go.sum b/fluid-cli/go.sum similarity index 80% rename from fluid/go.sum rename to fluid-cli/go.sum index aec79855..83800411 100644 --- a/fluid/go.sum +++ b/fluid-cli/go.sum @@ -16,10 +16,10 @@ github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuP github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= -github.com/beevik/etree v1.4.0 h1:oz1UedHRepuY3p4N5OjE0nK1WLCqtzHf25bxplKOHLs= -github.com/beevik/etree v1.4.0/go.mod h1:cyWiXwGoasx60gHvtnEh5x8+uIjUVnjWqBvEnhnqKDA= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs= github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg= github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw= @@ -55,10 +55,16 @@ github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw= github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo= github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -142,25 +148,45 @@ github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic= github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= github.com/yuin/goldmark-emoji v1.0.5 h1:EMVWyCGPlXJfUXBXpuMu+ii3TIaxbVBnEX9uaDC4cIk= github.com/yuin/goldmark-emoji v1.0.5/go.mod h1:tTkZEbwu5wkPmgTcitqddVxY9osFZiavD+r4AzQrh1U= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E= golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/net v0.33.0 
h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod 
h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= +google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -168,8 +194,6 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs= gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE= -libvirt.org/go/libvirt v1.11010.0 h1:1EIh2x6qcRoIBBOvrgN62vq5FIpgUBrmGadprQ/4M0Y= -libvirt.org/go/libvirt v1.11010.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ= modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ= modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= modernc.org/ccgo/v4 v4.19.2 
h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y= diff --git a/fluid/internal/ansible/playbook.go b/fluid-cli/internal/ansible/playbook.go similarity index 100% rename from fluid/internal/ansible/playbook.go rename to fluid-cli/internal/ansible/playbook.go diff --git a/fluid/internal/ansible/playbook_test.go b/fluid-cli/internal/ansible/playbook_test.go similarity index 100% rename from fluid/internal/ansible/playbook_test.go rename to fluid-cli/internal/ansible/playbook_test.go diff --git a/fluid/internal/config/config.go b/fluid-cli/internal/config/config.go similarity index 88% rename from fluid/internal/config/config.go rename to fluid-cli/internal/config/config.go index 884d5346..8dbc7610 100644 --- a/fluid/internal/config/config.go +++ b/fluid-cli/internal/config/config.go @@ -12,17 +12,35 @@ import ( // Config is the root configuration for virsh-sandbox API. type Config struct { - Provider string `yaml:"provider"` // "libvirt" (default) or "proxmox" - Libvirt LibvirtConfig `yaml:"libvirt"` - Proxmox ProxmoxConfig `yaml:"proxmox"` - VM VMConfig `yaml:"vm"` - SSH SSHConfig `yaml:"ssh"` - Ansible AnsibleConfig `yaml:"ansible"` - Logging LoggingConfig `yaml:"logging"` - Telemetry TelemetryConfig `yaml:"telemetry"` - AIAgent AIAgentConfig `yaml:"ai_agent"` - Hosts []HostConfig `yaml:"hosts"` // Remote hosts for multi-host VM management - OnboardingComplete bool `yaml:"onboarding_complete"` // Whether onboarding wizard has been completed + Provider string `yaml:"provider"` // "libvirt" (default), "proxmox", or "control-plane" + Libvirt LibvirtConfig `yaml:"libvirt"` + Proxmox ProxmoxConfig `yaml:"proxmox"` + ControlPlane ControlPlaneConfig `yaml:"control_plane"` + VM VMConfig `yaml:"vm"` + SSH SSHConfig `yaml:"ssh"` + Ansible AnsibleConfig `yaml:"ansible"` + Logging LoggingConfig `yaml:"logging"` + Telemetry TelemetryConfig `yaml:"telemetry"` + AIAgent AIAgentConfig `yaml:"ai_agent"` + Hosts []HostConfig `yaml:"hosts"` // Remote hosts for multi-host VM management + 
OnboardingComplete bool `yaml:"onboarding_complete"` // Whether onboarding wizard has been completed +} + +// ControlPlaneConfig configures the connection to the hosted control plane. +type ControlPlaneConfig struct { + // Address is the control plane REST API endpoint (e.g., "http://localhost:8080"). + Address string `yaml:"address"` + + // DaemonAddress is the gRPC endpoint for direct daemon access (e.g., "localhost:9091"). + // When set, the CLI calls the daemon directly instead of using local providers. + DaemonAddress string `yaml:"daemon_address"` + + // DaemonInsecure skips TLS verification for the daemon gRPC connection. + // Defaults to true for backward compatibility. + DaemonInsecure bool `yaml:"daemon_insecure"` + + // DaemonCAFile is the path to a CA certificate for verifying the daemon's TLS cert. + DaemonCAFile string `yaml:"daemon_ca_file"` } // ProxmoxConfig holds Proxmox VE API settings. @@ -122,6 +140,9 @@ func DefaultConfig() *Config { return &Config{ Provider: "libvirt", + ControlPlane: ControlPlaneConfig{ + DaemonInsecure: true, + }, Proxmox: ProxmoxConfig{ VerifySSL: true, CloneMode: "full", @@ -129,7 +150,7 @@ func DefaultConfig() *Config { VMIDEnd: 9999, }, Telemetry: TelemetryConfig{ - EnableAnonymousUsage: true, + EnableAnonymousUsage: false, }, Libvirt: LibvirtConfig{ URI: "qemu:///system", diff --git a/fluid/internal/config/config_test.go b/fluid-cli/internal/config/config_test.go similarity index 100% rename from fluid/internal/config/config_test.go rename to fluid-cli/internal/config/config_test.go diff --git a/fluid-cli/internal/doctor/checks.go b/fluid-cli/internal/doctor/checks.go new file mode 100644 index 00000000..5a7420e1 --- /dev/null +++ b/fluid-cli/internal/doctor/checks.go @@ -0,0 +1,211 @@ +package doctor + +import ( + "context" + "fmt" + "runtime" + "strings" + + "github.com/aspectrr/fluid.sh/fluid/internal/hostexec" +) + +// qemuBinaryName returns the correct QEMU binary name for the current architecture. 
+func qemuBinaryName() string { + switch runtime.GOARCH { + case "arm64": + return "qemu-system-aarch64" + default: + return "qemu-system-x86_64" + } +} + +type check struct { + name string + fn func(ctx context.Context, run hostexec.RunFunc) CheckResult +} + +func allChecks() []check { + return []check{ + {"daemon-binary", checkDaemonBinary}, + {"grpc-port", checkGRPCPort}, + {"systemd-active", checkSystemdActive}, + {"systemd-enabled", checkSystemdEnabled}, + {"libvirt-running", checkLibvirtRunning}, + {"kvm-available", checkKVMAvailable}, + {"qemu-binary", checkQEMUBinary}, + {"storage-dirs", checkStorageDirs}, + {"daemon-config", checkDaemonConfig}, + } +} + +func checkDaemonBinary(ctx context.Context, run hostexec.RunFunc) CheckResult { + _, _, code, _ := run(ctx, "which fluid-daemon") + if code == 0 { + return CheckResult{ + Name: "daemon-binary", + Category: "binary", + Passed: true, + Message: "fluid-daemon binary found", + } + } + return CheckResult{ + Name: "daemon-binary", + Category: "binary", + Passed: false, + Message: "fluid-daemon binary not found", + FixCmd: "curl -fsSL https://get.fluid.sh/daemon | bash", + } +} + +func checkGRPCPort(ctx context.Context, run hostexec.RunFunc) CheckResult { + stdout, _, code, _ := run(ctx, "ss -tlnp 2>/dev/null | grep :9091 || netstat -tlnp 2>/dev/null | grep :9091") + if code == 0 && strings.TrimSpace(stdout) != "" { + return CheckResult{ + Name: "grpc-port", + Category: "connectivity", + Passed: true, + Message: "gRPC port :9091 listening", + } + } + return CheckResult{ + Name: "grpc-port", + Category: "connectivity", + Passed: false, + Message: "gRPC port :9091 not listening", + FixCmd: "sudo systemctl start fluid-daemon", + } +} + +func checkSystemdActive(ctx context.Context, run hostexec.RunFunc) CheckResult { + stdout, _, _, _ := run(ctx, "systemctl is-active fluid-daemon 2>/dev/null") + if strings.TrimSpace(stdout) == "active" { + return CheckResult{ + Name: "systemd-active", + Category: "service", + Passed: 
true, + Message: "fluid-daemon service active", + } + } + return CheckResult{ + Name: "systemd-active", + Category: "service", + Passed: false, + Message: "fluid-daemon service not active", + FixCmd: "sudo systemctl start fluid-daemon", + } +} + +func checkSystemdEnabled(ctx context.Context, run hostexec.RunFunc) CheckResult { + stdout, _, _, _ := run(ctx, "systemctl is-enabled fluid-daemon 2>/dev/null") + if strings.TrimSpace(stdout) == "enabled" { + return CheckResult{ + Name: "systemd-enabled", + Category: "service", + Passed: true, + Message: "fluid-daemon service enabled at boot", + } + } + return CheckResult{ + Name: "systemd-enabled", + Category: "service", + Passed: false, + Message: "fluid-daemon service not enabled at boot", + FixCmd: "sudo systemctl enable fluid-daemon", + } +} + +func checkLibvirtRunning(ctx context.Context, run hostexec.RunFunc) CheckResult { + stdout, _, _, _ := run(ctx, "systemctl is-active libvirtd 2>/dev/null") + if strings.TrimSpace(stdout) == "active" { + return CheckResult{ + Name: "libvirt-running", + Category: "prerequisites", + Passed: true, + Message: "libvirt running", + } + } + return CheckResult{ + Name: "libvirt-running", + Category: "prerequisites", + Passed: false, + Message: "libvirt not running", + FixCmd: "sudo apt install -y libvirt-daemon-system && sudo systemctl start libvirtd", + } +} + +func checkKVMAvailable(ctx context.Context, run hostexec.RunFunc) CheckResult { + _, _, code, _ := run(ctx, "test -e /dev/kvm") + if code == 0 { + return CheckResult{ + Name: "kvm-available", + Category: "prerequisites", + Passed: true, + Message: "KVM available (/dev/kvm)", + } + } + return CheckResult{ + Name: "kvm-available", + Category: "prerequisites", + Passed: false, + Message: "KVM not available (/dev/kvm missing)", + FixCmd: "sudo modprobe kvm && sudo modprobe kvm_intel || sudo modprobe kvm_amd", + } +} + +func checkQEMUBinary(ctx context.Context, run hostexec.RunFunc) CheckResult { + binary := qemuBinaryName() + _, _, 
code, _ := run(ctx, fmt.Sprintf("which %s", binary)) + if code == 0 { + return CheckResult{ + Name: "qemu-binary", + Category: "binary", + Passed: true, + Message: "QEMU binary found", + } + } + return CheckResult{ + Name: "qemu-binary", + Category: "binary", + Passed: false, + Message: fmt.Sprintf("%s not found", binary), + FixCmd: "sudo apt install -y qemu-system-x86", + } +} + +func checkStorageDirs(ctx context.Context, run hostexec.RunFunc) CheckResult { + _, _, code, _ := run(ctx, "test -d /var/lib/fluid-daemon/images && test -d /var/lib/fluid-daemon/overlays") + if code == 0 { + return CheckResult{ + Name: "storage-dirs", + Category: "storage", + Passed: true, + Message: "storage directories exist", + } + } + return CheckResult{ + Name: "storage-dirs", + Category: "storage", + Passed: false, + Message: "storage directories missing (/var/lib/fluid-daemon/{images,overlays})", + FixCmd: "sudo mkdir -p /var/lib/fluid-daemon/images /var/lib/fluid-daemon/overlays", + } +} + +func checkDaemonConfig(ctx context.Context, run hostexec.RunFunc) CheckResult { + _, _, code, _ := run(ctx, "test -f /etc/fluid-daemon/daemon.yaml || test -f /etc/fluid/daemon.yaml || test -f ~/.config/fluid/daemon.yaml") + if code == 0 { + return CheckResult{ + Name: "daemon-config", + Category: "config", + Passed: true, + Message: "daemon config found", + } + } + return CheckResult{ + Name: "daemon-config", + Category: "config", + Passed: false, + Message: "daemon config not found", + FixCmd: "Run the guided setup in onboarding or create /etc/fluid-daemon/daemon.yaml", + } +} diff --git a/fluid-cli/internal/doctor/doctor.go b/fluid-cli/internal/doctor/doctor.go new file mode 100644 index 00000000..3474c02c --- /dev/null +++ b/fluid-cli/internal/doctor/doctor.go @@ -0,0 +1,69 @@ +package doctor + +import ( + "context" + "fmt" + "io" + + "github.com/aspectrr/fluid.sh/fluid/internal/hostexec" +) + +// CheckResult holds the outcome of a single doctor check. 
+type CheckResult struct { + Name string + Category string // "connectivity", "binary", "service", "prerequisites", "storage", "config" + Passed bool + Message string + FixCmd string // empty if passed +} + +// RunAll executes all doctor checks and returns results. +func RunAll(ctx context.Context, run hostexec.RunFunc) []CheckResult { + checks := allChecks() + results := make([]CheckResult, 0, len(checks)) + for _, c := range checks { + result := c.fn(ctx, run) + results = append(results, result) + } + return results +} + +// PrintResults writes check results to w. Returns true if all checks passed. +func PrintResults(results []CheckResult, w io.Writer, color bool) bool { + allPassed := true + passed := 0 + failed := 0 + + for _, r := range results { + var icon, colorStart, colorEnd string + if r.Passed { + passed++ + icon = "v" + if color { + colorStart = "\033[32m" // green + colorEnd = "\033[0m" + } + } else { + failed++ + allPassed = false + icon = "x" + if color { + colorStart = "\033[31m" // red + colorEnd = "\033[0m" + } + } + _, _ = fmt.Fprintf(w, " %s%s %s%s\n", colorStart, icon, r.Message, colorEnd) + if !r.Passed && r.FixCmd != "" { + _, _ = fmt.Fprintf(w, " Fix: %s\n", r.FixCmd) + } + } + + _, _ = fmt.Fprintln(w) + if allPassed { + _, _ = fmt.Fprintf(w, " %d/%d passed\n", passed, passed+failed) + } else { + _, _ = fmt.Fprintf(w, " %d/%d passed, %d failed\n", passed, passed+failed, failed) + } + + return allPassed +} diff --git a/fluid-cli/internal/doctor/doctor_test.go b/fluid-cli/internal/doctor/doctor_test.go new file mode 100644 index 00000000..91c62460 --- /dev/null +++ b/fluid-cli/internal/doctor/doctor_test.go @@ -0,0 +1,121 @@ +package doctor + +import ( + "bytes" + "context" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRunAllAllPass(t *testing.T) { + run := func(ctx context.Context, command string) (string, string, int, error) { + // Everything succeeds + if strings.Contains(command, "systemctl is-active 
fluid-daemon") { + return "active\n", "", 0, nil + } + if strings.Contains(command, "systemctl is-enabled fluid-daemon") { + return "enabled\n", "", 0, nil + } + if strings.Contains(command, "systemctl is-active libvirtd") { + return "active\n", "", 0, nil + } + if strings.Contains(command, "which fluid-daemon") { + return "/usr/local/bin/fluid-daemon\n", "", 0, nil + } + if strings.Contains(command, "which qemu-system") { + return "/usr/bin/qemu-system\n", "", 0, nil + } + if strings.Contains(command, "ss -tlnp") { + return "LISTEN 0 128 *:9091 *:*\n", "", 0, nil + } + if strings.Contains(command, "test -e /dev/kvm") { + return "", "", 0, nil + } + if strings.Contains(command, "test -d /var/lib/fluid") { + return "", "", 0, nil + } + if strings.Contains(command, "test -f") { + return "", "", 0, nil + } + return "", "", 0, nil + } + + results := RunAll(context.Background(), run) + assert.Len(t, results, 9) + for _, r := range results { + assert.True(t, r.Passed, "check %s should pass", r.Name) + } +} + +func TestRunAllMixedFailures(t *testing.T) { + run := func(ctx context.Context, command string) (string, string, int, error) { + // Only daemon binary and KVM pass + if strings.Contains(command, "which fluid-daemon") { + return "/usr/local/bin/fluid-daemon\n", "", 0, nil + } + if strings.Contains(command, "test -e /dev/kvm") { + return "", "", 0, nil + } + // Everything else fails + if strings.Contains(command, "systemctl is-active") { + return "inactive\n", "", 0, nil + } + if strings.Contains(command, "systemctl is-enabled") { + return "disabled\n", "", 0, nil + } + return "", "", 1, nil + } + + results := RunAll(context.Background(), run) + assert.Len(t, results, 9) + + passCount := 0 + for _, r := range results { + if r.Passed { + passCount++ + } else { + assert.NotEmpty(t, r.FixCmd, "failed check %s should have a fix command", r.Name) + } + } + assert.Equal(t, 2, passCount) +} + +func TestPrintResultsAllPass(t *testing.T) { + results := []CheckResult{ + {Name: 
"test1", Passed: true, Message: "check 1 ok"}, + {Name: "test2", Passed: true, Message: "check 2 ok"}, + } + + var buf bytes.Buffer + allPassed := PrintResults(results, &buf, false) + assert.True(t, allPassed) + assert.Contains(t, buf.String(), "2/2 passed") +} + +func TestPrintResultsWithFailures(t *testing.T) { + results := []CheckResult{ + {Name: "test1", Passed: true, Message: "check 1 ok"}, + {Name: "test2", Passed: false, Message: "check 2 failed", FixCmd: "fix it"}, + } + + var buf bytes.Buffer + allPassed := PrintResults(results, &buf, false) + assert.False(t, allPassed) + assert.Contains(t, buf.String(), "1/2 passed, 1 failed") + assert.Contains(t, buf.String(), "Fix: fix it") +} + +func TestPrintResultsWithColor(t *testing.T) { + results := []CheckResult{ + {Name: "test1", Passed: true, Message: "ok"}, + {Name: "test2", Passed: false, Message: "fail", FixCmd: "fix"}, + } + + var buf bytes.Buffer + PrintResults(results, &buf, true) + // Should contain ANSI escape codes + assert.Contains(t, buf.String(), "\033[32m") // green + assert.Contains(t, buf.String(), "\033[31m") // red +} diff --git a/fluid/internal/error/responderror.go b/fluid-cli/internal/error/responderror.go similarity index 100% rename from fluid/internal/error/responderror.go rename to fluid-cli/internal/error/responderror.go diff --git a/fluid-cli/internal/hostexec/hostexec.go b/fluid-cli/internal/hostexec/hostexec.go new file mode 100644 index 00000000..e5e263cc --- /dev/null +++ b/fluid-cli/internal/hostexec/hostexec.go @@ -0,0 +1,104 @@ +package hostexec + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "os/exec" +) + +// RunFunc executes a command on a host and returns stdout, stderr, exit code, and error. +type RunFunc func(ctx context.Context, command string) (stdout, stderr string, exitCode int, err error) + +// NewLocal returns a RunFunc that executes commands locally via bash. 
+func NewLocal() RunFunc { + return func(ctx context.Context, command string) (string, string, int, error) { + cmd := exec.CommandContext(ctx, "bash", "-c", command) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + exitCode := 0 + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + exitCode = exitErr.ExitCode() + } else { + return stdout.String(), stderr.String(), 1, err + } + } + return stdout.String(), stderr.String(), exitCode, nil + } +} + +// NewSSH returns a RunFunc that executes commands on a remote host via SSH. +func NewSSH(addr, user string, port int) RunFunc { + return func(ctx context.Context, command string) (string, string, int, error) { + args := []string{ + "-o", "StrictHostKeyChecking=accept-new", + "-o", "ConnectTimeout=15", + "-o", "BatchMode=yes", + } + if port != 0 && port != 22 { + args = append(args, "-p", fmt.Sprintf("%d", port)) + } + args = append(args, fmt.Sprintf("%s@%s", user, addr), "--", command) + + cmd := exec.CommandContext(ctx, "ssh", args...) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + exitCode := 0 + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + exitCode = exitErr.ExitCode() + } else { + return stdout.String(), stderr.String(), 1, err + } + } + return stdout.String(), stderr.String(), exitCode, nil + } +} + +// NewSSHWithJump returns a RunFunc that executes commands on a remote host via SSH +// with a proxy jump through an intermediate host. 
+// jumpHost format: "user@host" or "user@host:port" +func NewSSHWithJump(addr, user string, port int, jumpHost string) RunFunc { + return func(ctx context.Context, command string) (string, string, int, error) { + args := []string{ + "-o", "StrictHostKeyChecking=accept-new", + "-o", "ConnectTimeout=15", + "-o", "BatchMode=yes", + "-J", jumpHost, + } + if port != 0 && port != 22 { + args = append(args, "-p", fmt.Sprintf("%d", port)) + } + args = append(args, fmt.Sprintf("%s@%s", user, addr), "--", command) + + cmd := exec.CommandContext(ctx, "ssh", args...) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + exitCode := 0 + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + exitCode = exitErr.ExitCode() + } else { + return stdout.String(), stderr.String(), 1, err + } + } + return stdout.String(), stderr.String(), exitCode, nil + } +} + +// WithSudo wraps a RunFunc to execute commands with sudo via base64 encoding. +// This avoids shell quoting issues with complex commands. 
+func WithSudo(run RunFunc) RunFunc { + return func(ctx context.Context, command string) (string, string, int, error) { + encoded := base64.StdEncoding.EncodeToString([]byte(command)) + return run(ctx, fmt.Sprintf("echo %s | base64 -d | sudo bash", encoded)) + } +} diff --git a/fluid-cli/internal/hostexec/hostexec_test.go b/fluid-cli/internal/hostexec/hostexec_test.go new file mode 100644 index 00000000..0f1b3fac --- /dev/null +++ b/fluid-cli/internal/hostexec/hostexec_test.go @@ -0,0 +1,60 @@ +package hostexec + +import ( + "context" + "encoding/base64" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWithSudo(t *testing.T) { + var captured string + mockRun := RunFunc(func(ctx context.Context, command string) (string, string, int, error) { + captured = command + return "", "", 0, nil + }) + + sudoRun := WithSudo(mockRun) + _, _, _, err := sudoRun(context.Background(), "apt install nginx") + require.NoError(t, err) + + // Should wrap with base64 + sudo + assert.Contains(t, captured, "base64 -d | sudo bash") + assert.Contains(t, captured, "echo ") + + // Decode the base64 part to verify the original command + parts := strings.SplitN(captured, "echo ", 2) + require.Len(t, parts, 2) + b64Part := strings.Split(parts[1], " |")[0] + decoded, err := base64.StdEncoding.DecodeString(b64Part) + require.NoError(t, err) + assert.Equal(t, "apt install nginx", string(decoded)) +} + +func TestNewLocal(t *testing.T) { + run := NewLocal() + stdout, _, code, err := run(context.Background(), "echo hello") + require.NoError(t, err) + assert.Equal(t, 0, code) + assert.Equal(t, "hello\n", stdout) +} + +func TestNewLocalExitCode(t *testing.T) { + run := NewLocal() + _, _, code, err := run(context.Background(), "exit 42") + assert.NoError(t, err) // err is nil for non-zero exit codes via ExitError + assert.Equal(t, 42, code) +} + +func TestNewSSHCommandConstruction(t *testing.T) { + // We can't test actual SSH, but we 
can verify the function is constructed + run := NewSSH("192.168.1.100", "root", 22) + assert.NotNil(t, run) + + // Test with non-standard port + runCustomPort := NewSSH("192.168.1.100", "root", 2222) + assert.NotNil(t, runCustomPort) +} diff --git a/fluid-remote/internal/json/decodejson.go b/fluid-cli/internal/json/decodejson.go similarity index 100% rename from fluid-remote/internal/json/decodejson.go rename to fluid-cli/internal/json/decodejson.go diff --git a/fluid/internal/json/respondjson.go b/fluid-cli/internal/json/respondjson.go similarity index 100% rename from fluid/internal/json/respondjson.go rename to fluid-cli/internal/json/respondjson.go diff --git a/fluid/internal/llm/client.go b/fluid-cli/internal/llm/client.go similarity index 100% rename from fluid/internal/llm/client.go rename to fluid-cli/internal/llm/client.go diff --git a/fluid/internal/llm/openrouter.go b/fluid-cli/internal/llm/openrouter.go similarity index 100% rename from fluid/internal/llm/openrouter.go rename to fluid-cli/internal/llm/openrouter.go diff --git a/fluid/internal/llm/tools.go b/fluid-cli/internal/llm/tools.go similarity index 97% rename from fluid/internal/llm/tools.go rename to fluid-cli/internal/llm/tools.go index 31e1ca3f..fa0b7dbe 100644 --- a/fluid/internal/llm/tools.go +++ b/fluid-cli/internal/llm/tools.go @@ -41,7 +41,7 @@ func GetTools() []Tool { Type: "function", Function: Function{ Name: "create_sandbox", - Description: "Create a new sandbox VM by cloning from a source VM.", + Description: "Create a new sandbox VM by cloning from a source VM. Set live=true for current state, live=false to use cached image if available.", Parameters: ParameterSchema{ Type: "object", Properties: map[string]Property{ @@ -61,6 +61,10 @@ func GetTools() []Tool { Type: "integer", Description: "RAM in MB (default: 4096).", }, + "live": { + Type: "boolean", + Description: "If true, clone from the VM's live current state. 
If false (default), use cached image if available.", + }, }, Required: []string{"source_vm"}, }, diff --git a/fluid/internal/mcp/handlers.go b/fluid-cli/internal/mcp/handlers.go similarity index 72% rename from fluid/internal/mcp/handlers.go rename to fluid-cli/internal/mcp/handlers.go index d9c29d6b..deb61547 100644 --- a/fluid/internal/mcp/handlers.go +++ b/fluid-cli/internal/mcp/handlers.go @@ -1,12 +1,10 @@ package mcp import ( - "bytes" "context" "encoding/base64" "encoding/json" "fmt" - "os/exec" "path/filepath" "strings" "time" @@ -14,8 +12,7 @@ import ( "github.com/mark3labs/mcp-go/mcp" "github.com/aspectrr/fluid.sh/fluid/internal/ansible" - "github.com/aspectrr/fluid.sh/fluid/internal/config" - "github.com/aspectrr/fluid.sh/fluid/internal/store" + "github.com/aspectrr/fluid.sh/fluid/internal/sandbox" ) // jsonResult marshals v to JSON and returns it as a text tool result. @@ -47,29 +44,6 @@ func shellEscape(s string) (string, error) { return "'" + strings.ReplaceAll(s, "'", "'\\''") + "'", nil } -// findHostForSourceVM finds the host that has the given source VM. -func (s *Server) findHostForSourceVM(ctx context.Context, sourceVM, hostName string) (*config.HostConfig, error) { - if s.multiHostMgr == nil { - return nil, nil - } - - if hostName != "" { - hosts := s.multiHostMgr.GetHosts() - for i := range hosts { - if hosts[i].Name == hostName { - return &hosts[i], nil - } - } - return nil, fmt.Errorf("host %q not found in configuration", hostName) - } - - host, err := s.multiHostMgr.FindHostForVM(ctx, sourceVM) - if err != nil { - return nil, fmt.Errorf("find host for VM %s: %w", sourceVM, err) - } - return host, nil -} - // trackToolCall records an mcp_tool_call telemetry event. 
func (s *Server) trackToolCall(toolName string) { if s.telemetry != nil { @@ -87,7 +61,7 @@ func (s *Server) handleListSandboxes(ctx context.Context, request mcp.CallToolRe ctx, cancel := context.WithTimeout(ctx, 5*time.Minute) defer cancel() - sandboxes, err := s.vmService.GetSandboxes(ctx, store.SandboxFilter{}, nil) + sandboxes, err := s.service.ListSandboxes(ctx) if err != nil { s.logger.Error("list_sandboxes failed", "error", err) return errorResult(map[string]any{"error": fmt.Sprintf("list sandboxes: %s", err)}) @@ -97,19 +71,13 @@ func (s *Server) handleListSandboxes(ctx context.Context, request mcp.CallToolRe for _, sb := range sandboxes { item := map[string]any{ "id": sb.ID, - "name": sb.SandboxName, + "name": sb.Name, "state": sb.State, "base_image": sb.BaseImage, "created_at": sb.CreatedAt.Format(time.RFC3339), } - if sb.IPAddress != nil { - item["ip"] = *sb.IPAddress - } - if sb.HostName != nil { - item["host"] = *sb.HostName - } - if sb.HostAddress != nil { - item["host_address"] = *sb.HostAddress + if sb.IPAddress != "" { + item["ip"] = sb.IPAddress } result = append(result, item) } @@ -130,50 +98,29 @@ func (s *Server) handleCreateSandbox(ctx context.Context, request mcp.CallToolRe if sourceVM == "" { return nil, fmt.Errorf("source_vm is required") } - hostName := request.GetString("host", "") cpu := request.GetInt("cpu", 0) memoryMB := request.GetInt("memory_mb", 0) - - var host *config.HostConfig - if s.multiHostMgr != nil { - var err error - host, err = s.findHostForSourceVM(ctx, sourceVM, hostName) - if err != nil { - s.logger.Error("create_sandbox failed", "error", err, "source_vm", sourceVM) - return errorResult(map[string]any{"source_vm": sourceVM, "error": fmt.Sprintf("find host for source VM: %s", err)}) - } - } - - if host != nil { - sb, ip, err := s.vmService.CreateSandboxOnHost(ctx, host, sourceVM, mcpAgentID, "", cpu, memoryMB, nil, true, true) - if err != nil { - s.logger.Error("create_sandbox failed", "error", err, "source_vm", 
sourceVM, "host", host.Name) - return errorResult(map[string]any{"source_vm": sourceVM, "host": host.Name, "error": fmt.Sprintf("create sandbox on host: %s", err)}) - } - result := map[string]any{ - "sandbox_id": sb.ID, - "name": sb.SandboxName, - "state": sb.State, - "host": host.Name, - } - if ip != "" { - result["ip"] = ip - } - return jsonResult(result) - } - - sb, ip, err := s.vmService.CreateSandbox(ctx, sourceVM, mcpAgentID, "", cpu, memoryMB, nil, true, true) + live := request.GetBool("live", false) + + sb, err := s.service.CreateSandbox(ctx, sandbox.CreateRequest{ + SourceVM: sourceVM, + AgentID: mcpAgentID, + VCPUs: cpu, + MemoryMB: memoryMB, + Live: live, + }) if err != nil { s.logger.Error("create_sandbox failed", "error", err, "source_vm", sourceVM) return errorResult(map[string]any{"source_vm": sourceVM, "error": fmt.Sprintf("create sandbox: %s", err)}) } + result := map[string]any{ "sandbox_id": sb.ID, - "name": sb.SandboxName, + "name": sb.Name, "state": sb.State, } - if ip != "" { - result["ip"] = ip + if sb.IPAddress != "" { + result["ip"] = sb.IPAddress } return jsonResult(result) } @@ -189,7 +136,7 @@ func (s *Server) handleDestroySandbox(ctx context.Context, request mcp.CallToolR return nil, fmt.Errorf("sandbox_id is required") } - _, err := s.vmService.DestroySandbox(ctx, id) + err := s.service.DestroySandbox(ctx, id) if err != nil { s.logger.Error("destroy_sandbox failed", "error", err, "sandbox_id", id) return errorResult(map[string]any{"sandbox_id": id, "error": fmt.Sprintf("destroy sandbox: %s", err)}) @@ -216,12 +163,9 @@ func (s *Server) handleRunCommand(ctx context.Context, request mcp.CallToolReque return nil, fmt.Errorf("command is required") } - // 0 means "use configured default" - vm.Service falls back to cfg.CommandTimeout (default 10m). 
timeoutSec := request.GetInt("timeout_seconds", 0) - timeout := time.Duration(timeoutSec) * time.Second - user := s.cfg.SSH.DefaultUser - result, err := s.vmService.RunCommand(ctx, sandboxID, user, "", command, timeout, nil) + result, err := s.service.RunCommand(ctx, sandboxID, command, timeoutSec, nil) if err != nil { s.logger.Error("run_command failed", "error", err, "sandbox_id", sandboxID, "command", command) resp := map[string]any{ @@ -256,7 +200,7 @@ func (s *Server) handleStartSandbox(ctx context.Context, request mcp.CallToolReq return nil, fmt.Errorf("sandbox_id is required") } - ip, err := s.vmService.StartSandbox(ctx, id, true) + sb, err := s.service.StartSandbox(ctx, id) if err != nil { s.logger.Error("start_sandbox failed", "error", err, "sandbox_id", id) return errorResult(map[string]any{"sandbox_id": id, "error": fmt.Sprintf("start sandbox: %s", err)}) @@ -266,8 +210,8 @@ func (s *Server) handleStartSandbox(ctx context.Context, request mcp.CallToolReq "started": true, "sandbox_id": id, } - if ip != "" { - result["ip"] = ip + if sb.IPAddress != "" { + result["ip"] = sb.IPAddress } return jsonResult(result) } @@ -283,7 +227,7 @@ func (s *Server) handleStopSandbox(ctx context.Context, request mcp.CallToolRequ return nil, fmt.Errorf("sandbox_id is required") } - err := s.vmService.StopSandbox(ctx, id, false) + err := s.service.StopSandbox(ctx, id, false) if err != nil { s.logger.Error("stop_sandbox failed", "error", err, "sandbox_id", id) return errorResult(map[string]any{"sandbox_id": id, "error": fmt.Sprintf("stop sandbox: %s", err)}) @@ -306,7 +250,7 @@ func (s *Server) handleGetSandbox(ctx context.Context, request mcp.CallToolReque return nil, fmt.Errorf("sandbox_id is required") } - sb, err := s.vmService.GetSandbox(ctx, id) + sb, err := s.service.GetSandbox(ctx, id) if err != nil { s.logger.Error("get_sandbox failed", "error", err, "sandbox_id", id) return errorResult(map[string]any{"sandbox_id": id, "error": fmt.Sprintf("get sandbox: %s", err)}) @@ 
-314,22 +258,14 @@ func (s *Server) handleGetSandbox(ctx context.Context, request mcp.CallToolReque result := map[string]any{ "sandbox_id": sb.ID, - "name": sb.SandboxName, + "name": sb.Name, "state": sb.State, "base_image": sb.BaseImage, - "network": sb.Network, "agent_id": sb.AgentID, "created_at": sb.CreatedAt.Format(time.RFC3339), - "updated_at": sb.UpdatedAt.Format(time.RFC3339), - } - if sb.IPAddress != nil { - result["ip"] = *sb.IPAddress } - if sb.HostName != nil { - result["host"] = *sb.HostName - } - if sb.HostAddress != nil { - result["host_address"] = *sb.HostAddress + if sb.IPAddress != "" { + result["ip"] = sb.IPAddress } return jsonResult(result) @@ -341,121 +277,30 @@ func (s *Server) handleListVMs(ctx context.Context, request mcp.CallToolRequest) ctx, cancel := context.WithTimeout(ctx, 2*time.Minute) defer cancel() - var vms []map[string]any - var hostErrors []map[string]any - var err error - - if s.multiHostMgr != nil { - vms, hostErrors, err = s.listVMsFromHosts(ctx) - } else { - vms, err = s.listVMsLocal(ctx) - } - if err != nil { - return err.(*listVMsError).result() - } - - total := len(vms) - - limit := request.GetInt("limit", 0) - offset := request.GetInt("offset", 0) - if offset > 0 && offset < len(vms) { - vms = vms[offset:] - } else if offset >= len(vms) { - vms = vms[:0] - } - if limit > 0 && limit < len(vms) { - vms = vms[:limit] - } - - response := map[string]any{ - "vms": vms, - "count": len(vms), - "total": total, - } - if len(hostErrors) > 0 { - response["host_errors"] = hostErrors - } - - return jsonResult(response) -} - -// listVMsError wraps an error with a pre-built error result. 
-type listVMsError struct { - res *mcp.CallToolResult - err error -} - -func (e *listVMsError) Error() string { return e.err.Error() } -func (e *listVMsError) result() (*mcp.CallToolResult, error) { - return e.res, nil -} - -func (s *Server) listVMsFromHosts(ctx context.Context) ([]map[string]any, []map[string]any, error) { - listResult, err := s.multiHostMgr.ListDomains(ctx) + vms, err := s.service.ListVMs(ctx) if err != nil { s.logger.Error("list_vms failed", "error", err) - res, _ := errorResult(map[string]any{"error": fmt.Sprintf("list domains from hosts: %s", err)}) - return nil, nil, &listVMsError{res: res, err: err} + return errorResult(map[string]any{"error": fmt.Sprintf("list vms: %s", err)}) } - vms := make([]map[string]any, 0) - for _, domain := range listResult.Domains { - if strings.HasPrefix(domain.Name, "sbx-") { - continue - } + result := make([]map[string]any, 0, len(vms)) + for _, vm := range vms { item := map[string]any{ - "name": domain.Name, - "state": domain.State.String(), - "host": domain.HostName, - "host_address": domain.HostAddress, + "name": vm.Name, + "state": vm.State, + "prepared": vm.Prepared, } - if domain.UUID != "" { - item["uuid"] = domain.UUID + if vm.IPAddress != "" { + item["ip"] = vm.IPAddress } - vms = append(vms, item) - } - - var hostErrors []map[string]any - if len(listResult.HostErrors) > 0 { - hostErrors = make([]map[string]any, 0, len(listResult.HostErrors)) - for _, he := range listResult.HostErrors { - hostErrors = append(hostErrors, map[string]any{ - "host": he.HostName, - "address": he.HostAddress, - "error": he.Error, - }) - } - } - - return vms, hostErrors, nil -} - -func (s *Server) listVMsLocal(ctx context.Context) ([]map[string]any, error) { - cmd := exec.CommandContext(ctx, "virsh", "list", "--all", "--name") - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - s.logger.Error("list_vms failed", "error", err) - res, _ := 
errorResult(map[string]any{"error": fmt.Sprintf("virsh list: %s: %s", err, stderr.String())}) - return nil, &listVMsError{res: res, err: err} - } - - vms := make([]map[string]any, 0) - for _, name := range strings.Split(stdout.String(), "\n") { - name = strings.TrimSpace(name) - if name == "" || strings.HasPrefix(name, "sbx-") { - continue - } - vms = append(vms, map[string]any{ - "name": name, - "state": "unknown", - "host": "local", - }) + result = append(result, item) } - return vms, nil + return jsonResult(map[string]any{ + "vms": result, + "count": len(result), + "total": len(result), + }) } func (s *Server) handleCreateSnapshot(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { @@ -473,17 +318,16 @@ func (s *Server) handleCreateSnapshot(ctx context.Context, request mcp.CallToolR name = fmt.Sprintf("snap-%d", time.Now().Unix()) } - snap, err := s.vmService.CreateSnapshot(ctx, sandboxID, name, false) + snap, err := s.service.CreateSnapshot(ctx, sandboxID, name) if err != nil { s.logger.Error("create_snapshot failed", "error", err, "sandbox_id", sandboxID) return errorResult(map[string]any{"sandbox_id": sandboxID, "error": fmt.Sprintf("create snapshot: %s", err)}) } return jsonResult(map[string]any{ - "snapshot_id": snap.ID, + "snapshot_id": snap.SnapshotID, "sandbox_id": sandboxID, - "name": snap.Name, - "kind": snap.Kind, + "name": snap.SnapshotName, }) } @@ -580,8 +424,6 @@ func (s *Server) handleEditFile(ctx context.Context, request mcp.CallToolRequest oldStr := request.GetString("old_str", "") newStr := request.GetString("new_str", "") - user := s.cfg.SSH.DefaultUser - escapedPath, err := shellEscape(path) if err != nil { s.logger.Error("edit_file failed", "error", err, "sandbox_id", sandboxID, "path", path) @@ -596,7 +438,7 @@ func (s *Server) handleEditFile(ctx context.Context, request mcp.CallToolRequest } encoded := base64.StdEncoding.EncodeToString([]byte(newStr)) cmd := fmt.Sprintf("base64 -d > %s << 
'--FLUID_B64--'\n%s\n--FLUID_B64--", escapedPath, encoded) - result, err := s.vmService.RunCommand(ctx, sandboxID, user, "", cmd, 0, nil) + result, err := s.service.RunCommand(ctx, sandboxID, cmd, 0, nil) if err != nil { s.logger.Error("edit_file failed", "error", err, "sandbox_id", sandboxID, "path", path) resp := map[string]any{"sandbox_id": sandboxID, "path": path, "error": fmt.Sprintf("create file: %s", err)} @@ -622,7 +464,7 @@ func (s *Server) handleEditFile(ctx context.Context, request mcp.CallToolRequest } // Read existing file - readResult, err := s.vmService.RunCommand(ctx, sandboxID, user, "", fmt.Sprintf("base64 %s", escapedPath), 0, nil) + readResult, err := s.service.RunCommand(ctx, sandboxID, fmt.Sprintf("base64 %s", escapedPath), 0, nil) if err != nil { s.logger.Error("edit_file failed", "error", err, "sandbox_id", sandboxID, "path", path) resp := map[string]any{"sandbox_id": sandboxID, "path": path, "error": fmt.Sprintf("read file for edit: %s", err)} @@ -668,7 +510,7 @@ func (s *Server) handleEditFile(ctx context.Context, request mcp.CallToolRequest } encoded := base64.StdEncoding.EncodeToString([]byte(edited)) writeCmd := fmt.Sprintf("base64 -d > %s << '--FLUID_B64--'\n%s\n--FLUID_B64--", escapedPath, encoded) - writeResult, err := s.vmService.RunCommand(ctx, sandboxID, user, "", writeCmd, 0, nil) + writeResult, err := s.service.RunCommand(ctx, sandboxID, writeCmd, 0, nil) if err != nil { s.logger.Error("edit_file failed", "error", err, "sandbox_id", sandboxID, "path", path) resp := map[string]any{"sandbox_id": sandboxID, "path": path, "error": fmt.Sprintf("write file: %s", err)} @@ -709,13 +551,12 @@ func (s *Server) handleReadFile(ctx context.Context, request mcp.CallToolRequest return nil, fmt.Errorf("invalid path: %w", err) } - user := s.cfg.SSH.DefaultUser escapedPath, err := shellEscape(path) if err != nil { s.logger.Error("read_file failed", "error", err, "sandbox_id", sandboxID, "path", path) return 
errorResult(map[string]any{"sandbox_id": sandboxID, "path": path, "error": fmt.Sprintf("invalid path: %s", err)}) } - result, err := s.vmService.RunCommand(ctx, sandboxID, user, "", fmt.Sprintf("base64 %s", escapedPath), 0, nil) + result, err := s.service.RunCommand(ctx, sandboxID, fmt.Sprintf("base64 %s", escapedPath), 0, nil) if err != nil { s.logger.Error("read_file failed", "error", err, "sandbox_id", sandboxID, "path", path) resp := map[string]any{"sandbox_id": sandboxID, "path": path, "error": fmt.Sprintf("read file: %s", err)} @@ -840,11 +681,9 @@ func (s *Server) handleRunSourceCommand(ctx context.Context, request mcp.CallToo return nil, fmt.Errorf("command is required") } - // 0 means "use configured default" - vm.Service falls back to cfg.CommandTimeout (default 10m). timeoutSec := request.GetInt("timeout_seconds", 0) - timeout := time.Duration(timeoutSec) * time.Second - result, err := s.vmService.RunSourceVMCommand(ctx, sourceVM, command, timeout) + result, err := s.service.RunSourceCommand(ctx, sourceVM, command, timeoutSec) if err != nil { s.logger.Error("run_source_command failed", "error", err, "source_vm", sourceVM, "command", command) resp := map[string]any{ @@ -883,40 +722,15 @@ func (s *Server) handleReadSourceFile(ctx context.Context, request mcp.CallToolR return nil, fmt.Errorf("invalid path: %w", err) } - escapedPath, err := shellEscape(path) - if err != nil { - s.logger.Error("read_source_file failed", "error", err, "source_vm", sourceVM, "path", path) - return errorResult(map[string]any{"source_vm": sourceVM, "path": path, "error": fmt.Sprintf("invalid path: %s", err)}) - } - cmd := fmt.Sprintf("base64 %s", escapedPath) - result, err := s.vmService.RunSourceVMCommand(ctx, sourceVM, cmd, 0) - if err != nil { - s.logger.Error("read_source_file failed", "error", err, "source_vm", sourceVM, "path", path) - resp := map[string]any{"source_vm": sourceVM, "path": path, "error": fmt.Sprintf("read source file: %s", err)} - if result != nil { - 
resp["exit_code"] = result.ExitCode - resp["stderr"] = result.Stderr - } - return errorResult(resp) - } - if result.ExitCode != 0 { - s.logger.Error("read_source_file failed", "error", fmt.Sprintf("exit code %d", result.ExitCode), "source_vm", sourceVM, "path", path) - return errorResult(map[string]any{ - "source_vm": sourceVM, "path": path, - "exit_code": result.ExitCode, "stderr": result.Stderr, - "error": fmt.Sprintf("read source file failed with exit code %d", result.ExitCode), - }) - } - - decoded, err := base64.StdEncoding.DecodeString(strings.TrimSpace(result.Stdout)) + content, err := s.service.ReadSourceFile(ctx, sourceVM, path) if err != nil { s.logger.Error("read_source_file failed", "error", err, "source_vm", sourceVM, "path", path) - return errorResult(map[string]any{"source_vm": sourceVM, "path": path, "error": fmt.Sprintf("decode file content: %s", err)}) + return errorResult(map[string]any{"source_vm": sourceVM, "path": path, "error": fmt.Sprintf("read source file: %s", err)}) } return jsonResult(map[string]any{ "source_vm": sourceVM, "path": path, - "content": string(decoded), + "content": content, }) } diff --git a/fluid/internal/mcp/handlers_test.go b/fluid-cli/internal/mcp/handlers_test.go similarity index 69% rename from fluid/internal/mcp/handlers_test.go rename to fluid-cli/internal/mcp/handlers_test.go index 2f7b4542..b195ed20 100644 --- a/fluid/internal/mcp/handlers_test.go +++ b/fluid-cli/internal/mcp/handlers_test.go @@ -17,10 +17,8 @@ import ( "github.com/stretchr/testify/require" "github.com/aspectrr/fluid.sh/fluid/internal/config" - "github.com/aspectrr/fluid.sh/fluid/internal/provider" - "github.com/aspectrr/fluid.sh/fluid/internal/sshkeys" + "github.com/aspectrr/fluid.sh/fluid/internal/sandbox" "github.com/aspectrr/fluid.sh/fluid/internal/store" - "github.com/aspectrr/fluid.sh/fluid/internal/vm" ) // --- helpers --- @@ -60,7 +58,7 @@ func testConfig() *config.Config { } } -// --- mock store --- +// --- mock store (for playbooks) --- 
type mockStore struct { sandboxes map[string]*store.Sandbox @@ -205,31 +203,135 @@ func (m *mockStore) GetSourceVM(ctx context.Context, name string) (*store.Source func (m *mockStore) UpsertSourceVM(ctx context.Context, svm *store.SourceVM) error { return nil } func (m *mockStore) ListSourceVMs(ctx context.Context) ([]*store.SourceVM, error) { return nil, nil } -// --- test server helper --- +// --- mock sandbox.Service --- + +type mockSandboxService struct { + listSandboxesFn func(ctx context.Context) ([]*sandbox.SandboxInfo, error) + createSandboxFn func(ctx context.Context, req sandbox.CreateRequest) (*sandbox.SandboxInfo, error) + getSandboxFn func(ctx context.Context, id string) (*sandbox.SandboxInfo, error) + destroySandboxFn func(ctx context.Context, id string) error + startSandboxFn func(ctx context.Context, id string) (*sandbox.SandboxInfo, error) + stopSandboxFn func(ctx context.Context, id string, force bool) error + runCommandFn func(ctx context.Context, sandboxID, command string, timeoutSec int, env map[string]string) (*sandbox.CommandResult, error) + createSnapshotFn func(ctx context.Context, sandboxID, name string) (*sandbox.SnapshotInfo, error) + listVMsFn func(ctx context.Context) ([]*sandbox.VMInfo, error) + runSourceCommandFn func(ctx context.Context, vmName, command string, timeoutSec int) (*sandbox.SourceCommandResult, error) + readSourceFileFn func(ctx context.Context, vmName, path string) (string, error) +} + +func (m *mockSandboxService) CreateSandbox(ctx context.Context, req sandbox.CreateRequest) (*sandbox.SandboxInfo, error) { + if m.createSandboxFn != nil { + return m.createSandboxFn(ctx, req) + } + return &sandbox.SandboxInfo{ID: "SBX-new", Name: "sbx-new", State: "RUNNING"}, nil +} + +func (m *mockSandboxService) GetSandbox(ctx context.Context, id string) (*sandbox.SandboxInfo, error) { + if m.getSandboxFn != nil { + return m.getSandboxFn(ctx, id) + } + return nil, fmt.Errorf("sandbox not found: %s", id) +} + +func (m 
*mockSandboxService) ListSandboxes(ctx context.Context) ([]*sandbox.SandboxInfo, error) { + if m.listSandboxesFn != nil { + return m.listSandboxesFn(ctx) + } + return nil, nil +} + +func (m *mockSandboxService) DestroySandbox(ctx context.Context, id string) error { + if m.destroySandboxFn != nil { + return m.destroySandboxFn(ctx, id) + } + return nil +} + +func (m *mockSandboxService) StartSandbox(ctx context.Context, id string) (*sandbox.SandboxInfo, error) { + if m.startSandboxFn != nil { + return m.startSandboxFn(ctx, id) + } + return &sandbox.SandboxInfo{ID: id, State: "RUNNING"}, nil +} + +func (m *mockSandboxService) StopSandbox(ctx context.Context, id string, force bool) error { + if m.stopSandboxFn != nil { + return m.stopSandboxFn(ctx, id, force) + } + return nil +} + +func (m *mockSandboxService) RunCommand(ctx context.Context, sandboxID, command string, timeoutSec int, env map[string]string) (*sandbox.CommandResult, error) { + if m.runCommandFn != nil { + return m.runCommandFn(ctx, sandboxID, command, timeoutSec, env) + } + return &sandbox.CommandResult{SandboxID: sandboxID, ExitCode: 0}, nil +} + +func (m *mockSandboxService) CreateSnapshot(ctx context.Context, sandboxID, name string) (*sandbox.SnapshotInfo, error) { + if m.createSnapshotFn != nil { + return m.createSnapshotFn(ctx, sandboxID, name) + } + return &sandbox.SnapshotInfo{SnapshotID: "SNAP-1", SnapshotName: name, SandboxID: sandboxID}, nil +} + +func (m *mockSandboxService) ListVMs(ctx context.Context) ([]*sandbox.VMInfo, error) { + if m.listVMsFn != nil { + return m.listVMsFn(ctx) + } + return nil, nil +} + +func (m *mockSandboxService) ValidateSourceVM(ctx context.Context, vmName string) (*sandbox.ValidationInfo, error) { + return &sandbox.ValidationInfo{VMName: vmName, Valid: true}, nil +} + +func (m *mockSandboxService) PrepareSourceVM(ctx context.Context, vmName, sshUser, keyPath string) (*sandbox.PrepareInfo, error) { + return &sandbox.PrepareInfo{SourceVM: vmName, Prepared: true}, nil 
+} + +func (m *mockSandboxService) RunSourceCommand(ctx context.Context, vmName, command string, timeoutSec int) (*sandbox.SourceCommandResult, error) { + if m.runSourceCommandFn != nil { + return m.runSourceCommandFn(ctx, vmName, command, timeoutSec) + } + return &sandbox.SourceCommandResult{SourceVM: vmName, ExitCode: 0}, nil +} + +func (m *mockSandboxService) ReadSourceFile(ctx context.Context, vmName, path string) (string, error) { + if m.readSourceFileFn != nil { + return m.readSourceFileFn(ctx, vmName, path) + } + return "", nil +} + +func (m *mockSandboxService) GetHostInfo(ctx context.Context) (*sandbox.HostInfo, error) { + return &sandbox.HostInfo{}, nil +} + +func (m *mockSandboxService) Health(ctx context.Context) error { return nil } +func (m *mockSandboxService) Close() error { return nil } + +// --- test server helpers --- func testServer() *Server { st := newMockStore() cfg := testConfig() return &Server{ - cfg: cfg, - store: st, - vmService: nil, // Most tests don't need the full vmService - logger: noopLogger(), + cfg: cfg, + store: st, + service: &mockSandboxService{}, + logger: noopLogger(), } } -func testServerWithSandboxes(sandboxes ...*store.Sandbox) *Server { +func testServerWithService(svc sandbox.Service) *Server { st := newMockStore() - for _, sb := range sandboxes { - st.sandboxes[sb.ID] = sb - } cfg := testConfig() - vmSvc := vm.NewService(nil, st, vm.Config{}) return &Server{ - cfg: cfg, - store: st, - vmService: vmSvc, - logger: noopLogger(), + cfg: cfg, + store: st, + service: svc, + logger: noopLogger(), } } @@ -290,13 +392,11 @@ func TestTrackToolCall_NilTelemetry(t *testing.T) { func TestTrackToolCall_HandlerIntegration(t *testing.T) { mt := &mockTelemetry{} - st := newMockStore() - cfg := testConfig() - vmSvc := vm.NewService(nil, st, vm.Config{}) + svc := &mockSandboxService{} srv := &Server{ - cfg: cfg, - store: st, - vmService: vmSvc, + cfg: testConfig(), + store: newMockStore(), + service: svc, telemetry: mt, logger: 
noopLogger(), } @@ -394,7 +494,7 @@ func TestErrorResult_MarshalError(t *testing.T) { // --- handleListSandboxes tests --- func TestHandleListSandboxes_Empty(t *testing.T) { - srv := testServerWithSandboxes() + srv := testServer() ctx := context.Background() result, err := srv.handleListSandboxes(ctx, newRequest("list_sandboxes", nil)) @@ -408,22 +508,22 @@ func TestHandleListSandboxes_Empty(t *testing.T) { } func TestHandleListSandboxes_WithSandboxes(t *testing.T) { - ip := "192.168.1.10" - host := "host1" - hostAddr := "10.0.0.1" now := time.Now() - srv := testServerWithSandboxes( - &store.Sandbox{ - ID: "SBX-1", - SandboxName: "sbx-test", - State: store.SandboxStateRunning, - BaseImage: "ubuntu-base", - IPAddress: &ip, - HostName: &host, - HostAddress: &hostAddr, - CreatedAt: now, + svc := &mockSandboxService{ + listSandboxesFn: func(ctx context.Context) ([]*sandbox.SandboxInfo, error) { + return []*sandbox.SandboxInfo{ + { + ID: "SBX-1", + Name: "sbx-test", + State: "RUNNING", + BaseImage: "ubuntu-base", + IPAddress: "192.168.1.10", + CreatedAt: now, + }, + }, nil }, - ) + } + srv := testServerWithService(svc) ctx := context.Background() result, err := srv.handleListSandboxes(ctx, newRequest("list_sandboxes", nil)) @@ -437,23 +537,15 @@ func TestHandleListSandboxes_WithSandboxes(t *testing.T) { assert.Equal(t, "sbx-test", sb["name"]) assert.Equal(t, "RUNNING", sb["state"]) assert.Equal(t, "192.168.1.10", sb["ip"]) - assert.Equal(t, "host1", sb["host"]) - assert.Equal(t, "10.0.0.1", sb["host_address"]) } func TestHandleListSandboxes_StoreError(t *testing.T) { - st := newMockStore() - st.listSandboxesFn = func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - return nil, fmt.Errorf("db connection failed") - } - cfg := testConfig() - vmSvc := vm.NewService(nil, st, vm.Config{}) - srv := &Server{ - cfg: cfg, - store: st, - vmService: vmSvc, - logger: noopLogger(), + svc := &mockSandboxService{ + listSandboxesFn: 
func(ctx context.Context) ([]*sandbox.SandboxInfo, error) { + return nil, fmt.Errorf("db connection failed") + }, } + srv := testServerWithService(svc) ctx := context.Background() result, err := srv.handleListSandboxes(ctx, newRequest("list_sandboxes", nil)) @@ -466,21 +558,21 @@ func TestHandleListSandboxes_StoreError(t *testing.T) { // --- handleGetSandbox tests --- func TestHandleGetSandbox_Success(t *testing.T) { - ip := "192.168.1.10" now := time.Now() - srv := testServerWithSandboxes( - &store.Sandbox{ - ID: "SBX-1", - SandboxName: "sbx-test", - State: store.SandboxStateRunning, - BaseImage: "ubuntu-base", - Network: "default", - AgentID: "mcp-agent", - IPAddress: &ip, - CreatedAt: now, - UpdatedAt: now, + svc := &mockSandboxService{ + getSandboxFn: func(ctx context.Context, id string) (*sandbox.SandboxInfo, error) { + return &sandbox.SandboxInfo{ + ID: "SBX-1", + Name: "sbx-test", + State: "RUNNING", + BaseImage: "ubuntu-base", + AgentID: "mcp-agent", + IPAddress: "192.168.1.10", + CreatedAt: now, + }, nil }, - ) + } + srv := testServerWithService(svc) ctx := context.Background() result, err := srv.handleGetSandbox(ctx, newRequest("get_sandbox", map[string]any{ @@ -493,13 +585,12 @@ func TestHandleGetSandbox_Success(t *testing.T) { assert.Equal(t, "sbx-test", m["name"]) assert.Equal(t, "RUNNING", m["state"]) assert.Equal(t, "ubuntu-base", m["base_image"]) - assert.Equal(t, "default", m["network"]) assert.Equal(t, "mcp-agent", m["agent_id"]) assert.Equal(t, "192.168.1.10", m["ip"]) } func TestHandleGetSandbox_MissingID(t *testing.T) { - srv := testServerWithSandboxes() + srv := testServer() ctx := context.Background() _, err := srv.handleGetSandbox(ctx, newRequest("get_sandbox", map[string]any{})) @@ -508,7 +599,7 @@ func TestHandleGetSandbox_MissingID(t *testing.T) { } func TestHandleGetSandbox_NotFound(t *testing.T) { - srv := testServerWithSandboxes() + srv := testServer() ctx := context.Background() result, err := srv.handleGetSandbox(ctx, 
newRequest("get_sandbox", map[string]any{ @@ -824,29 +915,17 @@ func TestHandleListPlaybooks_NoPlaybooksDir(t *testing.T) { assert.Equal(t, float64(0), m["count"]) } -// --- findHostForSourceVM tests --- - -func TestFindHostForSourceVM_NoMultiHost(t *testing.T) { - srv := testServer() - ctx := context.Background() - - host, err := srv.findHostForSourceVM(ctx, "ubuntu-base", "") - assert.NoError(t, err) - assert.Nil(t, host) -} - // --- handleListVMs tests --- -// handleListVMs is tested indirectly since it depends on virsh or multiHostMgr. -// We test the dispatcher logic and ensure no panics on nil multiHostMgr. -func TestHandleListVMs_NoMultiHost_VirshUnavailable(t *testing.T) { +func TestHandleListVMs_Empty(t *testing.T) { srv := testServer() ctx := context.Background() - // With no multiHostMgr, this calls listVMsLocal which shells out to virsh. - // On machines without virsh, this will return an error - that's expected behavior. - _, _ = srv.handleListVMs(ctx, newRequest("list_vms", nil)) - // We just verify it doesn't panic + result, err := srv.handleListVMs(ctx, newRequest("list_vms", nil)) + require.NoError(t, err) + + m := parseJSON(t, result) + assert.Equal(t, float64(0), m["count"]) } // --- security tests --- @@ -876,35 +955,6 @@ func TestHandleReadFile_NullByteInPath(t *testing.T) { assert.Contains(t, err.Error(), "invalid path") } -func TestHandleEditFile_PathTraversal(t *testing.T) { - srv := testServerWithSandboxes( - &store.Sandbox{ - ID: "SBX-1", - SandboxName: "sbx-test", - State: store.SandboxStateRunning, - BaseImage: "ubuntu-base", - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - }, - ) - ctx := context.Background() - - // Path traversal with absolute path - validateFilePath cleans it - // "/var/lib/../../etc/passwd" cleans to "/etc/passwd" which is valid - // So this should NOT return an error at the validation stage - // (it would fail later when trying to connect to the non-existent sandbox) - _, err := srv.handleEditFile(ctx, 
newRequest("edit_file", map[string]any{ - "sandbox_id": "SBX-1", - "path": "/var/lib/../../etc/passwd", - "new_str": "content", - })) - // This passes path validation but fails at vmService level (no real SSH) - // The important thing is the path gets cleaned - // We can't easily test the cleaned path without a mock vmService, - // so just verify it doesn't fail at validation - assert.NotContains(t, fmt.Sprintf("%v", err), "invalid path") -} - func TestHandleEditFile_FileTooLarge(t *testing.T) { srv := testServer() ctx := context.Background() @@ -922,168 +972,34 @@ func TestHandleEditFile_FileTooLarge(t *testing.T) { } func TestHandleRunCommand_IncludesCommandInError(t *testing.T) { - srv := testServerWithSandboxes( - &store.Sandbox{ - ID: "SBX-1", - SandboxName: "sbx-test", - State: store.SandboxStateRunning, - BaseImage: "ubuntu-base", - CreatedAt: time.Now(), - UpdatedAt: time.Now(), + svc := &mockSandboxService{ + runCommandFn: func(ctx context.Context, sandboxID, command string, timeoutSec int, env map[string]string) (*sandbox.CommandResult, error) { + return nil, fmt.Errorf("ssh connection failed") }, - ) + } + srv := testServerWithService(svc) ctx := context.Background() - // This will fail because there's no real SSH connection, but the error - // response should include the command that was attempted result, err := srv.handleRunCommand(ctx, newRequest("run_command", map[string]any{ "sandbox_id": "SBX-1", "command": "whoami", })) - require.NoError(t, err) // errorResult returns nil error + require.NoError(t, err) require.True(t, result.IsError) m := parseJSON(t, result) assert.Equal(t, "whoami", m["command"]) } -// --- mock provider.Manager --- - -type mockProviderManager struct{} - -func (m *mockProviderManager) CloneVM(_ context.Context, _, _ string, _, _ int, _ string) (provider.VMRef, error) { - return provider.VMRef{}, nil -} - -func (m *mockProviderManager) CloneFromVM(_ context.Context, _, _ string, _, _ int, _ string) (provider.VMRef, error) { - 
return provider.VMRef{}, nil -} -func (m *mockProviderManager) InjectSSHKey(_ context.Context, _, _, _ string) error { return nil } -func (m *mockProviderManager) StartVM(_ context.Context, _ string) error { return nil } -func (m *mockProviderManager) StopVM(_ context.Context, _ string, _ bool) error { return nil } -func (m *mockProviderManager) DestroyVM(_ context.Context, _ string) error { return nil } -func (m *mockProviderManager) CreateSnapshot(_ context.Context, _, _ string, _ bool) (provider.SnapshotRef, error) { - return provider.SnapshotRef{}, nil -} - -func (m *mockProviderManager) DiffSnapshot(_ context.Context, _, _, _ string) (*provider.FSComparePlan, error) { - return nil, nil -} - -func (m *mockProviderManager) GetIPAddress(_ context.Context, _ string, _ time.Duration) (string, string, error) { - return "192.168.122.100", "52:54:00:00:00:01", nil -} - -func (m *mockProviderManager) GetVMState(_ context.Context, _ string) (provider.VMState, error) { - return provider.VMStateRunning, nil -} - -func (m *mockProviderManager) ValidateSourceVM(_ context.Context, _ string) (*provider.VMValidationResult, error) { - return &provider.VMValidationResult{Valid: true}, nil -} - -func (m *mockProviderManager) CheckHostResources(_ context.Context, _, _ int) (*provider.ResourceCheckResult, error) { - return &provider.ResourceCheckResult{Valid: true}, nil -} - -// --- mock SSHRunner --- - -type mockSSHRunner struct { - runFn func(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (stdout, stderr string, exitCode int, err error) -} - -func (m *mockSSHRunner) Run(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - if m.runFn != nil { - return m.runFn(ctx, addr, user, privateKeyPath, command, timeout, env, proxyJump) - } - return "", "", 0, nil -} - -func (m *mockSSHRunner) 
RunWithCert(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - if m.runFn != nil { - return m.runFn(ctx, addr, user, privateKeyPath, command, timeout, env, proxyJump) - } - return "", "", 0, nil -} - -func (m *mockSSHRunner) RunStreaming(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string, outputChan chan<- vm.OutputChunk) (string, string, int, error) { - if m.runFn != nil { - return m.runFn(ctx, addr, user, privateKeyPath, command, timeout, env, proxyJump) - } - return "", "", 0, nil -} - -func (m *mockSSHRunner) RunWithCertStreaming(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string, outputChan chan<- vm.OutputChunk) (string, string, int, error) { - if m.runFn != nil { - return m.runFn(ctx, addr, user, privateKeyPath, command, timeout, env, proxyJump) - } - return "", "", 0, nil -} - -// --- mock KeyProvider --- - -type mockKeyProvider struct{} - -func (m *mockKeyProvider) GetCredentials(_ context.Context, _, _ string) (*sshkeys.Credentials, error) { - return &sshkeys.Credentials{ - PrivateKeyPath: "/tmp/test-key", - CertificatePath: "/tmp/test-key-cert.pub", - PublicKey: "ssh-ed25519 AAAA...", - Username: "sandbox", - }, nil -} - -func (m *mockKeyProvider) GetSourceVMCredentials(_ context.Context, _ string) (*sshkeys.Credentials, error) { - return &sshkeys.Credentials{ - PrivateKeyPath: "/tmp/test-key", - CertificatePath: "/tmp/test-key-cert.pub", - PublicKey: "ssh-ed25519 AAAA...", - Username: "sandbox", - }, nil -} -func (m *mockKeyProvider) CleanupSandbox(_ context.Context, _ string) error { return nil } -func (m *mockKeyProvider) Close() error { return nil } - -// --- test server with mock VM infrastructure --- - -func testServerWithMockVM(sshFn func(ctx context.Context, addr, 
user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error), sandboxes ...*store.Sandbox) *Server { - st := newMockStore() - for _, sb := range sandboxes { - st.sandboxes[sb.ID] = sb - } - cfg := testConfig() - mockMgr := &mockProviderManager{} - mockSSH := &mockSSHRunner{runFn: sshFn} - mockKeys := &mockKeyProvider{} - vmSvc := vm.NewService(mockMgr, st, vm.Config{ - CommandTimeout: 30 * time.Second, - }, vm.WithSSHRunner(mockSSH), vm.WithKeyManager(mockKeys)) - return &Server{ - cfg: cfg, - store: st, - vmService: vmSvc, - logger: noopLogger(), - } -} - -// --- handleEditFile old_str_not_found test --- +// --- handleEditFile with mock VM --- func TestHandleEditFile_OldStrNotFound(t *testing.T) { - ip := "192.168.122.100" - srv := testServerWithMockVM( - func(_ context.Context, _, _, _, command string, _ time.Duration, _ map[string]string, _ string) (string, string, int, error) { + svc := &mockSandboxService{ + runCommandFn: func(_ context.Context, _, command string, _ int, _ map[string]string) (*sandbox.CommandResult, error) { // Return base64-encoded "hello world" for any read command - return "aGVsbG8gd29ybGQ=", "", 0, nil - }, - &store.Sandbox{ - ID: "SBX-1", - SandboxName: "sbx-test", - State: store.SandboxStateRunning, - BaseImage: "ubuntu-base", - IPAddress: &ip, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), + return &sandbox.CommandResult{ExitCode: 0, Stdout: "aGVsbG8gd29ybGQ="}, nil }, - ) + } + srv := testServerWithService(svc) ctx := context.Background() result, err := srv.handleEditFile(ctx, newRequest("edit_file", map[string]any{ @@ -1102,44 +1018,30 @@ func TestHandleEditFile_OldStrNotFound(t *testing.T) { } func TestHandleEditFile_ReplaceAll(t *testing.T) { - ip := "192.168.122.100" var writtenContent string - srv := testServerWithMockVM( - func(_ context.Context, _, _, _, command string, _ time.Duration, _ map[string]string, _ string) (string, string, int, error) { + svc := 
&mockSandboxService{ + runCommandFn: func(_ context.Context, _, command string, _ int, _ map[string]string) (*sandbox.CommandResult, error) { if strings.Contains(command, "base64 -d >") { - // Write command - capture the base64 content from the heredoc. - // commandWithEnv wraps with bash -lc %q, so the heredoc delimiter - // appears with surrounding quotes. Extract the base64 content between - // the first and second occurrence of the delimiter. + // Write command - capture the base64 content from the heredoc const delim = "--FLUID_B64--" first := strings.Index(command, delim) if first >= 0 { rest := command[first+len(delim):] second := strings.Index(rest, delim) if second > 0 { - // Content is between delimiters, with separator chars on each side b64 := rest[:second] - // Trim any separator characters (literal \n, actual newline, or quotes) b64 = strings.Trim(b64, "\\n\n'\"") - decoded, _ := base64Decode(b64) - writtenContent = decoded + decoded, _ := base64.StdEncoding.DecodeString(strings.TrimSpace(b64)) + writtenContent = string(decoded) } } - return "", "", 0, nil + return &sandbox.CommandResult{ExitCode: 0}, nil } // Read command - return base64("aaa bbb aaa") - return "YWFhIGJiYiBhYWE=", "", 0, nil - }, - &store.Sandbox{ - ID: "SBX-1", - SandboxName: "sbx-test", - State: store.SandboxStateRunning, - BaseImage: "ubuntu-base", - IPAddress: &ip, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), + return &sandbox.CommandResult{ExitCode: 0, Stdout: "YWFhIGJiYiBhYWE="}, nil }, - ) + } + srv := testServerWithService(svc) ctx := context.Background() result, err := srv.handleEditFile(ctx, newRequest("edit_file", map[string]any{ @@ -1158,11 +1060,3 @@ func TestHandleEditFile_ReplaceAll(t *testing.T) { // Verify all occurrences were replaced assert.Equal(t, "zzz bbb zzz", writtenContent) } - -func base64Decode(s string) (string, error) { - decoded, err := base64.StdEncoding.DecodeString(strings.TrimSpace(s)) - if err != nil { - return "", err - } - return 
string(decoded), nil -} diff --git a/fluid/internal/mcp/server.go b/fluid-cli/internal/mcp/server.go similarity index 79% rename from fluid/internal/mcp/server.go rename to fluid-cli/internal/mcp/server.go index 5dc9531a..b25e94e4 100644 --- a/fluid/internal/mcp/server.go +++ b/fluid-cli/internal/mcp/server.go @@ -8,10 +8,9 @@ import ( "github.com/aspectrr/fluid.sh/fluid/internal/ansible" "github.com/aspectrr/fluid.sh/fluid/internal/config" - "github.com/aspectrr/fluid.sh/fluid/internal/libvirt" + "github.com/aspectrr/fluid.sh/fluid/internal/sandbox" "github.com/aspectrr/fluid.sh/fluid/internal/store" "github.com/aspectrr/fluid.sh/fluid/internal/telemetry" - "github.com/aspectrr/fluid.sh/fluid/internal/vm" ) const ( @@ -19,37 +18,28 @@ const ( mcpAgentID = "mcp-agent" ) -// Note: The MCP server uses stdio transport with a single client connection. -// Rate limiting is unnecessary for this architecture since there is at most -// one concurrent client. If the transport changes to HTTP/SSE, add rate limiting. - // Server wraps an MCP server that exposes fluid tools over stdio. type Server struct { cfg *config.Config store store.Store - vmService *vm.Service + service sandbox.Service playbookService *ansible.PlaybookService telemetry telemetry.Service logger *slog.Logger - multiHostMgr *libvirt.MultiHostDomainManager mcpServer *server.MCPServer } // NewServer creates a new MCP server wired to the fluid services. 
-func NewServer(cfg *config.Config, st store.Store, vmService *vm.Service, tele telemetry.Service, logger *slog.Logger) *Server { +func NewServer(cfg *config.Config, st store.Store, svc sandbox.Service, tele telemetry.Service, logger *slog.Logger) *Server { s := &Server{ cfg: cfg, store: st, - vmService: vmService, + service: svc, playbookService: ansible.NewPlaybookService(st, cfg.Ansible.PlaybooksDir), telemetry: tele, logger: logger, } - if len(cfg.Hosts) > 0 { - s.multiHostMgr = libvirt.NewMultiHostDomainManager(cfg.Hosts, logger) - } - s.mcpServer = server.NewMCPServer("fluid", "0.1.0", server.WithToolCapabilities(false), ) @@ -70,11 +60,11 @@ func (s *Server) registerTools() { ), s.handleListSandboxes) s.mcpServer.AddTool(mcp.NewTool("create_sandbox", - mcp.WithDescription("Create a new sandbox VM by cloning from a source VM."), + mcp.WithDescription("Create a new sandbox VM by cloning from a source VM. Set live=true for current state, live=false to use cached image if available."), mcp.WithString("source_vm", mcp.Required(), mcp.Description("The name of the source VM to clone from (e.g., 'ubuntu-base').")), - mcp.WithString("host", mcp.Description("Optional target host name for multi-host setups.")), mcp.WithNumber("cpu", mcp.Description("Number of vCPUs (default: 2).")), mcp.WithNumber("memory_mb", mcp.Description("RAM in MB (default: 4096).")), + mcp.WithBoolean("live", mcp.Description("If true, clone from the VM's live current state. If false (default), use cached image if available.")), ), s.handleCreateSandbox) s.mcpServer.AddTool(mcp.NewTool("destroy_sandbox", @@ -105,9 +95,7 @@ func (s *Server) registerTools() { ), s.handleGetSandbox) s.mcpServer.AddTool(mcp.NewTool("list_vms", - mcp.WithDescription("List available host VMs (base images) that can be cloned to create sandboxes. Does not include sandboxes - use list_sandboxes for those."), - mcp.WithNumber("limit", mcp.Description("Maximum number of VMs to return. 
0 or omitted returns all.")), - mcp.WithNumber("offset", mcp.Description("Number of VMs to skip before returning results. Default: 0.")), + mcp.WithDescription("List available source VMs that can be cloned to create sandboxes."), ), s.handleListVMs) s.mcpServer.AddTool(mcp.NewTool("create_snapshot", @@ -132,7 +120,7 @@ func (s *Server) registerTools() { ), s.handleAddPlaybookTask) s.mcpServer.AddTool(mcp.NewTool("edit_file", - mcp.WithDescription("Edit a file on a sandbox VM by replacing text or create a new file. If old_str is empty, creates/overwrites the file with new_str. Otherwise replaces the first occurrence of old_str with new_str (or all occurrences if replace_all is true)."), + mcp.WithDescription("Edit a file on a sandbox VM by replacing text or create a new file."), mcp.WithString("sandbox_id", mcp.Required(), mcp.Description("The ID of the sandbox containing the file.")), mcp.WithString("path", mcp.Required(), mcp.Description("The absolute path to the file inside the sandbox.")), mcp.WithString("old_str", mcp.Description("The string to find and replace. If empty, the file will be created/overwritten with new_str.")), @@ -141,13 +129,13 @@ func (s *Server) registerTools() { ), s.handleEditFile) s.mcpServer.AddTool(mcp.NewTool("read_file", - mcp.WithDescription("Read the contents of a file on a sandbox VM via SSH."), + mcp.WithDescription("Read the contents of a file on a sandbox VM."), mcp.WithString("sandbox_id", mcp.Required(), mcp.Description("The ID of the sandbox containing the file.")), mcp.WithString("path", mcp.Required(), mcp.Description("The absolute path to the file inside the sandbox.")), ), s.handleReadFile) s.mcpServer.AddTool(mcp.NewTool("list_playbooks", - mcp.WithDescription("List all Ansible playbooks that have been created. 
Use get_playbook to retrieve the full contents of a specific playbook."), + mcp.WithDescription("List all Ansible playbooks."), ), s.handleListPlaybooks) s.mcpServer.AddTool(mcp.NewTool("get_playbook", @@ -156,14 +144,14 @@ func (s *Server) registerTools() { ), s.handleGetPlaybook) s.mcpServer.AddTool(mcp.NewTool("run_source_command", - mcp.WithDescription("Execute a read-only command on a source/golden VM. Only diagnostic commands are allowed (ps, ls, cat, systemctl status, etc.)."), + mcp.WithDescription("Execute a read-only command on a source/golden VM."), mcp.WithString("source_vm", mcp.Required(), mcp.Description("The name of the source VM to run the command on.")), mcp.WithString("command", mcp.Required(), mcp.Description("The read-only diagnostic command to execute.")), - mcp.WithNumber("timeout_seconds", mcp.Description("Optional command timeout in seconds. 0 or omitted uses the configured default.")), + mcp.WithNumber("timeout_seconds", mcp.Description("Optional command timeout in seconds.")), ), s.handleRunSourceCommand) s.mcpServer.AddTool(mcp.NewTool("read_source_file", - mcp.WithDescription("Read the contents of a file on a source/golden VM. This is read-only and does not modify the VM."), + mcp.WithDescription("Read the contents of a file on a source/golden VM. 
This is read-only."), mcp.WithString("source_vm", mcp.Required(), mcp.Description("The name of the source VM containing the file.")), mcp.WithString("path", mcp.Required(), mcp.Description("The absolute path to the file inside the source VM.")), ), s.handleReadSourceFile) diff --git a/fluid/internal/mcp/server_test.go b/fluid-cli/internal/mcp/server_test.go similarity index 74% rename from fluid/internal/mcp/server_test.go rename to fluid-cli/internal/mcp/server_test.go index 95a034d7..3fec1a88 100644 --- a/fluid/internal/mcp/server_test.go +++ b/fluid-cli/internal/mcp/server_test.go @@ -18,7 +18,6 @@ func TestNewServer(t *testing.T) { assert.NotNil(t, srv.mcpServer) assert.NotNil(t, srv.playbookService) assert.NotNil(t, srv.logger) - assert.Nil(t, srv.multiHostMgr) // no hosts configured } func TestNewServer_WithHosts(t *testing.T) { @@ -30,7 +29,7 @@ func TestNewServer_WithHosts(t *testing.T) { srv := NewServer(cfg, st, nil, nil, noopLogger()) require.NotNil(t, srv) - assert.NotNil(t, srv.multiHostMgr) + // multiHostMgr removed in remote mode } func TestNewServer_RegistersAllTools(t *testing.T) { @@ -39,10 +38,5 @@ func TestNewServer_RegistersAllTools(t *testing.T) { srv := NewServer(cfg, st, nil, nil, noopLogger()) require.NotNil(t, srv) - - // The MCP server should have registered 17 tools. - // We can't directly inspect the tools list from the MCPServer, - // but we verify the server was constructed without panicking - // and the mcpServer is non-nil. 
assert.NotNil(t, srv.mcpServer) } diff --git a/fluid/internal/mcp/validate.go b/fluid-cli/internal/mcp/validate.go similarity index 100% rename from fluid/internal/mcp/validate.go rename to fluid-cli/internal/mcp/validate.go diff --git a/fluid/internal/mcp/validate_test.go b/fluid-cli/internal/mcp/validate_test.go similarity index 100% rename from fluid/internal/mcp/validate_test.go rename to fluid-cli/internal/mcp/validate_test.go diff --git a/fluid-remote/internal/model/clone_result.go b/fluid-cli/internal/model/clone_result.go similarity index 100% rename from fluid-remote/internal/model/clone_result.go rename to fluid-cli/internal/model/clone_result.go diff --git a/fluid-cli/internal/modelsdev/client.go b/fluid-cli/internal/modelsdev/client.go new file mode 100644 index 00000000..5bad6f4a --- /dev/null +++ b/fluid-cli/internal/modelsdev/client.go @@ -0,0 +1,147 @@ +package modelsdev + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "slices" + "strconv" + "strings" + "time" +) + +// Model represents a model from the OpenRouter API. 
+type Model struct { + ID string `json:"id"` + Name string `json:"name"` + InputCostPer1M float64 `json:"input_cost_per_1m"` + OutputCostPer1M float64 `json:"output_cost_per_1m"` + ContextLimit int `json:"context_limit"` + OutputLimit int `json:"output_limit"` + ToolCall bool `json:"tool_call"` + Reasoning bool `json:"reasoning"` +} + +type openRouterResponse struct { + Data []openRouterEntry `json:"data"` +} + +type openRouterEntry struct { + ID string `json:"id"` + Name string `json:"name"` + ContextLength int `json:"context_length"` + Pricing openRouterPrice `json:"pricing"` + TopProvider *openRouterTop `json:"top_provider"` + SupportedParameters []string `json:"supported_parameters"` +} + +type openRouterPrice struct { + Prompt string `json:"prompt"` + Completion string `json:"completion"` +} + +type openRouterTop struct { + ContextLength int `json:"context_length"` + MaxCompletionTokens int `json:"max_completion_tokens"` +} + +// FetchTopModels fetches models from OpenRouter, groups by provider prefix, +// and returns the top model per provider (highest input cost with tool_call support). 
+func FetchTopModels(ctx context.Context) ([]Model, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "GET", "https://openrouter.ai/api/v1/models", nil) + if err != nil { + return nil, fmt.Errorf("create request: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("fetch models: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("openrouter API returned %d", resp.StatusCode) + } + + var data openRouterResponse + if err := json.NewDecoder(resp.Body).Decode(&data); err != nil { + return nil, fmt.Errorf("decode response: %w", err) + } + + type candidate struct { + entry openRouterEntry + inputPer1M float64 + } + + topByProvider := make(map[string]candidate) + + for _, entry := range data.Data { + parts := strings.SplitN(entry.ID, "/", 2) + if len(parts) != 2 { + continue + } + provider := parts[0] + + hasToolCall := slices.Contains(entry.SupportedParameters, "tool_choice") + if !hasToolCall { + continue + } + + inputPerToken, err := strconv.ParseFloat(entry.Pricing.Prompt, 64) + if err != nil || inputPerToken <= 0 { + continue + } + + inputPer1M := inputPerToken * 1_000_000 + + existing, ok := topByProvider[provider] + if !ok || inputPer1M > existing.inputPer1M { + topByProvider[provider] = candidate{entry: entry, inputPer1M: inputPer1M} + } + } + + models := make([]Model, 0, len(topByProvider)) + for _, c := range topByProvider { + outputPerToken, _ := strconv.ParseFloat(c.entry.Pricing.Completion, 64) + + outputLimit := 0 + if c.entry.TopProvider != nil { + outputLimit = c.entry.TopProvider.MaxCompletionTokens + } + + hasReasoning := slices.Contains(c.entry.SupportedParameters, "reasoning") || + slices.Contains(c.entry.SupportedParameters, "include_reasoning") + + models = append(models, Model{ + ID: c.entry.ID, + Name: c.entry.Name, + InputCostPer1M: c.inputPer1M, 
+ OutputCostPer1M: outputPerToken * 1_000_000, + ContextLimit: c.entry.ContextLength, + OutputLimit: outputLimit, + ToolCall: true, + Reasoning: hasReasoning, + }) + } + + // Sort by input cost descending + slices.SortFunc(models, func(a, b Model) int { + if a.InputCostPer1M > b.InputCostPer1M { + return -1 + } + if a.InputCostPer1M < b.InputCostPer1M { + return 1 + } + return strings.Compare(a.ID, b.ID) + }) + + if len(models) == 0 { + return nil, fmt.Errorf("no models with tool_call support found") + } + + return models, nil +} diff --git a/fluid/internal/podman/image.go b/fluid-cli/internal/podman/image.go similarity index 100% rename from fluid/internal/podman/image.go rename to fluid-cli/internal/podman/image.go diff --git a/fluid/internal/podman/run.go b/fluid-cli/internal/podman/run.go similarity index 100% rename from fluid/internal/podman/run.go rename to fluid-cli/internal/podman/run.go diff --git a/fluid/internal/readonly/prepare.go b/fluid-cli/internal/readonly/prepare.go similarity index 100% rename from fluid/internal/readonly/prepare.go rename to fluid-cli/internal/readonly/prepare.go diff --git a/fluid/internal/readonly/prepare_test.go b/fluid-cli/internal/readonly/prepare_test.go similarity index 100% rename from fluid/internal/readonly/prepare_test.go rename to fluid-cli/internal/readonly/prepare_test.go diff --git a/fluid/internal/readonly/shell.go b/fluid-cli/internal/readonly/shell.go similarity index 100% rename from fluid/internal/readonly/shell.go rename to fluid-cli/internal/readonly/shell.go diff --git a/fluid/internal/readonly/shell_test.go b/fluid-cli/internal/readonly/shell_test.go similarity index 100% rename from fluid/internal/readonly/shell_test.go rename to fluid-cli/internal/readonly/shell_test.go diff --git a/fluid/internal/readonly/validate.go b/fluid-cli/internal/readonly/validate.go similarity index 100% rename from fluid/internal/readonly/validate.go rename to fluid-cli/internal/readonly/validate.go diff --git 
a/fluid/internal/readonly/validate_test.go b/fluid-cli/internal/readonly/validate_test.go similarity index 100% rename from fluid/internal/readonly/validate_test.go rename to fluid-cli/internal/readonly/validate_test.go diff --git a/fluid-cli/internal/sandbox/remote.go b/fluid-cli/internal/sandbox/remote.go new file mode 100644 index 00000000..41094343 --- /dev/null +++ b/fluid-cli/internal/sandbox/remote.go @@ -0,0 +1,299 @@ +package sandbox + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "os" + "time" + + "github.com/aspectrr/fluid.sh/fluid/internal/config" + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" +) + +// RemoteService implements Service by calling the fluid-daemon via gRPC. +type RemoteService struct { + conn *grpc.ClientConn + client fluidv1.DaemonServiceClient +} + +// NewRemoteService dials the daemon gRPC endpoint and returns a Service. 
+// It uses TLS configuration from the ControlPlaneConfig: +// - If DaemonCAFile is set, use it to verify the daemon's TLS cert +// - If DaemonInsecure is false and no CA file, use the system cert pool +// - Only use insecure credentials when DaemonInsecure is explicitly true +func NewRemoteService(addr string, cpCfg config.ControlPlaneConfig) (*RemoteService, error) { + var creds credentials.TransportCredentials + + switch { + case cpCfg.DaemonCAFile != "": + // Use the specified CA certificate + caCert, err := os.ReadFile(cpCfg.DaemonCAFile) + if err != nil { + return nil, fmt.Errorf("read daemon CA file %s: %w", cpCfg.DaemonCAFile, err) + } + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(caCert) { + return nil, fmt.Errorf("failed to parse daemon CA certificate from %s", cpCfg.DaemonCAFile) + } + creds = credentials.NewTLS(&tls.Config{ + RootCAs: certPool, + MinVersion: tls.VersionTLS12, + }) + + case cpCfg.DaemonInsecure: + // Explicitly insecure - no TLS + creds = insecure.NewCredentials() + + default: + // Use system cert pool + creds = credentials.NewTLS(&tls.Config{ + MinVersion: tls.VersionTLS12, + }) + } + + conn, err := grpc.NewClient(addr, + grpc.WithTransportCredentials(creds), + ) + if err != nil { + return nil, fmt.Errorf("dial daemon at %s: %w", addr, err) + } + return &RemoteService{ + conn: conn, + client: fluidv1.NewDaemonServiceClient(conn), + }, nil +} + +func (r *RemoteService) Close() error { + if r.conn != nil { + return r.conn.Close() + } + return nil +} + +func (r *RemoteService) CreateSandbox(ctx context.Context, req CreateRequest) (*SandboxInfo, error) { + resp, err := r.client.CreateSandbox(ctx, &fluidv1.CreateSandboxCommand{ + BaseImage: req.SourceVM, // derived from source_vm - daemon resolves the actual image + SourceVm: req.SourceVM, + Name: req.Name, + Vcpus: int32(req.VCPUs), + MemoryMb: int32(req.MemoryMB), + TtlSeconds: int32(req.TTLSeconds), + AgentId: req.AgentID, + Network: req.Network, + Live: req.Live, + 
}) + if err != nil { + return nil, err + } + return &SandboxInfo{ + ID: resp.GetSandboxId(), + Name: resp.GetName(), + State: resp.GetState(), + IPAddress: resp.GetIpAddress(), + }, nil +} + +func (r *RemoteService) GetSandbox(ctx context.Context, id string) (*SandboxInfo, error) { + resp, err := r.client.GetSandbox(ctx, &fluidv1.GetSandboxRequest{SandboxId: id}) + if err != nil { + return nil, err + } + return protoToSandboxInfo(resp), nil +} + +func (r *RemoteService) ListSandboxes(ctx context.Context) ([]*SandboxInfo, error) { + resp, err := r.client.ListSandboxes(ctx, &fluidv1.ListSandboxesRequest{}) + if err != nil { + return nil, err + } + result := make([]*SandboxInfo, 0, len(resp.GetSandboxes())) + for _, sb := range resp.GetSandboxes() { + result = append(result, protoToSandboxInfo(sb)) + } + return result, nil +} + +func (r *RemoteService) DestroySandbox(ctx context.Context, id string) error { + _, err := r.client.DestroySandbox(ctx, &fluidv1.DestroySandboxCommand{SandboxId: id}) + return err +} + +func (r *RemoteService) StartSandbox(ctx context.Context, id string) (*SandboxInfo, error) { + resp, err := r.client.StartSandbox(ctx, &fluidv1.StartSandboxCommand{SandboxId: id}) + if err != nil { + return nil, err + } + return &SandboxInfo{ + ID: resp.GetSandboxId(), + State: resp.GetState(), + IPAddress: resp.GetIpAddress(), + }, nil +} + +func (r *RemoteService) StopSandbox(ctx context.Context, id string, force bool) error { + _, err := r.client.StopSandbox(ctx, &fluidv1.StopSandboxCommand{SandboxId: id, Force: force}) + return err +} + +func (r *RemoteService) RunCommand(ctx context.Context, sandboxID, command string, timeoutSec int, env map[string]string) (*CommandResult, error) { + resp, err := r.client.RunCommand(ctx, &fluidv1.RunCommandCommand{ + SandboxId: sandboxID, + Command: command, + TimeoutSeconds: int32(timeoutSec), + Env: env, + }) + if err != nil { + return nil, err + } + return &CommandResult{ + SandboxID: resp.GetSandboxId(), + Stdout: 
resp.GetStdout(), + Stderr: resp.GetStderr(), + ExitCode: int(resp.GetExitCode()), + DurationMS: resp.GetDurationMs(), + }, nil +} + +func (r *RemoteService) CreateSnapshot(ctx context.Context, sandboxID, name string) (*SnapshotInfo, error) { + resp, err := r.client.CreateSnapshot(ctx, &fluidv1.SnapshotCommand{ + SandboxId: sandboxID, + SnapshotName: name, + }) + if err != nil { + return nil, err + } + return &SnapshotInfo{ + SnapshotID: resp.GetSnapshotId(), + SnapshotName: resp.GetSnapshotName(), + SandboxID: resp.GetSandboxId(), + }, nil +} + +func (r *RemoteService) ListVMs(ctx context.Context) ([]*VMInfo, error) { + resp, err := r.client.ListSourceVMs(ctx, &fluidv1.ListSourceVMsCommand{}) + if err != nil { + return nil, err + } + result := make([]*VMInfo, 0, len(resp.GetVms())) + for _, vm := range resp.GetVms() { + result = append(result, &VMInfo{ + Name: vm.GetName(), + State: vm.GetState(), + IPAddress: vm.GetIpAddress(), + Prepared: vm.GetPrepared(), + }) + } + return result, nil +} + +func (r *RemoteService) ValidateSourceVM(ctx context.Context, vmName string) (*ValidationInfo, error) { + resp, err := r.client.ValidateSourceVM(ctx, &fluidv1.ValidateSourceVMCommand{SourceVm: vmName}) + if err != nil { + return nil, err + } + return &ValidationInfo{ + VMName: resp.GetSourceVm(), + Valid: resp.GetValid(), + State: resp.GetState(), + MACAddress: resp.GetMacAddress(), + IPAddress: resp.GetIpAddress(), + HasNetwork: resp.GetHasNetwork(), + Warnings: resp.GetWarnings(), + Errors: resp.GetErrors(), + }, nil +} + +func (r *RemoteService) PrepareSourceVM(ctx context.Context, vmName, sshUser, keyPath string) (*PrepareInfo, error) { + resp, err := r.client.PrepareSourceVM(ctx, &fluidv1.PrepareSourceVMCommand{ + SourceVm: vmName, + SshUser: sshUser, + SshKeyPath: keyPath, + }) + if err != nil { + return nil, err + } + return &PrepareInfo{ + SourceVM: resp.GetSourceVm(), + IPAddress: resp.GetIpAddress(), + Prepared: resp.GetPrepared(), + UserCreated: 
resp.GetUserCreated(), + ShellInstalled: resp.GetShellInstalled(), + CAKeyInstalled: resp.GetCaKeyInstalled(), + SSHDConfigured: resp.GetSshdConfigured(), + PrincipalsCreated: resp.GetPrincipalsCreated(), + SSHDRestarted: resp.GetSshdRestarted(), + }, nil +} + +func (r *RemoteService) RunSourceCommand(ctx context.Context, vmName, command string, timeoutSec int) (*SourceCommandResult, error) { + resp, err := r.client.RunSourceCommand(ctx, &fluidv1.RunSourceCommandCommand{ + SourceVm: vmName, + Command: command, + TimeoutSeconds: int32(timeoutSec), + }) + if err != nil { + return nil, err + } + return &SourceCommandResult{ + SourceVM: resp.GetSourceVm(), + ExitCode: int(resp.GetExitCode()), + Stdout: resp.GetStdout(), + Stderr: resp.GetStderr(), + }, nil +} + +func (r *RemoteService) ReadSourceFile(ctx context.Context, vmName, path string) (string, error) { + resp, err := r.client.ReadSourceFile(ctx, &fluidv1.ReadSourceFileCommand{ + SourceVm: vmName, + Path: path, + }) + if err != nil { + return "", err + } + return resp.GetContent(), nil +} + +func (r *RemoteService) GetHostInfo(ctx context.Context) (*HostInfo, error) { + resp, err := r.client.GetHostInfo(ctx, &fluidv1.GetHostInfoRequest{}) + if err != nil { + return nil, err + } + return &HostInfo{ + HostID: resp.GetHostId(), + Hostname: resp.GetHostname(), + Version: resp.GetVersion(), + TotalCPUs: int(resp.GetTotalCpus()), + TotalMemoryMB: resp.GetTotalMemoryMb(), + ActiveSandboxes: int(resp.GetActiveSandboxes()), + BaseImages: resp.GetBaseImages(), + }, nil +} + +func (r *RemoteService) Health(ctx context.Context) error { + _, err := r.client.Health(ctx, &fluidv1.HealthRequest{}) + return err +} + +// protoToSandboxInfo converts a proto SandboxInfo to the canonical type. 
+func protoToSandboxInfo(pb *fluidv1.SandboxInfo) *SandboxInfo { + var createdAt time.Time + if pb.GetCreatedAt() != "" { + createdAt, _ = time.Parse(time.RFC3339, pb.GetCreatedAt()) + } + return &SandboxInfo{ + ID: pb.GetSandboxId(), + Name: pb.GetName(), + State: pb.GetState(), + IPAddress: pb.GetIpAddress(), + BaseImage: pb.GetBaseImage(), + AgentID: pb.GetAgentId(), + VCPUs: int(pb.GetVcpus()), + MemoryMB: int(pb.GetMemoryMb()), + CreatedAt: createdAt, + } +} diff --git a/fluid-cli/internal/sandbox/service.go b/fluid-cli/internal/sandbox/service.go new file mode 100644 index 00000000..59688f23 --- /dev/null +++ b/fluid-cli/internal/sandbox/service.go @@ -0,0 +1,35 @@ +package sandbox + +import "context" + +// Service abstracts sandbox operations for the CLI, MCP, and TUI layers. +// Implementations may call a remote daemon via gRPC or operate locally. +type Service interface { + // Sandbox lifecycle + CreateSandbox(ctx context.Context, req CreateRequest) (*SandboxInfo, error) + GetSandbox(ctx context.Context, id string) (*SandboxInfo, error) + ListSandboxes(ctx context.Context) ([]*SandboxInfo, error) + DestroySandbox(ctx context.Context, id string) error + StartSandbox(ctx context.Context, id string) (*SandboxInfo, error) + StopSandbox(ctx context.Context, id string, force bool) error + + // Command execution + RunCommand(ctx context.Context, sandboxID, command string, timeoutSec int, env map[string]string) (*CommandResult, error) + + // Snapshots + CreateSnapshot(ctx context.Context, sandboxID, name string) (*SnapshotInfo, error) + + // Source VM operations + ListVMs(ctx context.Context) ([]*VMInfo, error) + ValidateSourceVM(ctx context.Context, vmName string) (*ValidationInfo, error) + PrepareSourceVM(ctx context.Context, vmName, sshUser, keyPath string) (*PrepareInfo, error) + RunSourceCommand(ctx context.Context, vmName, command string, timeoutSec int) (*SourceCommandResult, error) + ReadSourceFile(ctx context.Context, vmName, path string) (string, error) 
+ + // Host info + GetHostInfo(ctx context.Context) (*HostInfo, error) + Health(ctx context.Context) error + + // Close releases resources (e.g. gRPC connection). + Close() error +} diff --git a/fluid-cli/internal/sandbox/types.go b/fluid-cli/internal/sandbox/types.go new file mode 100644 index 00000000..ef9042ba --- /dev/null +++ b/fluid-cli/internal/sandbox/types.go @@ -0,0 +1,99 @@ +// Package sandbox defines the canonical types and service interface for +// sandbox operations. These types decouple the CLI/MCP/TUI layers from +// the transport (gRPC, local provider, etc.). +package sandbox + +import "time" + +// SandboxInfo contains details about a sandbox. +type SandboxInfo struct { + ID string `json:"id"` + Name string `json:"name"` + State string `json:"state"` + IPAddress string `json:"ip_address,omitempty"` + BaseImage string `json:"base_image"` + AgentID string `json:"agent_id"` + VCPUs int `json:"vcpus"` + MemoryMB int `json:"memory_mb"` + CreatedAt time.Time `json:"created_at"` +} + +// CreateRequest holds parameters for creating a sandbox. +type CreateRequest struct { + SourceVM string + Name string + AgentID string + VCPUs int + MemoryMB int + TTLSeconds int + Network string + Live bool +} + +// CommandResult holds the result of a command execution. +type CommandResult struct { + SandboxID string `json:"sandbox_id"` + Stdout string `json:"stdout"` + Stderr string `json:"stderr"` + ExitCode int `json:"exit_code"` + DurationMS int64 `json:"duration_ms"` +} + +// SnapshotInfo holds details about a created snapshot. +type SnapshotInfo struct { + SnapshotID string `json:"snapshot_id"` + SnapshotName string `json:"snapshot_name"` + SandboxID string `json:"sandbox_id"` +} + +// VMInfo describes a source VM available for cloning. +type VMInfo struct { + Name string `json:"name"` + State string `json:"state"` + IPAddress string `json:"ip_address,omitempty"` + Prepared bool `json:"prepared"` +} + +// ValidationInfo contains source VM validation results. 
+type ValidationInfo struct { + VMName string `json:"vm_name"` + Valid bool `json:"valid"` + State string `json:"state"` + MACAddress string `json:"mac_address,omitempty"` + IPAddress string `json:"ip_address,omitempty"` + HasNetwork bool `json:"has_network"` + Warnings []string `json:"warnings,omitempty"` + Errors []string `json:"errors,omitempty"` +} + +// PrepareInfo contains the result of preparing a source VM. +type PrepareInfo struct { + SourceVM string `json:"source_vm"` + IPAddress string `json:"ip_address,omitempty"` + Prepared bool `json:"prepared"` + UserCreated bool `json:"user_created"` + ShellInstalled bool `json:"shell_installed"` + CAKeyInstalled bool `json:"ca_key_installed"` + SSHDConfigured bool `json:"sshd_configured"` + PrincipalsCreated bool `json:"principals_created"` + SSHDRestarted bool `json:"sshd_restarted"` +} + +// SourceCommandResult holds the output of a source VM command. +type SourceCommandResult struct { + SourceVM string `json:"source_vm"` + ExitCode int `json:"exit_code"` + Stdout string `json:"stdout"` + Stderr string `json:"stderr"` +} + +// HostInfo contains host resource and capability information. +type HostInfo struct { + HostID string `json:"host_id"` + Hostname string `json:"hostname"` + Version string `json:"version"` + TotalCPUs int `json:"total_cpus"` + TotalMemoryMB int64 `json:"total_memory_mb"` + ActiveSandboxes int `json:"active_sandboxes"` + BaseImages []string `json:"base_images"` +} diff --git a/fluid-cli/internal/setup/detect.go b/fluid-cli/internal/setup/detect.go new file mode 100644 index 00000000..27910b41 --- /dev/null +++ b/fluid-cli/internal/setup/detect.go @@ -0,0 +1,49 @@ +package setup + +import ( + "context" + "fmt" + "strings" + + "github.com/aspectrr/fluid.sh/fluid/internal/hostexec" +) + +// DistroInfo holds detected OS distribution information. +type DistroInfo struct { + ID string // e.g. "ubuntu", "debian", "fedora", "rocky" + Name string // e.g. 
"Ubuntu 22.04" + PkgManager string // "apt" or "dnf" +} + +// DetectOS reads /etc/os-release to determine the distribution and package manager. +func DetectOS(ctx context.Context, run hostexec.RunFunc) (DistroInfo, error) { + stdout, _, code, err := run(ctx, "cat /etc/os-release 2>/dev/null") + if err != nil || code != 0 { + return DistroInfo{}, fmt.Errorf("cannot read /etc/os-release: host may not be Linux") + } + + info := DistroInfo{} + for _, line := range strings.Split(stdout, "\n") { + line = strings.TrimSpace(line) + if k, v, ok := strings.Cut(line, "="); ok { + v = strings.Trim(v, "\"") + switch k { + case "ID": + info.ID = v + case "PRETTY_NAME": + info.Name = v + } + } + } + + switch info.ID { + case "ubuntu", "debian", "linuxmint", "pop": + info.PkgManager = "apt" + case "fedora", "rocky", "almalinux", "centos", "rhel": + info.PkgManager = "dnf" + default: + return info, fmt.Errorf("unsupported distribution %q - see https://fluid.sh/docs/daemon for manual setup", info.ID) + } + + return info, nil +} diff --git a/fluid-cli/internal/setup/setup.go b/fluid-cli/internal/setup/setup.go new file mode 100644 index 00000000..aa7a229a --- /dev/null +++ b/fluid-cli/internal/setup/setup.go @@ -0,0 +1,31 @@ +package setup + +import ( + "context" + + "github.com/aspectrr/fluid.sh/fluid/internal/hostexec" +) + +// StepDef defines a single setup step. +type StepDef struct { + Name string + Description string + Commands []string // Human-readable commands shown before execution + // Check returns true if this step is already done (skip). + Check func(ctx context.Context, run hostexec.RunFunc) (done bool, err error) + // Execute runs the step. Uses sudoRun for privileged operations. + Execute func(ctx context.Context, sudoRun hostexec.RunFunc) error +} + +// StepResult holds the outcome of executing a single setup step. 
+type StepResult struct { + Name string + Skipped bool // already done per Check() + Success bool + Error string +} + +// AllSteps returns the ordered list of setup steps for the given distro. +func AllSteps(distro DistroInfo) []StepDef { + return allSteps(distro) +} diff --git a/fluid-cli/internal/setup/setup_test.go b/fluid-cli/internal/setup/setup_test.go new file mode 100644 index 00000000..57181876 --- /dev/null +++ b/fluid-cli/internal/setup/setup_test.go @@ -0,0 +1,144 @@ +package setup + +import ( + "context" + "strings" + "testing" + + "github.com/aspectrr/fluid.sh/fluid/internal/hostexec" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDetectOSUbuntu(t *testing.T) { + run := hostexec.RunFunc(func(ctx context.Context, command string) (string, string, int, error) { + return `NAME="Ubuntu" +VERSION="22.04.3 LTS (Jammy Jellyfish)" +ID=ubuntu +PRETTY_NAME="Ubuntu 22.04.3 LTS" +`, "", 0, nil + }) + + distro, err := DetectOS(context.Background(), run) + require.NoError(t, err) + assert.Equal(t, "ubuntu", distro.ID) + assert.Equal(t, "apt", distro.PkgManager) + assert.Equal(t, "Ubuntu 22.04.3 LTS", distro.Name) +} + +func TestDetectOSFedora(t *testing.T) { + run := hostexec.RunFunc(func(ctx context.Context, command string) (string, string, int, error) { + return `NAME="Fedora Linux" +ID=fedora +PRETTY_NAME="Fedora Linux 39" +`, "", 0, nil + }) + + distro, err := DetectOS(context.Background(), run) + require.NoError(t, err) + assert.Equal(t, "fedora", distro.ID) + assert.Equal(t, "dnf", distro.PkgManager) +} + +func TestDetectOSUnsupported(t *testing.T) { + run := hostexec.RunFunc(func(ctx context.Context, command string) (string, string, int, error) { + return `NAME="Arch Linux" +ID=arch +PRETTY_NAME="Arch Linux" +`, "", 0, nil + }) + + _, err := DetectOS(context.Background(), run) + assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported distribution") +} + +func TestDetectOSNoFile(t *testing.T) { + run 
:= hostexec.RunFunc(func(ctx context.Context, command string) (string, string, int, error) { + return "", "No such file", 1, nil + }) + + _, err := DetectOS(context.Background(), run) + assert.Error(t, err) +} + +func TestAllStepsCount(t *testing.T) { + distro := DistroInfo{ID: "ubuntu", PkgManager: "apt"} + steps := AllSteps(distro) + assert.Len(t, steps, 8) +} + +func TestStepsIdempotent(t *testing.T) { + // Simulate a system where everything is already set up + run := hostexec.RunFunc(func(ctx context.Context, command string) (string, string, int, error) { + // systemctl is-active needs to return "active" for the enable-and-start check + if command == "systemctl is-active fluid-daemon 2>/dev/null" { + return "active\n", "", 0, nil + } + // id -nG check for libvirt group membership + if strings.Contains(command, "id -nG fluid-daemon") { + return "fluid-daemon libvirt\n", "", 0, nil + } + return "", "", 0, nil // everything else passes + }) + + distro := DistroInfo{ID: "ubuntu", PkgManager: "apt"} + steps := AllSteps(distro) + + for _, step := range steps { + done, err := step.Check(context.Background(), run) + assert.NoError(t, err) + assert.True(t, done, "step %s should report done on already-configured system", step.Name) + } +} + +func TestStepsFreshInstall(t *testing.T) { + // Simulate a system where nothing is set up + run := hostexec.RunFunc(func(ctx context.Context, command string) (string, string, int, error) { + return "", "", 1, nil // everything fails + }) + + distro := DistroInfo{ID: "ubuntu", PkgManager: "apt"} + steps := AllSteps(distro) + + for _, step := range steps { + done, err := step.Check(context.Background(), run) + assert.NoError(t, err) + assert.False(t, done, "step %s should report not-done on fresh system", step.Name) + } +} + +func TestStepExecuteSuccess(t *testing.T) { + executedCmds := make([]string, 0) + sudoRun := hostexec.RunFunc(func(ctx context.Context, command string) (string, string, int, error) { + executedCmds = 
append(executedCmds, command) + return "", "", 0, nil + }) + + distro := DistroInfo{ID: "ubuntu", PkgManager: "apt"} + steps := AllSteps(distro) + + // Execute the first step (install dependencies) + err := steps[0].Execute(context.Background(), sudoRun) + assert.NoError(t, err) + assert.Greater(t, len(executedCmds), 0) +} + +func TestStepsHaveCommands(t *testing.T) { + steps := AllSteps(DistroInfo{ID: "ubuntu", PkgManager: "apt"}) + for _, step := range steps { + assert.NotEmpty(t, step.Commands, "step %s should have Commands", step.Name) + } +} + +func TestStepExecuteFailure(t *testing.T) { + sudoRun := hostexec.RunFunc(func(ctx context.Context, command string) (string, string, int, error) { + return "", "E: Unable to locate package", 100, nil + }) + + distro := DistroInfo{ID: "ubuntu", PkgManager: "apt"} + steps := AllSteps(distro) + + err := steps[0].Execute(context.Background(), sudoRun) + assert.Error(t, err) +} diff --git a/fluid-cli/internal/setup/steps.go b/fluid-cli/internal/setup/steps.go new file mode 100644 index 00000000..1d27ab0d --- /dev/null +++ b/fluid-cli/internal/setup/steps.go @@ -0,0 +1,295 @@ +package setup + +import ( + "context" + "fmt" + "runtime" + "strings" + + "github.com/aspectrr/fluid.sh/fluid/internal/hostexec" +) + +func allSteps(distro DistroInfo) []StepDef { + return []StepDef{ + stepInstallPrereqs(distro), + stepDownloadDaemon(), + stepExtractAndInstall(), + stepCreateUserDirs(), + stepAddLibvirtGroup(), + stepGenerateConfig(), + stepCreateSystemdUnit(), + stepEnableAndStart(), + } +} + +func installPrereqCommands(distro DistroInfo) []string { + switch distro.PkgManager { + case "apt": + return []string{ + "apt-get update -qq", + "apt-get install -y qemu-system-x86 qemu-utils libvirt-daemon-system libvirt-clients iproute2 bridge-utils openssh-client", + } + case "dnf": + return []string{ + "dnf install -y qemu-kvm qemu-img libvirt libvirt-client iproute bridge-utils openssh-clients", + } + default: + return []string{"(unsupported 
package manager)"} + } +} + +func stepInstallPrereqs(distro DistroInfo) StepDef { + return StepDef{ + Name: "Install dependencies", + Description: "Install QEMU, libvirt, and networking tools", + Commands: installPrereqCommands(distro), + Check: func(ctx context.Context, run hostexec.RunFunc) (bool, error) { + _, _, code, _ := run(ctx, "which qemu-system-x86_64 >/dev/null 2>&1 && which virsh >/dev/null 2>&1") + return code == 0, nil + }, + Execute: func(ctx context.Context, sudoRun hostexec.RunFunc) error { + var cmd string + switch distro.PkgManager { + case "apt": + cmd = "apt-get update -qq && apt-get install -y -qq qemu-system-x86 qemu-utils libvirt-daemon-system libvirt-clients iproute2 bridge-utils openssh-client" + case "dnf": + cmd = "dnf install -y qemu-kvm qemu-img libvirt libvirt-client iproute bridge-utils openssh-clients" + default: + return fmt.Errorf("unsupported package manager: %s", distro.PkgManager) + } + _, stderr, code, err := sudoRun(ctx, cmd) + if err != nil { + return fmt.Errorf("install dependencies: %w", err) + } + if code != 0 { + return fmt.Errorf("install dependencies failed (exit %d): %s", code, stderr) + } + return nil + }, + } +} + +func stepDownloadDaemon() StepDef { + arch := runtime.GOARCH + return StepDef{ + Name: "Download release assets", + Description: "Download the fluid-daemon versioned tarball", + Commands: []string{ + fmt.Sprintf("curl -fsSL -o /tmp/fluid-daemon.tar.gz https://github.com/aspectrr/fluid.sh/releases/latest/download/fluid-daemon_linux_%s.tar.gz", arch), + }, + Check: func(ctx context.Context, run hostexec.RunFunc) (bool, error) { + _, _, code, _ := run(ctx, "test -f /tmp/fluid-daemon.tar.gz || which fluid-daemon >/dev/null 2>&1") + return code == 0, nil + }, + Execute: func(ctx context.Context, sudoRun hostexec.RunFunc) error { + arch := runtime.GOARCH + cmd := fmt.Sprintf( + "curl -fsSL -o /tmp/fluid-daemon.tar.gz https://github.com/aspectrr/fluid.sh/releases/latest/download/fluid-daemon_linux_%s.tar.gz", + 
arch, + ) + _, stderr, code, err := sudoRun(ctx, cmd) + if err != nil { + return fmt.Errorf("download fluid-daemon: %w", err) + } + if code != 0 { + return fmt.Errorf("download failed (exit %d): %s", code, stderr) + } + return nil + }, + } +} + +func stepExtractAndInstall() StepDef { + return StepDef{ + Name: "Extract and install binary", + Description: "Extract tarball and install fluid-daemon to /usr/local/bin", + Commands: []string{ + "tar -xzf /tmp/fluid-daemon.tar.gz -C /tmp", + "install -m 755 /tmp/fluid-daemon /usr/local/bin/", + }, + Check: func(ctx context.Context, run hostexec.RunFunc) (bool, error) { + _, _, code, _ := run(ctx, "which fluid-daemon >/dev/null 2>&1") + return code == 0, nil + }, + Execute: func(ctx context.Context, sudoRun hostexec.RunFunc) error { + cmd := "tar -xzf /tmp/fluid-daemon.tar.gz -C /tmp && install -m 755 /tmp/fluid-daemon /usr/local/bin/" + _, stderr, code, err := sudoRun(ctx, cmd) + if err != nil { + return fmt.Errorf("extract and install: %w", err) + } + if code != 0 { + return fmt.Errorf("extract and install failed (exit %d): %s", code, stderr) + } + return nil + }, + } +} + +func stepCreateUserDirs() StepDef { + return StepDef{ + Name: "Create system user and directories", + Description: "Create the fluid-daemon system user and storage directories", + Commands: []string{ + "useradd --system --home /var/lib/fluid-daemon --shell /usr/sbin/nologin fluid-daemon", + "mkdir -p /etc/fluid-daemon /var/lib/fluid-daemon /var/log/fluid-daemon", + "chown -R fluid-daemon:fluid-daemon /var/lib/fluid-daemon /var/log/fluid-daemon", + }, + Check: func(ctx context.Context, run hostexec.RunFunc) (bool, error) { + _, _, code, _ := run(ctx, "id fluid-daemon >/dev/null 2>&1") + return code == 0, nil + }, + Execute: func(ctx context.Context, sudoRun hostexec.RunFunc) error { + cmds := []string{ + "id fluid-daemon >/dev/null 2>&1 || useradd --system --home /var/lib/fluid-daemon --shell /usr/sbin/nologin fluid-daemon", + "mkdir -p 
/etc/fluid-daemon /var/lib/fluid-daemon /var/log/fluid-daemon", + "chown -R fluid-daemon:fluid-daemon /var/lib/fluid-daemon /var/log/fluid-daemon", + } + cmd := strings.Join(cmds, " && ") + _, stderr, code, err := sudoRun(ctx, cmd) + if err != nil { + return fmt.Errorf("create user/dirs: %w", err) + } + if code != 0 { + return fmt.Errorf("create user/dirs failed (exit %d): %s", code, stderr) + } + return nil + }, + } +} + +func stepAddLibvirtGroup() StepDef { + return StepDef{ + Name: "Add user to libvirt group", + Description: "Add fluid-daemon user to the libvirt group for VM access", + Commands: []string{ + "usermod -aG libvirt fluid-daemon", + }, + Check: func(ctx context.Context, run hostexec.RunFunc) (bool, error) { + _, _, code, _ := run(ctx, "id -nG fluid-daemon 2>/dev/null | grep -qw libvirt") + return code == 0, nil + }, + Execute: func(ctx context.Context, sudoRun hostexec.RunFunc) error { + _, stderr, code, err := sudoRun(ctx, "usermod -aG libvirt fluid-daemon") + if err != nil { + return fmt.Errorf("add libvirt group: %w", err) + } + if code != 0 { + return fmt.Errorf("add libvirt group failed (exit %d): %s", code, stderr) + } + return nil + }, + } +} + +func stepGenerateConfig() StepDef { + return StepDef{ + Name: "Configure daemon.yaml", + Description: "Write default daemon configuration file", + Commands: []string{ + "Write /etc/fluid-daemon/daemon.yaml (default config)", + }, + Check: func(ctx context.Context, run hostexec.RunFunc) (bool, error) { + _, _, code, _ := run(ctx, "test -f /etc/fluid-daemon/daemon.yaml") + return code == 0, nil + }, + Execute: func(ctx context.Context, sudoRun hostexec.RunFunc) error { + configYAML := `listen: + grpc: ":9091" + +backend: qemu + +storage: + images: /var/lib/fluid-daemon/images + overlays: /var/lib/fluid-daemon/overlays + state: /var/lib/fluid-daemon/state.db + +network: + bridge: fluid0 + subnet: 10.0.0.0/24 + +ssh: + ca_key_path: /etc/fluid-daemon/ssh_ca + ca_pub_key_path: /etc/fluid-daemon/ssh_ca.pub + 
key_dir: /etc/fluid-daemon/keys + cert_ttl: 30m + default_user: sandbox +` + cmd := fmt.Sprintf("cat > /etc/fluid-daemon/daemon.yaml << 'FLUID_CONFIG_EOF'\n%sFLUID_CONFIG_EOF", configYAML) + _, stderr, code, err := sudoRun(ctx, cmd) + if err != nil { + return fmt.Errorf("write config: %w", err) + } + if code != 0 { + return fmt.Errorf("write config failed (exit %d): %s", code, stderr) + } + return nil + }, + } +} + +func stepCreateSystemdUnit() StepDef { + return StepDef{ + Name: "Create systemd unit", + Description: "Install the fluid-daemon systemd service file", + Commands: []string{ + "Write /etc/systemd/system/fluid-daemon.service", + }, + Check: func(ctx context.Context, run hostexec.RunFunc) (bool, error) { + _, _, code, _ := run(ctx, "test -f /etc/systemd/system/fluid-daemon.service") + return code == 0, nil + }, + Execute: func(ctx context.Context, sudoRun hostexec.RunFunc) error { + unit := `[Unit] +Description=fluid-daemon sandbox host +After=network.target libvirtd.service + +[Service] +User=fluid-daemon +Group=fluid-daemon +ExecStart=/usr/local/bin/fluid-daemon --config /etc/fluid-daemon/daemon.yaml +Restart=on-failure +RestartSec=5 +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +` + cmd := fmt.Sprintf("cat > /etc/systemd/system/fluid-daemon.service << 'FLUID_UNIT_EOF'\n%sFLUID_UNIT_EOF", unit) + _, stderr, code, err := sudoRun(ctx, cmd) + if err != nil { + return fmt.Errorf("write systemd unit: %w", err) + } + if code != 0 { + return fmt.Errorf("write systemd unit failed (exit %d): %s", code, stderr) + } + return nil + }, + } +} + +func stepEnableAndStart() StepDef { + return StepDef{ + Name: "Enable and start daemon", + Description: "Reload systemd, enable and start the fluid-daemon service", + Commands: []string{ + "systemctl daemon-reload", + "systemctl enable fluid-daemon", + "systemctl start fluid-daemon", + }, + Check: func(ctx context.Context, run hostexec.RunFunc) (bool, error) { + stdout, _, _, _ := run(ctx, "systemctl is-active 
fluid-daemon 2>/dev/null") + return strings.TrimSpace(stdout) == "active", nil + }, + Execute: func(ctx context.Context, sudoRun hostexec.RunFunc) error { + cmd := "systemctl daemon-reload && systemctl enable fluid-daemon && systemctl start fluid-daemon" + _, stderr, code, err := sudoRun(ctx, cmd) + if err != nil { + return fmt.Errorf("enable/start daemon: %w", err) + } + if code != 0 { + return fmt.Errorf("enable/start daemon failed (exit %d): %s", code, stderr) + } + return nil + }, + } +} diff --git a/fluid/internal/store/sqlite/sqlite.go b/fluid-cli/internal/store/sqlite/sqlite.go similarity index 100% rename from fluid/internal/store/sqlite/sqlite.go rename to fluid-cli/internal/store/sqlite/sqlite.go diff --git a/fluid/internal/store/sqlite/sqlite_test.go b/fluid-cli/internal/store/sqlite/sqlite_test.go similarity index 100% rename from fluid/internal/store/sqlite/sqlite_test.go rename to fluid-cli/internal/store/sqlite/sqlite_test.go diff --git a/fluid/internal/store/store.go b/fluid-cli/internal/store/store.go similarity index 100% rename from fluid/internal/store/store.go rename to fluid-cli/internal/store/store.go diff --git a/fluid/internal/telemetry/telemetry.go b/fluid-cli/internal/telemetry/telemetry.go similarity index 92% rename from fluid/internal/telemetry/telemetry.go rename to fluid-cli/internal/telemetry/telemetry.go index da7e7d07..1d71f2a2 100644 --- a/fluid/internal/telemetry/telemetry.go +++ b/fluid-cli/internal/telemetry/telemetry.go @@ -9,9 +9,9 @@ import ( "github.com/posthog/posthog-go" ) -// posthogAPIKey is the PostHog API key. By default uses dev key. +// posthogAPIKey is the PostHog API key. Empty by default - must be injected at build time. // Override at build time with: -ldflags "-X github.com/aspectrr/fluid.sh/fluid/internal/telemetry.posthogAPIKey=YOUR_KEY" -var posthogAPIKey = "phc_nZdxqaqWmZhHpWPIsUFqmwtr9WfyYaae0IOdRmh8YGT" +var posthogAPIKey = "" // Service defines the interface for telemetry operations. 
type Service interface { @@ -40,7 +40,7 @@ type posthogService struct { // NewService creates a new telemetry service based on configuration. func NewService(cfg config.TelemetryConfig) (Service, error) { - if !cfg.EnableAnonymousUsage { + if !cfg.EnableAnonymousUsage || posthogAPIKey == "" { return &NoopService{}, nil } diff --git a/fluid/internal/telemetry/telemetry_test.go b/fluid-cli/internal/telemetry/telemetry_test.go similarity index 100% rename from fluid/internal/telemetry/telemetry_test.go rename to fluid-cli/internal/telemetry/telemetry_test.go diff --git a/fluid/internal/tui/agent.go b/fluid-cli/internal/tui/agent.go similarity index 61% rename from fluid/internal/tui/agent.go rename to fluid-cli/internal/tui/agent.go index db74fdce..fd4c82ec 100644 --- a/fluid/internal/tui/agent.go +++ b/fluid-cli/internal/tui/agent.go @@ -1,16 +1,12 @@ package tui import ( - "bytes" "context" - "crypto/sha256" "encoding/base64" "encoding/json" "fmt" "io" "log/slog" - "os" - "os/exec" "path/filepath" "strings" "time" @@ -19,13 +15,10 @@ import ( "github.com/aspectrr/fluid.sh/fluid/internal/ansible" "github.com/aspectrr/fluid.sh/fluid/internal/config" - "github.com/aspectrr/fluid.sh/fluid/internal/libvirt" "github.com/aspectrr/fluid.sh/fluid/internal/llm" - "github.com/aspectrr/fluid.sh/fluid/internal/provider" - "github.com/aspectrr/fluid.sh/fluid/internal/readonly" + "github.com/aspectrr/fluid.sh/fluid/internal/sandbox" "github.com/aspectrr/fluid.sh/fluid/internal/store" "github.com/aspectrr/fluid.sh/fluid/internal/telemetry" - "github.com/aspectrr/fluid.sh/fluid/internal/vm" ) // PendingApproval represents a sandbox creation waiting for memory approval @@ -41,16 +34,12 @@ type PendingApproval struct { type FluidAgent struct { cfg *config.Config store store.Store - vmService *vm.Service - manager provider.Manager + service sandbox.Service llmClient llm.Client playbookService *ansible.PlaybookService telemetry telemetry.Service logger *slog.Logger - // Multi-host 
support - multiHostMgr *libvirt.MultiHostDomainManager - // Status callback for sending updates to TUI statusCallback func(tea.Msg) @@ -69,18 +58,9 @@ type FluidAgent struct { currentSourceVM string autoReadOnly bool - // Pending approval for memory-constrained sandbox creation - pendingApproval *PendingApproval - // Pending approval for network access pendingNetworkApproval *PendingNetworkApproval - // Pending approval for source VM preparation - pendingSourcePrepareApproval *PendingSourcePrepareApproval - - // Track VMs that have been prepared during this session (avoid re-prompting) - preparedSourceVMs map[string]bool - // Read-only mode: only query tools are available to the LLM readOnly bool } @@ -91,14 +71,8 @@ type PendingNetworkApproval struct { ResponseChan chan bool } -// PendingSourcePrepareApproval represents a source prepare request waiting for approval -type PendingSourcePrepareApproval struct { - Request SourcePrepareApprovalRequest - ResponseChan chan bool -} - // NewFluidAgent creates a new fluid agent -func NewFluidAgent(cfg *config.Config, store store.Store, vmService *vm.Service, manager provider.Manager, tele telemetry.Service, logger *slog.Logger) *FluidAgent { +func NewFluidAgent(cfg *config.Config, st store.Store, svc sandbox.Service, tele telemetry.Service, logger *slog.Logger) *FluidAgent { if logger == nil { logger = slog.New(slog.NewTextHandler(io.Discard, nil)) } @@ -108,24 +82,16 @@ func NewFluidAgent(cfg *config.Config, store store.Store, vmService *vm.Service, llmClient = llm.NewOpenRouterClient(cfg.AIAgent) } - agent := &FluidAgent{ + return &FluidAgent{ cfg: cfg, - store: store, - vmService: vmService, - manager: manager, + store: st, + service: svc, llmClient: llmClient, - playbookService: ansible.NewPlaybookService(store, cfg.Ansible.PlaybooksDir), + playbookService: ansible.NewPlaybookService(st, cfg.Ansible.PlaybooksDir), telemetry: tele, logger: logger, history: make([]llm.Message, 0), } - - // Initialize multi-host manager if 
hosts are configured - if len(cfg.Hosts) > 0 { - agent.multiHostMgr = libvirt.NewMultiHostDomainManager(cfg.Hosts, logger) - } - - return agent } // SetStatusCallback sets the callback function for status updates @@ -391,11 +357,12 @@ func (a *FluidAgent) executeTool(ctx context.Context, tc llm.ToolCall) (any, err Host string `json:"host"` CPU int `json:"cpu"` MemoryMB int `json:"memory_mb"` + Live bool `json:"live"` } if err := json.Unmarshal([]byte(tc.Function.Arguments), &args); err != nil { return nil, err } - return a.createSandbox(ctx, args.SourceVM, args.Host, args.CPU, args.MemoryMB) + return a.createSandbox(ctx, args.SourceVM, args.Host, args.CPU, args.MemoryMB, args.Live) case "destroy_sandbox": var args struct { SandboxID string `json:"sandbox_id"` @@ -678,7 +645,7 @@ func (a *FluidAgent) RunCompact() tea.Cmd { // Command implementations func (a *FluidAgent) listSandboxes(ctx context.Context) (map[string]any, error) { - sandboxes, err := a.vmService.GetSandboxes(ctx, store.SandboxFilter{}, nil) + sandboxes, err := a.service.ListSandboxes(ctx) if err != nil { a.logger.Error("list sandboxes query failed", "error", err) return nil, err @@ -689,19 +656,13 @@ func (a *FluidAgent) listSandboxes(ctx context.Context) (map[string]any, error) for _, sb := range sandboxes { item := map[string]any{ "id": sb.ID, - "name": sb.SandboxName, + "name": sb.Name, "state": sb.State, "base_image": sb.BaseImage, "created_at": sb.CreatedAt.Format(time.RFC3339), } - if sb.IPAddress != nil { - item["ip"] = *sb.IPAddress - } - if sb.HostName != nil { - item["host"] = *sb.HostName - } - if sb.HostAddress != nil { - item["host_address"] = *sb.HostAddress + if sb.IPAddress != "" { + item["ip"] = sb.IPAddress } result = append(result, item) } @@ -712,147 +673,40 @@ func (a *FluidAgent) listSandboxes(ctx context.Context) (map[string]any, error) }, nil } -func (a *FluidAgent) createSandbox(ctx context.Context, sourceVM, hostName string, cpu, memoryMB int) (map[string]any, error) { +func 
(a *FluidAgent) createSandbox(ctx context.Context, sourceVM, hostName string, cpu, memoryMB int, live bool) (map[string]any, error) { if sourceVM == "" { return nil, fmt.Errorf("source-vm is required (e.g., create ubuntu-base)") } - a.logger.Info("sandbox creation attempt", "source_vm", sourceVM, "host", hostName, "cpu", cpu, "memory_mb", memoryMB) - - // Determine target host and manager - var host *config.HostConfig - var mgr provider.Manager - - if a.multiHostMgr != nil { - var err error - host, err = a.findHostForSourceVM(ctx, sourceVM, hostName) - if err != nil { - return nil, fmt.Errorf("find host for source VM: %w", err) - } - } - - if host != nil { - mgr = a.vmService.GetRemoteManager(host) - } else { - mgr = a.vmService.GetManager() - } - - // Use provided values or defaults - cpuCount := cpu - if cpuCount <= 0 { - cpuCount = a.vmService.GetDefaultCPUs() - } - memMB := memoryMB - if memMB <= 0 { - memMB = a.vmService.GetDefaultMemory() - } - - // Sandbox name will be auto-generated as sbx-{id} - - validation := a.vmService.CheckResourcesForSandbox(ctx, mgr, sourceVM, cpuCount, memMB) - - // If source VM is invalid, fail immediately - if !validation.SourceVMValid { - return nil, fmt.Errorf("source VM validation failed: %s", strings.Join(validation.VMErrors, "; ")) - } - - // If resources are insufficient, request human approval - if validation.NeedsApproval { - a.logger.Warn("resource validation needs approval", "source_vm", sourceVM, "required_mb", validation.ResourceCheck.RequiredMemoryMB, "available_mb", validation.ResourceCheck.AvailableMemoryMB) - hostNameStr := "local" - if host != nil { - hostNameStr = host.Name - } - - request := MemoryApprovalRequest{ - SourceVM: sourceVM, - HostName: hostNameStr, - RequiredMemoryMB: validation.ResourceCheck.RequiredMemoryMB, - AvailableMemoryMB: validation.ResourceCheck.AvailableMemoryMB, - TotalMemoryMB: validation.ResourceCheck.TotalMemoryMB, - Warnings: validation.ResourceCheck.Warnings, - Errors: 
validation.ResourceCheck.Errors, - } - - // Create a channel to wait for approval response - responseChan := make(chan bool, 1) - - // Store pending approval and send request to TUI - a.pendingApproval = &PendingApproval{ - Request: request, - SourceVM: sourceVM, - SandboxName: "", // Auto-generated as sbx-{id} - Host: host, - ResponseChan: responseChan, - } - - // Send the approval request to the TUI - a.sendStatus(MemoryApprovalRequestMsg{Request: request}) - - // Wait for response (this blocks the agent until user responds) - approved := <-responseChan - a.pendingApproval = nil - a.logger.Info("memory approval response", "approved", approved, "source_vm", sourceVM) - - if !approved { - return nil, fmt.Errorf("sandbox creation denied: insufficient memory (need %d MB, have %d MB available) - human approval was not granted", - validation.ResourceCheck.RequiredMemoryMB, validation.ResourceCheck.AvailableMemoryMB) - } - } - - // Proceed with creation (resources approved or sufficient) - if host != nil { - // Create on remote host - sb, ip, err := a.vmService.CreateSandboxOnHost(ctx, host, sourceVM, "tui-agent", "", cpuCount, memMB, nil, true, true) - if err != nil { - a.logger.Error("sandbox creation failed", "source_vm", sourceVM, "host", host.Name, "error", err) - return nil, err - } - a.logger.Info("sandbox created", "sandbox_id", sb.ID, "host", host.Name, "ip", ip) + a.logger.Info("sandbox creation attempt", "source_vm", sourceVM, "cpu", cpu, "memory_mb", memoryMB, "live", live) - // Track the created sandbox for cleanup on exit - a.createdSandboxes = append(a.createdSandboxes, sb.ID) - - // Set as current sandbox for status bar display - a.currentSandboxID = sb.ID - a.currentSandboxHost = host.Name - a.currentSandboxBaseImage = sb.BaseImage - - result := map[string]any{ - "sandbox_id": sb.ID, - "name": sb.SandboxName, - "state": sb.State, - "host": host.Name, - } - if ip != "" { - result["ip"] = ip - } - return result, nil - } - - // Fall back to local creation 
- sb, ip, err := a.vmService.CreateSandbox(ctx, sourceVM, "tui-agent", "", cpuCount, memMB, nil, true, true) + sb, err := a.service.CreateSandbox(ctx, sandbox.CreateRequest{ + SourceVM: sourceVM, + AgentID: "tui-agent", + VCPUs: cpu, + MemoryMB: memoryMB, + Live: live, + }) if err != nil { - a.logger.Error("sandbox creation failed", "source_vm", sourceVM, "host", "local", "error", err) + a.logger.Error("sandbox creation failed", "source_vm", sourceVM, "error", err) return nil, err } - a.logger.Info("sandbox created", "sandbox_id", sb.ID, "host", "local", "ip", ip) + a.logger.Info("sandbox created", "sandbox_id", sb.ID, "ip", sb.IPAddress) // Track the created sandbox for cleanup on exit a.createdSandboxes = append(a.createdSandboxes, sb.ID) // Set as current sandbox for status bar display a.currentSandboxID = sb.ID - a.currentSandboxHost = "local" a.currentSandboxBaseImage = sb.BaseImage result := map[string]any{ "sandbox_id": sb.ID, - "name": sb.SandboxName, + "name": sb.Name, "state": sb.State, } - if ip != "" { - result["ip"] = ip + if sb.IPAddress != "" { + result["ip"] = sb.IPAddress } return result, nil @@ -860,10 +714,8 @@ func (a *FluidAgent) createSandbox(ctx context.Context, sourceVM, hostName strin // HandleApprovalResponse handles the response from the memory approval dialog func (a *FluidAgent) HandleApprovalResponse(approved bool) { - a.logger.Info("memory approval response", "approved", approved) - if a.pendingApproval != nil && a.pendingApproval.ResponseChan != nil { - a.pendingApproval.ResponseChan <- approved - } + // No-op in remote mode - daemon handles resource checking + a.logger.Debug("memory approval response (no-op in remote mode)", "approved", approved) } // HandleNetworkApprovalResponse handles the response from the network approval dialog @@ -876,46 +728,12 @@ func (a *FluidAgent) HandleNetworkApprovalResponse(approved bool) { // HandleSourcePrepareApprovalResponse handles the response from the source prepare approval dialog func (a 
*FluidAgent) HandleSourcePrepareApprovalResponse(approved bool) { - a.logger.Info("source prepare approval response", "approved", approved) - if a.pendingSourcePrepareApproval != nil && a.pendingSourcePrepareApproval.ResponseChan != nil { - a.pendingSourcePrepareApproval.ResponseChan <- approved - } -} - -// findHostForSourceVM finds the host that has the given source VM. -// If hostName is specified, only that host is checked. -// Returns nil if no remote host has the VM (fallback to local). -func (a *FluidAgent) findHostForSourceVM(ctx context.Context, sourceVM, hostName string) (*config.HostConfig, error) { - if a.multiHostMgr == nil { - return nil, nil - } - - a.logger.Debug("finding host for source VM", "source_vm", sourceVM, "host_name", hostName) - - // If specific host requested, check only that host - if hostName != "" { - hosts := a.multiHostMgr.GetHosts() - for i := range hosts { - if hosts[i].Name == hostName { - return &hosts[i], nil - } - } - a.logger.Error("host not found in configuration", "host", hostName) - return nil, fmt.Errorf("host %q not found in configuration", hostName) - } - - // Search all hosts for the source VM - host, err := a.multiHostMgr.FindHostForVM(ctx, sourceVM) - if err != nil { - // Not found on any remote host - will try local - return nil, nil - } - - return host, nil + // No-op in remote mode - daemon handles source VM preparation + a.logger.Debug("source prepare approval response (no-op in remote mode)", "approved", approved) } func (a *FluidAgent) destroySandbox(ctx context.Context, id string) (map[string]any, error) { - _, err := a.vmService.DestroySandbox(ctx, id) + err := a.service.DestroySandbox(ctx, id) if err != nil { a.logger.Error("destroy sandbox failed", "sandbox_id", id, "error", err) return nil, err @@ -945,16 +763,9 @@ func (a *FluidAgent) runCommand(ctx context.Context, sandboxID, command string) // Update current sandbox if different (user is working with this sandbox) if sandboxID != "" && sandboxID != 
a.currentSandboxID { a.currentSandboxID = sandboxID - // Try to get host info and base image from sandbox - if sb, err := a.vmService.GetSandbox(ctx, sandboxID); err == nil { - if sb.HostName != nil { - a.currentSandboxHost = *sb.HostName - } else { - a.currentSandboxHost = "local" - } + if sb, err := a.service.GetSandbox(ctx, sandboxID); err == nil { a.currentSandboxBaseImage = sb.BaseImage } else { - a.currentSandboxHost = "local" a.currentSandboxBaseImage = "" } } @@ -970,19 +781,13 @@ func (a *FluidAgent) runCommand(ctx context.Context, sandboxID, command string) URLs: urls, } - // Create a channel to wait for approval response responseChan := make(chan bool, 1) - - // Store pending approval and send request to TUI a.pendingNetworkApproval = &PendingNetworkApproval{ Request: request, ResponseChan: responseChan, } - - // Send the approval request to the TUI a.sendStatus(NetworkApprovalRequestMsg{Request: request}) - // Wait for response (this blocks the agent until user responds) approved := <-responseChan a.pendingNetworkApproval = nil a.logger.Info("network approval result", "approved", approved, "tool", networkTool, "sandbox_id", sandboxID) @@ -996,43 +801,9 @@ func (a *FluidAgent) runCommand(ctx context.Context, sandboxID, command string) } } - // Create callback to send chunks to TUI for live output - outputCallback := func(chunk vm.OutputChunk) { - if chunk.IsRetry && chunk.Retry != nil { - // Signal retry to reset live output and show warning - a.sendStatus(CommandOutputResetMsg{SandboxID: sandboxID}) - a.sendStatus(RetryAttemptMsg{ - SandboxID: sandboxID, - Attempt: chunk.Retry.Attempt, - Max: chunk.Retry.Max, - Delay: chunk.Retry.Delay, - Error: chunk.Retry.Error, - }) - return - } - - // nil Data but NOT a retry usually means a reset signal from elsewhere - if chunk.Data == nil { - a.sendStatus(CommandOutputResetMsg{SandboxID: sandboxID}) - return - } - - a.sendStatus(CommandOutputChunkMsg{ - SandboxID: sandboxID, - IsStderr: chunk.IsStderr, - Chunk: 
string(chunk.Data), - }) - } - - user := a.cfg.SSH.DefaultUser - result, err := a.vmService.RunCommandWithCallback(ctx, sandboxID, user, "", command, 0, nil, outputCallback) - - // Signal streaming complete - a.sendStatus(CommandOutputDoneMsg{SandboxID: sandboxID}) - + result, err := a.service.RunCommand(ctx, sandboxID, command, 0, nil) if err != nil { a.logger.Error("command execution failed", "sandbox_id", sandboxID, "error", err) - // Return partial result if available if result != nil { return map[string]any{ "sandbox_id": sandboxID, @@ -1118,17 +889,13 @@ func (a *FluidAgent) editFile(ctx context.Context, sandboxID, path, oldStr, newS return nil, fmt.Errorf("path must be absolute: %s", path) } - user := a.cfg.SSH.DefaultUser - // If old_str is empty, create/overwrite the file if oldStr == "" { a.logger.Debug("creating file", "sandbox_id", sandboxID, "path", path) - // Use base64 encoding to safely transfer content over SSH - // This avoids issues with heredocs, special characters, and shell escaping encoded := base64.StdEncoding.EncodeToString([]byte(newStr)) cmd := fmt.Sprintf("echo '%s' | base64 -d > '%s'", encoded, path) - result, err := a.vmService.RunCommand(ctx, sandboxID, user, "", cmd, 0, nil) + result, err := a.service.RunCommand(ctx, sandboxID, cmd, 0, nil) if err != nil { a.logger.Error("failed to create file", "sandbox_id", sandboxID, "path", path, "error", err) return nil, fmt.Errorf("failed to create file: %w", err) @@ -1146,7 +913,7 @@ func (a *FluidAgent) editFile(ctx context.Context, sandboxID, path, oldStr, newS a.logger.Debug("editing file", "sandbox_id", sandboxID, "path", path) // Read the original file using base64 to handle binary/special chars - readResult, err := a.vmService.RunCommand(ctx, sandboxID, user, "", fmt.Sprintf("base64 '%s'", path), 0, nil) + readResult, err := a.service.RunCommand(ctx, sandboxID, fmt.Sprintf("base64 '%s'", path), 0, nil) if err != nil { a.logger.Error("failed to read file for edit", "sandbox_id", 
sandboxID, "path", path, "error", err) return nil, fmt.Errorf("failed to read file: %w", err) @@ -1179,7 +946,7 @@ func (a *FluidAgent) editFile(ctx context.Context, sandboxID, path, oldStr, newS encoded := base64.StdEncoding.EncodeToString([]byte(edited)) writeCmd := fmt.Sprintf("echo '%s' | base64 -d > '%s'", encoded, path) - writeResult, err := a.vmService.RunCommand(ctx, sandboxID, user, "", writeCmd, 0, nil) + writeResult, err := a.service.RunCommand(ctx, sandboxID, writeCmd, 0, nil) if err != nil { a.logger.Error("failed to write file", "sandbox_id", sandboxID, "path", path, "error", err) return nil, fmt.Errorf("failed to write file: %w", err) @@ -1209,9 +976,8 @@ func (a *FluidAgent) readFile(ctx context.Context, sandboxID, path string) (map[ } a.logger.Debug("read file", "sandbox_id", sandboxID, "path", path) - user := a.cfg.SSH.DefaultUser // Use base64 to safely transfer content that may contain special characters - result, err := a.vmService.RunCommand(ctx, sandboxID, user, "", fmt.Sprintf("base64 '%s'", path), 0, nil) + result, err := a.service.RunCommand(ctx, sandboxID, fmt.Sprintf("base64 '%s'", path), 0, nil) if err != nil { a.logger.Error("failed to read file", "sandbox_id", sandboxID, "path", path, "error", err) return nil, fmt.Errorf("failed to read file: %w", err) @@ -1286,26 +1052,26 @@ func (a *FluidAgent) getPlaybook(ctx context.Context, playbookID string) (map[st } func (a *FluidAgent) startSandbox(ctx context.Context, id string) (map[string]any, error) { - ip, err := a.vmService.StartSandbox(ctx, id, true) + sb, err := a.service.StartSandbox(ctx, id) if err != nil { a.logger.Error("start sandbox failed", "sandbox_id", id, "error", err) return nil, err } - a.logger.Info("sandbox started", "sandbox_id", id, "ip", ip) + a.logger.Info("sandbox started", "sandbox_id", id, "ip", sb.IPAddress) result := map[string]any{ "started": true, "sandbox_id": id, } - if ip != "" { - result["ip"] = ip + if sb.IPAddress != "" { + result["ip"] = sb.IPAddress } 
return result, nil } func (a *FluidAgent) stopSandbox(ctx context.Context, id string) (map[string]any, error) { - err := a.vmService.StopSandbox(ctx, id, false) + err := a.service.StopSandbox(ctx, id, false) if err != nil { a.logger.Error("stop sandbox failed", "sandbox_id", id, "error", err) return nil, err @@ -1319,7 +1085,7 @@ func (a *FluidAgent) stopSandbox(ctx context.Context, id string) (map[string]any } func (a *FluidAgent) getSandbox(ctx context.Context, id string) (map[string]any, error) { - sb, err := a.vmService.GetSandbox(ctx, id) + sb, err := a.service.GetSandbox(ctx, id) if err != nil { a.logger.Error("get sandbox failed", "sandbox_id", id, "error", err) return nil, err @@ -1327,117 +1093,39 @@ func (a *FluidAgent) getSandbox(ctx context.Context, id string) (map[string]any, result := map[string]any{ "sandbox_id": sb.ID, - "name": sb.SandboxName, + "name": sb.Name, "state": sb.State, "base_image": sb.BaseImage, - "network": sb.Network, "agent_id": sb.AgentID, "created_at": sb.CreatedAt.Format(time.RFC3339), - "updated_at": sb.UpdatedAt.Format(time.RFC3339), - } - if sb.IPAddress != nil { - result["ip"] = *sb.IPAddress } - if sb.HostName != nil { - result["host"] = *sb.HostName - } - if sb.HostAddress != nil { - result["host_address"] = *sb.HostAddress + if sb.IPAddress != "" { + result["ip"] = sb.IPAddress } return result, nil } func (a *FluidAgent) listVMs(ctx context.Context) (map[string]any, error) { - // If multihost manager is configured, query remote hosts - if a.multiHostMgr != nil { - return a.listVMsFromHosts(ctx) - } - - // Fall back to local virsh - return a.listVMsLocal(ctx) -} - -// listVMsFromHosts queries all configured remote hosts for VMs (excludes sandboxes) -func (a *FluidAgent) listVMsFromHosts(ctx context.Context) (map[string]any, error) { - listResult, err := a.multiHostMgr.ListDomains(ctx) + vms, err := a.service.ListVMs(ctx) if err != nil { - a.logger.Error("list domains from hosts failed", "error", err) - return nil, 
fmt.Errorf("list domains from hosts: %w", err) + a.logger.Error("list VMs failed", "error", err) + return nil, err } - result := make([]map[string]any, 0) - for _, domain := range listResult.Domains { - // Skip sandboxes (names starting with "sbx-") - if strings.HasPrefix(domain.Name, "sbx-") { - continue - } + result := make([]map[string]any, 0, len(vms)) + for _, v := range vms { item := map[string]any{ - "name": domain.Name, - "state": domain.State.String(), - "host": domain.HostName, - "host_address": domain.HostAddress, + "name": v.Name, + "state": v.State, + "prepared": v.Prepared, } - if domain.UUID != "" { - item["uuid"] = domain.UUID + if v.IPAddress != "" { + item["ip"] = v.IPAddress } result = append(result, item) } - // Include any host errors in the response - response := map[string]any{ - "vms": result, - "count": len(result), - } - - if len(listResult.HostErrors) > 0 { - for _, he := range listResult.HostErrors { - a.logger.Warn("host error listing VMs", "host", he.HostName, "address", he.HostAddress, "error", he.Error) - } - errors := make([]map[string]any, 0, len(listResult.HostErrors)) - for _, he := range listResult.HostErrors { - errors = append(errors, map[string]any{ - "host": he.HostName, - "address": he.HostAddress, - "error": he.Error, - }) - } - response["host_errors"] = errors - } - - return response, nil -} - -// listVMsLocal queries local virsh for VMs (excludes sandboxes) -func (a *FluidAgent) listVMsLocal(ctx context.Context) (map[string]any, error) { - // Use virsh list --all --name to get all VMs - cmd := exec.CommandContext(ctx, "virsh", "list", "--all", "--name") - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - a.logger.Error("virsh list command failed", "error", err, "stderr", stderr.String()) - return nil, fmt.Errorf("virsh list: %w: %s", err, stderr.String()) - } - - result := make([]map[string]any, 0) - for _, name := range strings.Split(stdout.String(), 
"\n") { - name = strings.TrimSpace(name) - if name == "" { - continue - } - // Skip sandboxes (names starting with "sbx-") - if strings.HasPrefix(name, "sbx-") { - continue - } - result = append(result, map[string]any{ - "name": name, - "state": "unknown", - "host": "local", - }) - } - return map[string]any{ "vms": result, "count": len(result), @@ -1449,18 +1137,17 @@ func (a *FluidAgent) createSnapshot(ctx context.Context, sandboxID, name string) name = fmt.Sprintf("snap-%d", time.Now().Unix()) } - snap, err := a.vmService.CreateSnapshot(ctx, sandboxID, name, false) + snap, err := a.service.CreateSnapshot(ctx, sandboxID, name) if err != nil { a.logger.Error("create snapshot failed", "sandbox_id", sandboxID, "name", name, "error", err) return nil, err } - a.logger.Info("snapshot created", "sandbox_id", sandboxID, "snapshot_id", snap.ID, "name", snap.Name) + a.logger.Info("snapshot created", "sandbox_id", sandboxID, "snapshot_id", snap.SnapshotID, "name", snap.SnapshotName) return map[string]any{ - "snapshot_id": snap.ID, + "snapshot_id": snap.SnapshotID, "sandbox_id": sandboxID, - "name": snap.Name, - "kind": snap.Kind, + "name": snap.SnapshotName, }, nil } @@ -1565,91 +1252,40 @@ func (a *FluidAgent) formatSandboxesResult(result map[string]any, err error) str return b.String() } -// listHostsWithVMs queries all hosts and returns VMs differentiated by type (host VM vs sandbox) +// listHostsWithVMs returns host info from the daemon func (a *FluidAgent) listHostsWithVMs(ctx context.Context) (map[string]any, error) { - // Get sandboxes from database - sandboxes, err := a.vmService.GetSandboxes(ctx, store.SandboxFilter{}, nil) + info, err := a.service.GetHostInfo(ctx) if err != nil { - a.logger.Error("list sandboxes for host view failed", "error", err) - return nil, fmt.Errorf("list sandboxes: %w", err) - } - - // Build a set of sandbox names for quick lookup - sandboxNames := make(map[string]bool) - for _, sb := range sandboxes { - sandboxNames[sb.SandboxName] = true 
+ a.logger.Error("get host info failed", "error", err) + return nil, err } - // Get all domains from libvirt - var domains []map[string]any - var hostErrors []map[string]any - - if a.multiHostMgr != nil { - listResult, err := a.multiHostMgr.ListDomains(ctx) - if err != nil { - a.logger.Error("list domains from hosts failed", "error", err) - return nil, fmt.Errorf("list domains from hosts: %w", err) - } - for _, domain := range listResult.Domains { - isSandbox := strings.HasPrefix(domain.Name, "sbx-") || sandboxNames[domain.Name] - domains = append(domains, map[string]any{ - "name": domain.Name, - "state": domain.State.String(), - "host": domain.HostName, - "host_address": domain.HostAddress, - "type": vmType(isSandbox), - }) - } - for _, he := range listResult.HostErrors { - hostErrors = append(hostErrors, map[string]any{ - "host": he.HostName, - "address": he.HostAddress, - "error": he.Error, - }) - } - } else { - // Local virsh - cmd := exec.CommandContext(ctx, "virsh", "list", "--all", "--name") - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("virsh list: %w: %s", err, stderr.String()) - } + // Build a combined view with VMs and sandboxes + vms, _ := a.service.ListVMs(ctx) + sandboxes, _ := a.service.ListSandboxes(ctx) - for _, name := range strings.Split(stdout.String(), "\n") { - name = strings.TrimSpace(name) - if name == "" { - continue - } - isSandbox := strings.HasPrefix(name, "sbx-") || sandboxNames[name] - domains = append(domains, map[string]any{ - "name": name, - "state": "unknown", - "host": "local", - "type": vmType(isSandbox), - }) - } + domains := make([]map[string]any, 0) + for _, v := range vms { + domains = append(domains, map[string]any{ + "name": v.Name, + "state": v.State, + "host": info.Hostname, + "type": "host_vm", + }) + } + for _, sb := range sandboxes { + domains = append(domains, map[string]any{ + "name": sb.Name, + "state": sb.State, + 
"host": info.Hostname, + "type": "sandbox", + }) } - response := map[string]any{ + return map[string]any{ "domains": domains, "count": len(domains), - } - if len(hostErrors) > 0 { - response["host_errors"] = hostErrors - } - - return response, nil -} - -// vmType returns "sandbox" or "host_vm" based on whether the domain is a sandbox -func vmType(isSandbox bool) string { - if isSandbox { - return "sandbox" - } - return "host_vm" + }, nil } func (a *FluidAgent) formatHostsResult(result map[string]any, err error) string { @@ -1828,38 +1464,7 @@ func (a *FluidAgent) runSourceCommand(ctx context.Context, sourceVM, command str } }() - // Proactive check: ensure source VM is prepared before attempting command - if err := a.ensureSourceVMPrepared(ctx, sourceVM); err != nil { - return nil, err - } - - // Create output callback for streaming. - outputCallback := func(chunk vm.OutputChunk) { - if chunk.IsRetry && chunk.Retry != nil { - a.sendStatus(CommandOutputResetMsg{SandboxID: sourceVM}) - a.sendStatus(RetryAttemptMsg{ - SandboxID: sourceVM, - Attempt: chunk.Retry.Attempt, - Max: chunk.Retry.Max, - Delay: chunk.Retry.Delay, - Error: chunk.Retry.Error, - }) - return - } - if chunk.Data == nil { - a.sendStatus(CommandOutputResetMsg{SandboxID: sourceVM}) - return - } - a.sendStatus(CommandOutputChunkMsg{ - SandboxID: sourceVM, - IsStderr: chunk.IsStderr, - Chunk: string(chunk.Data), - }) - } - - result, err := a.vmService.RunSourceVMCommandWithCallback(ctx, sourceVM, command, 0, outputCallback) - a.sendStatus(CommandOutputDoneMsg{SandboxID: sourceVM}) - + result, err := a.service.RunSourceCommand(ctx, sourceVM, command, 0) if err != nil { a.logger.Error("source command failed", "source_vm", sourceVM, "error", err) if result != nil { @@ -1913,30 +1518,16 @@ func (a *FluidAgent) readSourceFile(ctx context.Context, sourceVM, path string) } }() - // Proactive check: ensure source VM is prepared before attempting read - if err := a.ensureSourceVMPrepared(ctx, sourceVM); err != 
nil { - return nil, err - } - - cmd := fmt.Sprintf("base64 %s", shellEscape(path)) - result, err := a.vmService.RunSourceVMCommand(ctx, sourceVM, cmd, 0) + content, err := a.service.ReadSourceFile(ctx, sourceVM, path) if err != nil { a.logger.Error("failed to read file from source VM", "source_vm", sourceVM, "path", path, "error", err) return nil, fmt.Errorf("failed to read file from source VM: %w", err) } - if result.ExitCode != 0 { - return nil, fmt.Errorf("failed to read file: %s", result.Stderr) - } - - decoded, err := base64.StdEncoding.DecodeString(strings.TrimSpace(result.Stdout)) - if err != nil { - return nil, fmt.Errorf("failed to decode file content: %w", err) - } return map[string]any{ "source_vm": sourceVM, "path": path, - "content": string(decoded), + "content": content, }, nil } @@ -1952,12 +1543,12 @@ func (a *FluidAgent) Cleanup(ctx context.Context) error { var errs []error for _, id := range a.createdSandboxes { // Check if sandbox still exists before destroying - if _, err := a.vmService.GetSandbox(ctx, id); err != nil { + if _, err := a.service.GetSandbox(ctx, id); err != nil { // Sandbox no longer exists (already destroyed by user), skip continue } - if _, err := a.vmService.DestroySandbox(ctx, id); err != nil { + if err := a.service.DestroySandbox(ctx, id); err != nil { a.logger.Warn("cleanup: failed to destroy sandbox", "sandbox_id", id, "error", err) errs = append(errs, fmt.Errorf("destroy sandbox %s: %w", id, err)) // Continue trying to destroy others even if one fails @@ -2017,7 +1608,7 @@ func (a *FluidAgent) CleanupWithProgress(sandboxIDs []string) { ctx, cancel := context.WithTimeout(context.Background(), perSandboxTimeout) // Check if sandbox still exists - if _, err := a.vmService.GetSandbox(ctx, id); err != nil { + if _, err := a.service.GetSandbox(ctx, id); err != nil { // Already destroyed cancel() skipped++ @@ -2030,7 +1621,7 @@ func (a *FluidAgent) CleanupWithProgress(sandboxIDs []string) { } // Destroy the sandbox - if _, err := 
a.vmService.DestroySandbox(ctx, id); err != nil { + if err := a.service.DestroySandbox(ctx, id); err != nil { failed++ a.logger.Warn("cleanup: failed to destroy sandbox", "sandbox_id", id, "error", err) a.sendStatus(CleanupProgressMsg{ @@ -2088,341 +1679,3 @@ func (a *FluidAgent) GetCurrentSourceVM() string { func (a *FluidAgent) ClearAutoReadOnly() { a.autoReadOnly = false } - -// isSourceVMPrepared checks the DB (with in-memory cache) to determine if a source VM is prepared. -func (a *FluidAgent) isSourceVMPrepared(ctx context.Context, sourceVM string) bool { - // Fast path: session cache - if a.preparedSourceVMs[sourceVM] { - a.logger.Debug("source VM prepared (cache hit)", "source_vm", sourceVM) - return true - } - - svm, err := a.store.GetSourceVM(ctx, sourceVM) - if err != nil || !svm.Prepared { - a.logger.Debug("source VM not prepared in DB", "source_vm", sourceVM, "error", err) - return false - } - - // Verify CA fingerprint still matches - currentFP := a.caFingerprint() - if currentFP != "" && svm.CAFingerprint != nil && *svm.CAFingerprint != currentFP { - // CA was rotated - preparation is stale - a.logger.Warn("source VM CA fingerprint mismatch (stale preparation)", "source_vm", sourceVM) - return false - } - - // Populate session cache - if a.preparedSourceVMs == nil { - a.preparedSourceVMs = make(map[string]bool) - } - a.preparedSourceVMs[sourceVM] = true - return true -} - -// caFingerprint returns the SHA256 fingerprint of the CA public key file. -func (a *FluidAgent) caFingerprint() string { - caPubKeyPath := a.cfg.SSH.CAPubPath - if caPubKeyPath == "" { - caPubKeyPath = a.cfg.SSH.CAKeyPath + ".pub" - } - data, err := os.ReadFile(caPubKeyPath) - if err != nil { - return "" - } - h := sha256.Sum256(data) - return fmt.Sprintf("%x", h) -} - -// ensureSourceVMPrepared proactively checks if a source VM is prepared and prompts if not. -// Returns nil if prepared (or preparation succeeded), error if denied or failed. 
-func (a *FluidAgent) ensureSourceVMPrepared(ctx context.Context, sourceVM string) error { - if a.isSourceVMPrepared(ctx, sourceVM) { - a.logger.Debug("source VM already prepared", "source_vm", sourceVM) - return nil - } - - a.logger.Warn("source VM not prepared, requesting approval", "source_vm", sourceVM) - // Not prepared - prompt the user proactively - if !a.requestSourcePrepareApproval(sourceVM, fmt.Errorf("source VM %q has not been prepared for read-only access", sourceVM)) { - a.logger.Info("source VM preparation denied", "source_vm", sourceVM) - return fmt.Errorf("source VM %q is not prepared for read-only access and preparation was denied", sourceVM) - } - a.logger.Info("source VM preparation approved", "source_vm", sourceVM) - - a.sendStatus(ToolStartMsg{ToolName: "source_prepare", Args: map[string]any{"source_vm": sourceVM}}) - if err := a.prepareSourceVM(ctx, sourceVM); err != nil { - a.sendStatus(ToolCompleteMsg{ToolName: "source_prepare", Success: false, Error: err.Error()}) - return fmt.Errorf("source prepare failed: %w", err) - } - a.sendStatus(ToolCompleteMsg{ToolName: "source_prepare", Success: true, Result: map[string]any{"source_vm": sourceVM, "status": "prepared"}}) - return nil -} - -// requestSourcePrepareApproval prompts the user to approve source VM preparation. -// Returns true if approved, false otherwise. 
-func (a *FluidAgent) requestSourcePrepareApproval(sourceVM string, connErr error) bool { - // Skip if already prepared this session - if a.preparedSourceVMs[sourceVM] { - a.logger.Debug("source prepare approval skipped (session cache)", "source_vm", sourceVM) - return false - } - - a.logger.Info("requesting source prepare approval", "source_vm", sourceVM, "error", connErr) - request := SourcePrepareApprovalRequest{ - SourceVM: sourceVM, - Error: connErr.Error(), - } - - responseChan := make(chan bool, 1) - a.pendingSourcePrepareApproval = &PendingSourcePrepareApproval{ - Request: request, - ResponseChan: responseChan, - } - - a.sendStatus(SourcePrepareApprovalRequestMsg{Request: request}) - - approved := <-responseChan - a.pendingSourcePrepareApproval = nil - a.logger.Info("source prepare approval result", "source_vm", sourceVM, "approved", approved) - return approved -} - -// prepareSourceVM runs the readonly.Prepare flow for a source VM. -func (a *FluidAgent) prepareSourceVM(ctx context.Context, sourceVM string) error { - a.logger.Info("prepareSourceVM starting", "source_vm", sourceVM) - - // Read CA pub key - caPubKeyPath := a.cfg.SSH.CAPubPath - if caPubKeyPath == "" { - caPubKeyPath = a.cfg.SSH.CAKeyPath + ".pub" - } - caPubKeyBytes, err := os.ReadFile(caPubKeyPath) - if err != nil { - a.logger.Error("failed to read CA pub key", "path", caPubKeyPath, "error", err) - return fmt.Errorf("read CA pub key from %s: %w", caPubKeyPath, err) - } - - // Find host once for multihost mode - var host *config.HostConfig - if a.multiHostMgr != nil { - h, err := a.multiHostMgr.FindHostForVM(ctx, sourceVM) - if err == nil && h != nil { - host = h - a.logger.Info("found host for source VM", "source_vm", sourceVM, "host", host.Name, "address", host.Address) - } else if err != nil { - a.logger.Warn("failed to find host for source VM", "source_vm", sourceVM, "error", err) - } - } - - // Ensure VM is running before IP discovery - if host != nil { - if err := 
a.ensureVMRunningOnHost(ctx, sourceVM, host); err != nil { - a.logger.Error("failed to ensure VM running on host", "source_vm", sourceVM, "host", host.Name, "error", err) - return fmt.Errorf("ensure VM running on %s: %w", host.Name, err) - } - } else { - state, err := a.manager.GetVMState(ctx, sourceVM) - if err == nil && (state == libvirt.VMStateShutOff || state == libvirt.VMStatePaused) { - a.logger.Info("starting source VM", "source_vm", sourceVM, "state", state) - if err := a.manager.StartVM(ctx, sourceVM); err != nil { - a.logger.Error("failed to start source VM", "source_vm", sourceVM, "error", err) - return fmt.Errorf("start source VM %s: %w", sourceVM, err) - } - time.Sleep(10 * time.Second) - } - } - - // Discover VM IP - var ip string - if host != nil { - ip, err = a.discoverVMIPOnHost(ctx, sourceVM, host) - if err != nil { - a.logger.Error("failed to discover IP on host", "source_vm", sourceVM, "host", host.Name, "error", err) - return fmt.Errorf("discover IP for %s on %s: %w", sourceVM, host.Name, err) - } - } - if ip == "" { - // Try local libvirt manager with retry/timeout - discoveredIP, _, err := a.manager.GetIPAddress(ctx, sourceVM, 60*time.Second) - if err != nil { - a.logger.Error("failed to discover IP locally", "source_vm", sourceVM, "error", err) - return fmt.Errorf("discover IP for %s: %w", sourceVM, err) - } - ip = discoveredIP - } - a.logger.Info("discovered VM IP", "source_vm", sourceVM, "ip", ip) - - // Create SSH run function using configured VM user (prepare needs privileged access) - // In multihost mode, use the host as ProxyJump since the VM is on the host's private network - proxyJump := a.cfg.SSH.ProxyJump - vmUser := "root" - if host != nil { - sshUser := host.SSHUser - if sshUser == "" { - sshUser = "root" - } - proxyJump = fmt.Sprintf("%s@%s", sshUser, host.Address) - if host.SSHVMUser != "" { - vmUser = host.SSHVMUser - } - } - a.logger.Debug("SSH config for prepare", "source_vm", sourceVM, "ip", ip, "user", vmUser, 
"proxy_jump", proxyJump) - sshRun := makeSSHRunFunc(ip, vmUser, proxyJump) - - // Run prepare - // Wire progress callback to TUI status channel - onProgress := func(p readonly.PrepareProgress) { - stepNum := int(p.Step) + 1 - a.sendStatus(SourcePrepareProgressMsg{ - SourceVM: sourceVM, - StepName: p.StepName, - StepNum: stepNum, - Total: p.Total, - Done: p.Done, - }) - } - prepResult, err := readonly.Prepare(ctx, sshRun, string(caPubKeyBytes), onProgress, a.logger) - if err != nil { - a.logger.Error("readonly.Prepare failed", "source_vm", sourceVM, "ip", ip, "proxy_jump", proxyJump, "error", err) - sshCmd := fmt.Sprintf("ssh %s@%s \"whoami\"", vmUser, ip) - if proxyJump != "" { - sshCmd = fmt.Sprintf("ssh -J %s %s@%s \"whoami\"", proxyJump, vmUser, ip) - } - return fmt.Errorf("prepare failed (debug: test SSH with `%s`, check /settings for SSHVMUser): %w", sshCmd, err) - } - a.logger.Info("prepareSourceVM completed", "source_vm", sourceVM, "result", prepResult) - - // Track as prepared in session cache - if a.preparedSourceVMs == nil { - a.preparedSourceVMs = make(map[string]bool) - } - a.preparedSourceVMs[sourceVM] = true - - // Persist to store - now := time.Now().UTC() - fp := a.caFingerprint() - prepJSON, _ := json.Marshal(prepResult) - prepJSONStr := string(prepJSON) - - svm := &store.SourceVM{ - Name: sourceVM, - Prepared: true, - PreparedAt: &now, - PrepareJSON: &prepJSONStr, - CAFingerprint: &fp, - } - - // Attach host info if available - if host != nil { - svm.HostName = &host.Name - svm.HostAddress = &host.Address - } - - if err := a.store.UpsertSourceVM(ctx, svm); err != nil { - // Log but don't fail - preparation itself succeeded - a.logger.Warn("failed to persist source VM preparation state", "vm", sourceVM, "error", err) - } - - return nil -} - -// sshArgsForHost builds the common SSH arguments for connecting to a remote host, -// including the port flag which is required for non-default SSH ports. 
-func (a *FluidAgent) sshArgsForHost(host *config.HostConfig) []string { - args := []string{ - "-o", "StrictHostKeyChecking=no", - "-o", "UserKnownHostsFile=/dev/null", - "-o", "ConnectTimeout=15", - "-o", "BatchMode=yes", - } - port := host.SSHPort - if port == 0 { - port = 22 - } - args = append(args, "-p", fmt.Sprintf("%d", port)) - // No ProxyJump here - this SSHes directly to the host - return args -} - -// discoverVMIPOnHost discovers a VM's IP address on a remote host via virsh. -// Retries every 2 seconds up to 60 seconds to allow time for IP assignment after boot. -func (a *FluidAgent) discoverVMIPOnHost(ctx context.Context, vmName string, host *config.HostConfig) (string, error) { - sshUser := host.SSHUser - if sshUser == "" { - sshUser = "root" - } - sshTarget := fmt.Sprintf("%s@%s", sshUser, host.Address) - baseArgs := a.sshArgsForHost(host) - - timeout := 60 * time.Second - if a.cfg.VM.IPDiscoveryTimeout > 0 { - timeout = a.cfg.VM.IPDiscoveryTimeout - } - deadline := time.Now().Add(timeout) - - var lastErr error - for time.Now().Before(deadline) { - args := append(append([]string{}, baseArgs...), sshTarget, "virsh", "domifaddr", vmName) - cmd := exec.CommandContext(ctx, "ssh", args...) 
- var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - if err := cmd.Run(); err != nil { - a.logger.Debug("VM IP discovery attempt failed", "vm", vmName, "host", host.Name, "error", err, "stderr", stderr.String()) - lastErr = fmt.Errorf("%w (stderr: %s)", err, strings.TrimSpace(stderr.String())) - } else { - ip := parseIPFromVirshOutput(stdout.String()) - if ip != "" { - a.logger.Info("discovered VM IP on host", "vm", vmName, "host", host.Name, "ip", ip) - return ip, nil - } - lastErr = fmt.Errorf("no IP in virsh output for %s", vmName) - } - - select { - case <-ctx.Done(): - return "", ctx.Err() - case <-time.After(2 * time.Second): - } - } - return "", fmt.Errorf("timeout discovering IP for %s on %s after %s: %w", vmName, host.Name, timeout, lastErr) -} - -// ensureVMRunningOnHost checks if a VM is running on a remote host and starts it if needed. -func (a *FluidAgent) ensureVMRunningOnHost(ctx context.Context, vmName string, host *config.HostConfig) error { - sshUser := host.SSHUser - if sshUser == "" { - sshUser = "root" - } - sshTarget := fmt.Sprintf("%s@%s", sshUser, host.Address) - baseArgs := a.sshArgsForHost(host) - - // Check VM state - stateArgs := append(append([]string{}, baseArgs...), sshTarget, "virsh", "domstate", vmName) - cmd := exec.CommandContext(ctx, "ssh", stateArgs...) 
- var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - if err := cmd.Run(); err != nil { - a.logger.Error("SSH to host failed", "host", host.Name, "error", err, "stderr", stderr.String()) - return fmt.Errorf("check VM state for %s on %s: %w (stderr: %s)", vmName, host.Name, err, strings.TrimSpace(stderr.String())) - } - - state := strings.TrimSpace(stdout.String()) - a.logger.Debug("VM state check", "vm", vmName, "host", host.Name, "state", state) - if state == "shut off" || state == "paused" { - startArgs := append(append([]string{}, baseArgs...), sshTarget, "virsh", "start", vmName) - startCmd := exec.CommandContext(ctx, "ssh", startArgs...) - var startStderr bytes.Buffer - startCmd.Stderr = &startStderr - if err := startCmd.Run(); err != nil { - a.logger.Error("SSH virsh start failed", "host", host.Name, "vm", vmName, "error", err, "stderr", startStderr.String()) - return fmt.Errorf("start VM %s on %s: %w (stderr: %s)", vmName, host.Name, err, strings.TrimSpace(startStderr.String())) - } - a.logger.Info("VM started on host", "vm", vmName, "host", host.Name) - time.Sleep(10 * time.Second) - } else { - a.logger.Debug("VM already running on host", "vm", vmName, "host", host.Name, "state", state) - } - return nil -} diff --git a/fluid/internal/tui/agent_test.go b/fluid-cli/internal/tui/agent_test.go similarity index 100% rename from fluid/internal/tui/agent_test.go rename to fluid-cli/internal/tui/agent_test.go diff --git a/fluid/internal/tui/ascii.go b/fluid-cli/internal/tui/ascii.go similarity index 100% rename from fluid/internal/tui/ascii.go rename to fluid-cli/internal/tui/ascii.go diff --git a/fluid/internal/tui/confirm.go b/fluid-cli/internal/tui/confirm.go similarity index 100% rename from fluid/internal/tui/confirm.go rename to fluid-cli/internal/tui/confirm.go diff --git a/fluid/internal/tui/demo.go b/fluid-cli/internal/tui/demo.go similarity index 100% rename from fluid/internal/tui/demo.go rename to 
fluid-cli/internal/tui/demo.go diff --git a/fluid/internal/tui/history.go b/fluid-cli/internal/tui/history.go similarity index 100% rename from fluid/internal/tui/history.go rename to fluid-cli/internal/tui/history.go diff --git a/fluid/internal/tui/history_test.go b/fluid-cli/internal/tui/history_test.go similarity index 100% rename from fluid/internal/tui/history_test.go rename to fluid-cli/internal/tui/history_test.go diff --git a/fluid/internal/tui/logo.go b/fluid-cli/internal/tui/logo.go similarity index 98% rename from fluid/internal/tui/logo.go rename to fluid-cli/internal/tui/logo.go index ca2d2c46..636970c5 100644 --- a/fluid/internal/tui/logo.go +++ b/fluid-cli/internal/tui/logo.go @@ -7,8 +7,8 @@ import ( "github.com/charmbracelet/lipgloss" ) -// Version is the current version of Fluid -const Version = "0.1.0" +// Version is the current version of Fluid (set via ldflags at build time) +var Version = "dev" //nolint:staticcheck // ST1018: ANSI escape sequences are intentional for terminal colors var BannerLogo = []string{ diff --git a/fluid/internal/tui/messages.go b/fluid-cli/internal/tui/messages.go similarity index 97% rename from fluid/internal/tui/messages.go rename to fluid-cli/internal/tui/messages.go index 8760df2e..f5486cc8 100644 --- a/fluid/internal/tui/messages.go +++ b/fluid-cli/internal/tui/messages.go @@ -218,3 +218,8 @@ type AutoReadOnlyMsg struct { SourceVM string Enabled bool } + +// UpdateAvailableMsg is sent when a newer version is available +type UpdateAvailableMsg struct { + Version string +} diff --git a/fluid/internal/tui/model.go b/fluid-cli/internal/tui/model.go similarity index 98% rename from fluid/internal/tui/model.go rename to fluid-cli/internal/tui/model.go index bdc17456..b6b31a20 100644 --- a/fluid/internal/tui/model.go +++ b/fluid-cli/internal/tui/model.go @@ -13,6 +13,7 @@ import ( "github.com/charmbracelet/lipgloss" "github.com/aspectrr/fluid.sh/fluid/internal/config" + 
"github.com/aspectrr/fluid.sh/fluid/internal/updater" ) // State represents the current state of the TUI @@ -252,9 +253,26 @@ func (m Model) Init() tea.Cmd { return tea.Batch( textarea.Blink, m.spinner.Tick, + checkForUpdate(), ) } +// checkForUpdate spawns a background goroutine that checks for updates. +// Only runs once per 24 hours. Fails silently. +func checkForUpdate() tea.Cmd { + return func() tea.Msg { + if !updater.ShouldCheck() { + return nil + } + latest, _, needsUpdate, err := updater.CheckLatest(Version) + updater.MarkChecked() + if err != nil || !needsUpdate { + return nil + } + return UpdateAvailableMsg{Version: latest} + } +} + // listenForStatus returns a command that listens for status updates from the agent func (m Model) listenForStatus() tea.Cmd { return func() tea.Msg { @@ -958,6 +976,11 @@ func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { return m, nil + case UpdateAvailableMsg: + m.addSystemMessage(fmt.Sprintf("Update available: v%s - run `fluid update`", msg.Version)) + m.updateViewportContent(false) + return m, nil + case spinner.TickMsg: var cmd tea.Cmd m.spinner, cmd = m.spinner.Update(msg) diff --git a/fluid-cli/internal/tui/modelpicker.go b/fluid-cli/internal/tui/modelpicker.go new file mode 100644 index 00000000..efd571d3 --- /dev/null +++ b/fluid-cli/internal/tui/modelpicker.go @@ -0,0 +1,253 @@ +package tui + +import ( + "context" + "fmt" + "strings" + + tea "github.com/charmbracelet/bubbletea" + "github.com/charmbracelet/lipgloss" + + "github.com/aspectrr/fluid.sh/fluid/internal/config" + "github.com/aspectrr/fluid.sh/fluid/internal/modelsdev" +) + +// ModelPickerCloseMsg is sent when the model picker is closed. +type ModelPickerCloseMsg struct { + Selected bool + Model string + CompactModel string + TotalContextTokens int +} + +// ModelsLoadedMsg is sent when models finish loading. +type ModelsLoadedMsg struct { + Models []modelsdev.Model +} + +// ModelsLoadErrorMsg is sent when model loading fails. 
+type ModelsLoadErrorMsg struct { + Err error +} + +// ModelPickerModel is the Bubble Tea model for the /model picker. +type ModelPickerModel struct { + models []modelsdev.Model + cursor int + width, height int + styles Styles + selectingCompact bool + mainModel *modelsdev.Model + loading bool + loadErr error + scrollY int + cfg *config.Config +} + +// NewModelPickerModel creates a new model picker. +func NewModelPickerModel(cfg *config.Config) ModelPickerModel { + return ModelPickerModel{ + styles: DefaultStyles(), + loading: true, + cfg: cfg, + } +} + +// Init starts the async model fetch. +func (m ModelPickerModel) Init() tea.Cmd { + return func() tea.Msg { + models, err := modelsdev.FetchTopModels(context.Background()) + if err != nil { + return ModelsLoadErrorMsg{Err: err} + } + return ModelsLoadedMsg{Models: models} + } +} + +// Update handles messages for the model picker. +func (m ModelPickerModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { + switch msg := msg.(type) { + case ModelsLoadedMsg: + m.loading = false + m.models = msg.Models + m.cursor = 0 + m.scrollY = 0 + return m, nil + + case ModelsLoadErrorMsg: + m.loading = false + m.loadErr = msg.Err + return m, nil + + case tea.WindowSizeMsg: + m.width = msg.Width + m.height = msg.Height + return m, nil + + case tea.KeyMsg: + switch msg.String() { + case "esc", "ctrl+c": + return m, func() tea.Msg { + return ModelPickerCloseMsg{Selected: false} + } + + case "up", "k": + if m.cursor > 0 { + m.cursor-- + m.ensureVisible() + } + return m, nil + + case "down", "j": + if m.cursor < len(m.models)-1 { + m.cursor++ + m.ensureVisible() + } + return m, nil + + case "enter": + if m.loadErr != nil { + return m, func() tea.Msg { + return ModelPickerCloseMsg{Selected: false} + } + } + if len(m.models) == 0 { + return m, nil + } + + selected := m.models[m.cursor] + + if !m.selectingCompact { + // First selection: main model + m.mainModel = &selected + m.selectingCompact = true + m.cursor = 0 + m.scrollY = 0 + return m, 
nil + } + + // Second selection: compact model + return m, func() tea.Msg { + return ModelPickerCloseMsg{ + Selected: true, + Model: m.mainModel.ID, + CompactModel: selected.ID, + TotalContextTokens: m.mainModel.ContextLimit, + } + } + } + } + + return m, nil +} + +// View renders the model picker. +func (m ModelPickerModel) View() string { + var b strings.Builder + + titleStyle := lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("#3B82F6")) + helpStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")) + + if m.selectingCompact { + b.WriteString(titleStyle.Render("Select Compact Model")) + } else { + b.WriteString(titleStyle.Render("Select Model")) + } + b.WriteString("\n") + b.WriteString(helpStyle.Render("Up/Down: navigate | Enter: select | Esc: cancel")) + b.WriteString("\n\n") + + if m.loading { + b.WriteString(helpStyle.Render("Loading models from OpenRouter...")) + return b.String() + } + + if m.loadErr != nil { + errStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("#EF4444")) + b.WriteString(errStyle.Render(fmt.Sprintf("Failed to load models: %v", m.loadErr))) + b.WriteString("\n") + b.WriteString(helpStyle.Render("Press Enter or Esc to go back. Configure model manually in /settings.")) + return b.String() + } + + if len(m.models) == 0 { + b.WriteString(helpStyle.Render("No models found.")) + return b.String() + } + + if m.mainModel != nil { + selectedStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("#10B981")) + b.WriteString(selectedStyle.Render(fmt.Sprintf("Main model: %s", m.mainModel.ID))) + b.WriteString("\n\n") + } + + visibleItems := m.visibleCount() + start := m.scrollY + end := start + visibleItems + if end > len(m.models) { + end = len(m.models) + } + + nameWidth := 40 + if m.width > 0 && m.width < 100 { + nameWidth = m.width / 3 + } + + for i := start; i < end; i++ { + model := m.models[i] + name := model.Name + if len(name) > nameWidth { + name = name[:nameWidth-3] + "..." 
+ } + + cost := fmt.Sprintf("$%.2f/$%.2f per 1M", model.InputCostPer1M, model.OutputCostPer1M) + ctx := fmt.Sprintf("%dk ctx", model.ContextLimit/1000) + + var tags []string + if model.ToolCall { + tags = append(tags, "tools") + } + if model.Reasoning { + tags = append(tags, "reasoning") + } + tagStr := strings.Join(tags, ",") + + line := fmt.Sprintf("%-*s %-24s %-10s %s", nameWidth, name, cost, ctx, tagStr) + + if i == m.cursor { + cursorStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("#3B82F6")).Bold(true) + b.WriteString(cursorStyle.Render("> " + line)) + } else { + b.WriteString(" " + line) + } + b.WriteString("\n") + } + + if len(m.models) > visibleItems { + b.WriteString("\n") + b.WriteString(helpStyle.Render(fmt.Sprintf(" %d/%d models", m.cursor+1, len(m.models)))) + } + + return b.String() +} + +func (m ModelPickerModel) visibleCount() int { + available := m.height - 8 // header, help, padding + if m.mainModel != nil { + available -= 2 + } + if available < 5 { + return 5 + } + return available +} + +func (m *ModelPickerModel) ensureVisible() { + visible := m.visibleCount() + if m.cursor < m.scrollY { + m.scrollY = m.cursor + } + if m.cursor >= m.scrollY+visible { + m.scrollY = m.cursor - visible + 1 + } +} diff --git a/fluid/internal/tui/onboarding.go b/fluid-cli/internal/tui/onboarding.go similarity index 59% rename from fluid/internal/tui/onboarding.go rename to fluid-cli/internal/tui/onboarding.go index 6f970bb3..ef20a136 100644 --- a/fluid/internal/tui/onboarding.go +++ b/fluid-cli/internal/tui/onboarding.go @@ -3,9 +3,12 @@ package tui import ( "bytes" "context" + "encoding/json" "fmt" + "net/http" "os" "os/exec" + "runtime" "strings" "time" @@ -16,7 +19,10 @@ import ( "github.com/charmbracelet/lipgloss" "github.com/aspectrr/fluid.sh/fluid/internal/config" + "github.com/aspectrr/fluid.sh/fluid/internal/doctor" + "github.com/aspectrr/fluid.sh/fluid/internal/hostexec" "github.com/aspectrr/fluid.sh/fluid/internal/readonly" + 
"github.com/aspectrr/fluid.sh/fluid/internal/setup" ) // OnboardingStep represents the current step in onboarding @@ -25,7 +31,11 @@ type OnboardingStep int const ( StepWelcome OnboardingStep = iota StepInfraChoice - StepAddHosts // New step for adding remote hosts + StepAddHosts // New step for adding remote hosts + StepSandboxHost // Where to install the daemon + StepDaemonSetupChoice + StepDaemonGuided + StepDaemonDoctor StepConnectionTest StepShowResources StepAPIKey @@ -98,6 +108,36 @@ type OnboardingModel struct { demoCurrentTool string demoCurrentArgs map[string]any + // Sandbox host state + sandboxHostIsLocal bool + sandboxHostAddr string + sandboxHostUser string + sandboxHostPort int + sandboxHostInputs []textinput.Model // nil = choice mode, populated = input mode + sandboxHostFocus int + sandboxHostVMs []VMInfo // VMs fetched for "existing VM" selection + sandboxHostLoadingVMs bool // spinner while fetching VM list + sandboxHostSelectedVM int // index in sandboxHostVMs + sandboxHostDiscoveringIP bool // spinner while getting VM IP + sandboxHostProxyJump string // "user@host" for SSH jump (empty = direct) + sandboxHostVMName string // display name of selected VM + + // Daemon setup state + daemonSetupChoice int // 0=guided, 1=docs + daemonDistro setup.DistroInfo + daemonGuidedStep int + daemonGuidedResults []setup.StepResult + daemonGuidedRunning bool + daemonGuidedPreviewing bool + daemonGuidedSteps []setup.StepDef + daemonDoctorResults []doctor.CheckResult + daemonDoctorRunning bool + daemonDoctorComplete bool + + // Docs progress tracking + docsSetupCode string + docsAPIURL string + // For async operations testing bool } @@ -136,10 +176,19 @@ func NewOnboardingModel(cfg *config.Config, configPath string) OnboardingModel { ti.CharLimit = 100 ti.Width = 50 + apiURL := os.Getenv("FLUID_API_URL") + if apiURL == "" { + apiURL = "https://fluid.sh" + if cfg.ControlPlane.Address != "" { + apiURL = cfg.ControlPlane.Address + } + } + return OnboardingModel{ 
step: StepWelcome, spinner: s, textInput: ti, + docsAPIURL: apiURL, styles: DefaultStyles(), cfg: cfg, configPath: configPath, @@ -152,6 +201,7 @@ func (m OnboardingModel) Init() tea.Cmd { return tea.Batch( m.spinner.Tick, tea.EnterAltScreen, + m.registerDocsSession(), ) } @@ -182,6 +232,37 @@ type sourcePrepareDoneMsg struct { results []SourcePrepareResult } +type docsSessionRegisteredMsg struct { + code string + err error +} + +type sandboxHostVMListDoneMsg struct { + vms []VMInfo + err error +} + +type sandboxHostIPDoneMsg struct { + ip string + user string // SSH user for the VM + jump string // proxy jump string (empty for local VMs) + name string // VM display name + err error +} + +type daemonDetectOSDoneMsg struct { + distro setup.DistroInfo + err error +} + +type daemonGuidedStepDoneMsg struct { + result setup.StepResult +} + +type daemonDoctorDoneMsg struct { + results []doctor.CheckResult +} + type demoTickMsg struct{} func demoTickCmd() tea.Cmd { @@ -204,6 +285,13 @@ func (m OnboardingModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { default: // Let character input (including arrow keys, paste) fall through } + } else if m.step == StepSandboxHost && len(m.sandboxHostInputs) > 0 { + switch msg.String() { + case "ctrl+c", "enter", "tab", "shift+tab", "esc": + return m.handleKeyPress(msg) + default: + // Let character input fall through + } } else if m.step == StepAPIKey && !m.testing { switch msg.String() { case "ctrl+c", "enter", "q", "esc": @@ -235,6 +323,7 @@ func (m OnboardingModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { m.testing = false m.testResults = msg.results m.step = StepShowResources + m.postDocsProgress(2) // Step 2: Launch the TUI m.loadingVMs = true cmds = append(cmds, m.listVMs()) @@ -280,6 +369,74 @@ func (m OnboardingModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { m.demoThinkingDots = (m.demoThinkingDots + 1) % 4 cmds = append(cmds, ThinkingCmd()) } + + case docsSessionRegisteredMsg: + if msg.err == nil && msg.code != "" { + 
m.docsSetupCode = msg.code + m.postDocsProgress(0) // Step 0: Install CLI - they're running it + } + + case daemonDetectOSDoneMsg: + m.testing = false + if msg.err != nil { + m.errorMsg = msg.err.Error() + // Fall back to doctor screen + m.step = StepDaemonDoctor + m.daemonDoctorRunning = true + cmds = append(cmds, m.runDaemonDoctor()) + } else { + m.daemonDistro = msg.distro + m.daemonGuidedSteps = setup.AllSteps(msg.distro) + m.daemonGuidedStep = 0 + m.daemonGuidedResults = nil + m.step = StepDaemonGuided + // Show preview before running first step + m.daemonGuidedPreviewing = true + } + + case daemonGuidedStepDoneMsg: + m.daemonGuidedRunning = false + m.daemonGuidedResults = append(m.daemonGuidedResults, msg.result) + m.daemonGuidedStep++ + // If all steps done, advance to doctor + if m.daemonGuidedStep >= len(m.daemonGuidedSteps) { + m.step = StepDaemonDoctor + m.daemonDoctorRunning = true + cmds = append(cmds, m.runDaemonDoctor()) + } else { + m.daemonGuidedPreviewing = true + } + + case daemonDoctorDoneMsg: + m.daemonDoctorRunning = false + m.daemonDoctorComplete = true + m.daemonDoctorResults = msg.results + + case sandboxHostVMListDoneMsg: + m.sandboxHostLoadingVMs = false + if msg.err != nil { + m.errorMsg = msg.err.Error() + // Stay in choice mode + return m, nil + } + m.sandboxHostVMs = msg.vms + m.sandboxHostSelectedVM = 0 + + case sandboxHostIPDoneMsg: + m.sandboxHostDiscoveringIP = false + if msg.err != nil { + m.errorMsg = msg.err.Error() + // Stay in VM selection + return m, nil + } + m.sandboxHostAddr = msg.ip + m.sandboxHostUser = msg.user + m.sandboxHostPort = 22 + m.sandboxHostProxyJump = msg.jump + m.sandboxHostVMName = msg.name + m.sandboxHostVMs = nil // clear VM list + m.step = StepDaemonSetupChoice + m.selectedOption = 0 } // Update text input if on API key step @@ -289,6 +446,17 @@ func (m OnboardingModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { cmds = append(cmds, cmd) } + // Update sandbox host inputs if on sandbox host step + if 
m.step == StepSandboxHost && len(m.sandboxHostInputs) > 0 { + for i := range m.sandboxHostInputs { + if i == m.sandboxHostFocus { + var cmd tea.Cmd + m.sandboxHostInputs[i], cmd = m.sandboxHostInputs[i].Update(msg) + cmds = append(cmds, cmd) + } + } + } + // Update host inputs if on add hosts step if m.step == StepAddHosts && len(m.hostInputs) > 0 { for i := range m.hostInputs { @@ -316,6 +484,12 @@ func (m OnboardingModel) handleKeyPress(msg tea.KeyMsg) (tea.Model, tea.Cmd) { return m, nil } } + if m.step == StepSandboxHost && len(m.sandboxHostInputs) > 0 { + if m.sandboxHostInputs[m.sandboxHostFocus].Value() != "" { + m.sandboxHostInputs[m.sandboxHostFocus].SetValue("") + return m, nil + } + } return m, tea.Quit case "q": @@ -332,11 +506,31 @@ func (m OnboardingModel) handleKeyPress(msg tea.KeyMsg) (tea.Model, tea.Cmd) { m.hostInputs[m.hostInputFocus].Focus() return m, nil } + if m.step == StepSandboxHost && len(m.sandboxHostInputs) > 0 { + m.sandboxHostInputs[m.sandboxHostFocus].Blur() + m.sandboxHostFocus = (m.sandboxHostFocus + 1) % len(m.sandboxHostInputs) + m.sandboxHostInputs[m.sandboxHostFocus].Focus() + return m, nil + } + if m.step == StepSandboxHost && len(m.sandboxHostVMs) > 0 && !m.sandboxHostDiscoveringIP { + if m.sandboxHostSelectedVM < len(m.sandboxHostVMs)-1 { + m.sandboxHostSelectedVM++ + } + return m, nil + } switch m.step { case StepInfraChoice: if m.selectedOption < 3 { m.selectedOption++ } + case StepSandboxHost: + if m.selectedOption < 2 { + m.selectedOption++ + } + case StepDaemonSetupChoice: + if m.selectedOption < 1 { + m.selectedOption++ + } case StepSourcePrepare: if !m.sourcePrepareRunning && !m.sourcePrepareComplete && m.selectedOption < 1 { m.selectedOption++ @@ -358,12 +552,53 @@ func (m OnboardingModel) handleKeyPress(msg tea.KeyMsg) (tea.Model, tea.Cmd) { m.hostInputs[m.hostInputFocus].Focus() return m, nil } - if m.step == StepInfraChoice || m.step == StepSourcePrepare || m.step == StepOfferDemo { + if m.step == StepSandboxHost 
&& len(m.sandboxHostInputs) > 0 { + m.sandboxHostInputs[m.sandboxHostFocus].Blur() + m.sandboxHostFocus-- + if m.sandboxHostFocus < 0 { + m.sandboxHostFocus = len(m.sandboxHostInputs) - 1 + } + m.sandboxHostInputs[m.sandboxHostFocus].Focus() + return m, nil + } + if m.step == StepSandboxHost && len(m.sandboxHostVMs) > 0 && !m.sandboxHostDiscoveringIP { + if m.sandboxHostSelectedVM > 0 { + m.sandboxHostSelectedVM-- + } + return m, nil + } + if m.step == StepInfraChoice || m.step == StepSandboxHost || m.step == StepDaemonSetupChoice || m.step == StepSourcePrepare || m.step == StepOfferDemo { if m.selectedOption > 0 { m.selectedOption-- } } + case "r": + // Retry doctor checks + if m.step == StepDaemonDoctor && m.daemonDoctorComplete && !m.daemonDoctorRunning { + m.daemonDoctorComplete = false + m.daemonDoctorRunning = true + m.daemonDoctorResults = nil + return m, m.runDaemonDoctor() + } + + case "s": + // Skip current guided step + if m.step == StepDaemonGuided && !m.daemonGuidedRunning { + m.daemonGuidedResults = append(m.daemonGuidedResults, setup.StepResult{ + Name: m.daemonGuidedSteps[m.daemonGuidedStep].Name, + Skipped: true, + Success: false, + }) + m.daemonGuidedStep++ + if m.daemonGuidedStep >= len(m.daemonGuidedSteps) { + m.step = StepDaemonDoctor + m.daemonDoctorRunning = true + return m, m.runDaemonDoctor() + } + m.daemonGuidedPreviewing = true + } + case "ctrl+n": // Add another host in host configuration step if m.step == StepAddHosts { @@ -406,14 +641,113 @@ func (m OnboardingModel) handleEnter() (tea.Model, tea.Cmd) { return m, textinput.Blink } } - // Otherwise go directly to connection test - m.step = StepConnectionTest - m.testing = true - return m, tea.Batch(m.spinner.Tick, m.testConnections()) + // Go to sandbox host step + m.step = StepSandboxHost + m.selectedOption = 0 + return m, nil case StepAddHosts: // Save the configured hosts m.saveHostInputs() + m.step = StepSandboxHost + m.selectedOption = 0 + return m, nil + + case StepSandboxHost: + 
if len(m.sandboxHostInputs) > 0 { + // Input mode: read values and advance + addr := strings.TrimSpace(m.sandboxHostInputs[0].Value()) + if addr == "" { + return m, nil // require address + } + m.sandboxHostAddr = addr + m.sandboxHostUser = strings.TrimSpace(m.sandboxHostInputs[1].Value()) + if m.sandboxHostUser == "" { + m.sandboxHostUser = "root" + } + portStr := strings.TrimSpace(m.sandboxHostInputs[2].Value()) + if portStr != "" { + _, _ = fmt.Sscanf(portStr, "%d", &m.sandboxHostPort) + } + if m.sandboxHostPort == 0 { + m.sandboxHostPort = 22 + } + m.sandboxHostInputs = nil + m.step = StepDaemonSetupChoice + m.selectedOption = 0 + return m, nil + } + // VM selection mode + if len(m.sandboxHostVMs) > 0 && !m.sandboxHostDiscoveringIP { + selected := m.sandboxHostVMs[m.sandboxHostSelectedVM] + m.sandboxHostDiscoveringIP = true + return m, tea.Batch(m.spinner.Tick, m.discoverSandboxHostVMIP(selected)) + } + // Choice mode + if m.selectedOption == 0 { + // Local + m.sandboxHostIsLocal = true + m.step = StepDaemonSetupChoice + m.selectedOption = 0 + return m, nil + } + if m.selectedOption == 1 { + // Remote - show inputs + m.sandboxHostInputs = m.initSandboxHostInputs() + m.sandboxHostFocus = 0 + m.sandboxHostInputs[0].Focus() + return m, textinput.Blink + } + // Existing VM - fetch VM list + m.sandboxHostLoadingVMs = true + return m, tea.Batch(m.spinner.Tick, m.listSandboxHostVMs()) + + case StepDaemonSetupChoice: + m.daemonSetupChoice = m.selectedOption + if m.selectedOption == 0 { + // Guided walkthrough: detect OS first + m.testing = true + return m, tea.Batch(m.spinner.Tick, m.detectDaemonOS()) + } + // Docs path: open browser + openBrowser("https://fluid.sh/docs/daemon") + // Go directly to doctor to validate + m.step = StepDaemonDoctor + m.daemonDoctorRunning = false + m.daemonDoctorComplete = false + return m, nil + + case StepDaemonGuided: + if m.daemonGuidedRunning { + return m, nil + } + if m.daemonGuidedPreviewing && m.daemonGuidedStep < 
len(m.daemonGuidedSteps) { + // Start execution from preview + m.daemonGuidedPreviewing = false + m.daemonGuidedRunning = true + return m, m.runDaemonGuidedStep() + } + // Run next step + if m.daemonGuidedStep < len(m.daemonGuidedSteps) { + m.daemonGuidedRunning = true + return m, m.runDaemonGuidedStep() + } + // All done, go to doctor + m.step = StepDaemonDoctor + m.daemonDoctorRunning = true + return m, m.runDaemonDoctor() + + case StepDaemonDoctor: + if m.daemonDoctorRunning { + return m, nil + } + if !m.daemonDoctorComplete { + // First time landing here from docs path, run checks + m.daemonDoctorRunning = true + return m, m.runDaemonDoctor() + } + // Complete - advance to connection test + m.postDocsProgress(1) // Step 1: Set up the daemon m.step = StepConnectionTest m.testing = true return m, tea.Batch(m.spinner.Tick, m.testConnections()) @@ -487,6 +821,7 @@ func (m OnboardingModel) handleEnter() (tea.Model, tea.Cmd) { return m, nil case StepWrapUp: + m.postDocsProgress(5) // Step 5: Connect MCP m.step = StepComplete return m, nil @@ -512,6 +847,14 @@ func (m OnboardingModel) View() string { content = m.viewInfraChoice() case StepAddHosts: content = m.viewAddHosts() + case StepSandboxHost: + content = m.viewSandboxHost() + case StepDaemonSetupChoice: + content = m.viewDaemonSetupChoice() + case StepDaemonGuided: + content = m.viewDaemonGuided() + case StepDaemonDoctor: + content = m.viewDaemonDoctor() case StepConnectionTest: content = m.viewConnectionTest() case StepShowResources: @@ -534,6 +877,21 @@ func (m OnboardingModel) View() string { return "" } + if m.docsSetupCode != "" { + codeLabel := lipgloss.NewStyle(). + Foreground(lipgloss.Color("#6B7280")). + Render("Session: " + m.docsSetupCode) + + topBar := lipgloss.NewStyle(). + Width(m.width). + Align(lipgloss.Right). + PaddingRight(2). 
+ Render(codeLabel) + + placed := lipgloss.Place(m.width, m.height-1, lipgloss.Center, lipgloss.Center, content) + return topBar + "\n" + placed + } + return lipgloss.Place( m.width, m.height, @@ -1001,6 +1359,359 @@ Need help? return b.String() } +func (m OnboardingModel) viewSandboxHost() string { + var b strings.Builder + + title := lipgloss.NewStyle(). + Bold(true). + Foreground(lipgloss.Color("#3B82F6")). + Render("Sandbox Host") + + b.WriteString(title) + b.WriteString("\n\n") + + // SSH input mode + if len(m.sandboxHostInputs) > 0 { + desc := lipgloss.NewStyle().Foreground(lipgloss.Color("#9CA3AF")).Render( + "Enter your sandbox host details:", + ) + b.WriteString(desc) + b.WriteString("\n\n") + + labelStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("#9CA3AF")).Width(12) + + b.WriteString(labelStyle.Render(" Address:")) + b.WriteString(" ") + b.WriteString(m.sandboxHostInputs[0].View()) + b.WriteString("\n") + + b.WriteString(labelStyle.Render(" SSH User:")) + b.WriteString(" ") + b.WriteString(m.sandboxHostInputs[1].View()) + b.WriteString("\n") + + b.WriteString(labelStyle.Render(" SSH Port:")) + b.WriteString(" ") + b.WriteString(m.sandboxHostInputs[2].View()) + b.WriteString("\n\n") + + help := lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")) + b.WriteString(help.Render("Tab to navigate | Enter to continue")) + return b.String() + } + + // Loading VMs spinner + if m.sandboxHostLoadingVMs { + b.WriteString(m.spinner.View()) + b.WriteString(" Loading VMs from hosts...") + return b.String() + } + + // VM selection mode + if len(m.sandboxHostVMs) > 0 && !m.sandboxHostDiscoveringIP { + desc := lipgloss.NewStyle().Foreground(lipgloss.Color("#9CA3AF")).Render( + "Select a VM to use as the sandbox host:", + ) + b.WriteString(desc) + b.WriteString("\n\n") + + for i, vm := range m.sandboxHostVMs { + cursor := " " + style := lipgloss.NewStyle().Foreground(lipgloss.Color("#9CA3AF")) + if i == m.sandboxHostSelectedVM { + cursor = "> " + style = 
lipgloss.NewStyle().Foreground(lipgloss.Color("#3B82F6")).Bold(true) + } + label := vm.Name + if vm.Host != "" { + label += fmt.Sprintf(" (on %s)", vm.Host) + } + b.WriteString(cursor) + b.WriteString(style.Render(label)) + b.WriteString("\n") + } + + b.WriteString("\n") + help := lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render("Arrow keys to select | Enter to confirm") + b.WriteString(help) + + if m.errorMsg != "" { + b.WriteString("\n\n") + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#EF4444")).Render(m.errorMsg)) + } + return b.String() + } + + // Discovering IP spinner + if m.sandboxHostDiscoveringIP { + b.WriteString(m.spinner.View()) + vmName := "VM" + if m.sandboxHostSelectedVM < len(m.sandboxHostVMs) { + vmName = m.sandboxHostVMs[m.sandboxHostSelectedVM].Name + } + b.WriteString(fmt.Sprintf(" Discovering IP for %s...", vmName)) + return b.String() + } + + // Choice mode + desc := lipgloss.NewStyle().Foreground(lipgloss.Color("#9CA3AF")).Render( + "Where should the fluid-daemon be installed?\nThis is the host where sandbox VMs will be created and managed.", + ) + b.WriteString(desc) + b.WriteString("\n\n") + + options := []string{ + "This machine (local)", + "A remote server (SSH)", + "An existing VM on a KVM host", + } + + for i, opt := range options { + cursor := " " + style := lipgloss.NewStyle().Foreground(lipgloss.Color("#9CA3AF")) + if i == m.selectedOption { + cursor = "> " + style = lipgloss.NewStyle().Foreground(lipgloss.Color("#3B82F6")).Bold(true) + } + b.WriteString(cursor) + b.WriteString(style.Render(opt)) + b.WriteString("\n") + } + + b.WriteString("\n") + help := lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render("Use arrow keys to select, Enter to confirm") + b.WriteString(help) + + if m.errorMsg != "" { + b.WriteString("\n\n") + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#EF4444")).Render(m.errorMsg)) + } + + return b.String() +} + +func (m OnboardingModel) 
viewDaemonSetupChoice() string { + var b strings.Builder + + title := lipgloss.NewStyle(). + Bold(true). + Foreground(lipgloss.Color("#3B82F6")). + Render("Daemon Setup") + + b.WriteString(title) + b.WriteString("\n\n") + + desc := lipgloss.NewStyle().Foreground(lipgloss.Color("#9CA3AF")).Render( + "The fluid daemon needs to be installed on your sandbox host(s).\nHow would you like to proceed?", + ) + b.WriteString(desc) + b.WriteString("\n\n") + + options := []string{ + "Guided walkthrough (recommended)", + "I'll set it up myself (opens docs)", + } + + for i, opt := range options { + cursor := " " + style := lipgloss.NewStyle().Foreground(lipgloss.Color("#9CA3AF")) + if i == m.selectedOption { + cursor = "> " + style = lipgloss.NewStyle().Foreground(lipgloss.Color("#3B82F6")).Bold(true) + } + b.WriteString(cursor) + b.WriteString(style.Render(opt)) + b.WriteString("\n") + } + + b.WriteString("\n") + help := lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render("Use arrow keys to select, Enter to confirm") + b.WriteString(help) + + return b.String() +} + +func (m OnboardingModel) viewDaemonGuided() string { + var b strings.Builder + + title := lipgloss.NewStyle(). + Bold(true). + Foreground(lipgloss.Color("#3B82F6")). 
+ Render("Daemon Setup - Guided Walkthrough") + + b.WriteString(title) + b.WriteString("\n\n") + + // Show distro and target host info + if m.daemonDistro.Name != "" { + distroLabel := lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render( + fmt.Sprintf("Detected: %s (%s)", m.daemonDistro.Name, m.daemonDistro.PkgManager), + ) + b.WriteString(distroLabel) + b.WriteString("\n") + } + targetLabel := lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render( + fmt.Sprintf("Target: %s", m.sandboxHostLabel()), + ) + b.WriteString(targetLabel) + b.WriteString("\n\n") + + // Show completed steps + for i, result := range m.daemonGuidedResults { + var icon, style string + if result.Skipped { + icon = "-" + style = "#6B7280" + } else if result.Success { + icon = "v" + style = "#10B981" + } else { + icon = "x" + style = "#EF4444" + } + line := lipgloss.NewStyle().Foreground(lipgloss.Color(style)).Render( + fmt.Sprintf(" %s %s", icon, m.daemonGuidedSteps[i].Name), + ) + b.WriteString(line) + if result.Skipped { + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render(" (skipped)")) + } + if result.Error != "" { + b.WriteString("\n") + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#EF4444")).Render( + fmt.Sprintf(" %s", result.Error), + )) + } + b.WriteString("\n") + } + + // Show current step + if m.daemonGuidedStep < len(m.daemonGuidedSteps) { + current := m.daemonGuidedSteps[m.daemonGuidedStep] + if m.daemonGuidedRunning { + b.WriteString(fmt.Sprintf(" %s %s...\n", m.spinner.View(), current.Description)) + } else if m.daemonGuidedPreviewing { + // Preview mode: show step name and commands + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#3B82F6")).Bold(true).Render( + fmt.Sprintf(" > %s", current.Name), + )) + b.WriteString("\n") + if len(current.Commands) > 0 { + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render( + " Commands to execute (via sudo):", + )) + b.WriteString("\n") 
+ cmdStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("#9CA3AF")).Width(m.width) + for _, cmd := range current.Commands { + b.WriteString(cmdStyle.Render(fmt.Sprintf(" $ %s", cmd))) + b.WriteString("\n") + } + } + } else { + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#9CA3AF")).Render( + fmt.Sprintf(" > %s", current.Name), + )) + b.WriteString("\n") + } + + // Show remaining steps dimmed + for i := m.daemonGuidedStep + 1; i < len(m.daemonGuidedSteps); i++ { + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#4B5563")).Render( + fmt.Sprintf(" %s", m.daemonGuidedSteps[i].Name), + )) + b.WriteString("\n") + } + + b.WriteString("\n") + stepNum := m.daemonGuidedStep + 1 + total := len(m.daemonGuidedSteps) + if m.daemonGuidedRunning { + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render( + fmt.Sprintf("Step %d/%d", stepNum, total), + )) + } else { + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render( + fmt.Sprintf("Step %d/%d | Enter to execute | 's' to skip", stepNum, total), + )) + } + } + + return b.String() +} + +func (m OnboardingModel) viewDaemonDoctor() string { + var b strings.Builder + + title := lipgloss.NewStyle(). + Bold(true). + Foreground(lipgloss.Color("#3B82F6")). 
+ Render("Checking Daemon Health") + + b.WriteString(title) + b.WriteString("\n\n") + + if m.daemonDoctorRunning { + b.WriteString(m.spinner.View()) + b.WriteString(" Running health checks...") + return b.String() + } + + if !m.daemonDoctorComplete { + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#9CA3AF")).Render( + "Press Enter to check daemon health...", + )) + return b.String() + } + + passed := 0 + failed := 0 + for _, r := range m.daemonDoctorResults { + var icon, style string + if r.Passed { + passed++ + icon = "v" + style = "#10B981" + } else { + failed++ + icon = "x" + style = "#EF4444" + } + line := lipgloss.NewStyle().Foreground(lipgloss.Color(style)).Render( + fmt.Sprintf(" %s %s", icon, r.Message), + ) + b.WriteString(line) + b.WriteString("\n") + if !r.Passed && r.FixCmd != "" { + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render( + fmt.Sprintf(" Fix: %s", r.FixCmd), + )) + b.WriteString("\n") + } + } + + b.WriteString("\n") + total := passed + failed + if failed == 0 { + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#10B981")).Render( + fmt.Sprintf(" %d/%d passed", passed, total), + )) + b.WriteString("\n\n") + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render( + "Press Enter to continue...", + )) + } else { + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#EF4444")).Render( + fmt.Sprintf(" %d/%d passed, %d failed", passed, total, failed), + )) + b.WriteString("\n\n") + b.WriteString(lipgloss.NewStyle().Foreground(lipgloss.Color("#6B7280")).Render( + "[r] Retry [Enter] Continue anyway", + )) + } + + return b.String() +} + // viewAddHosts renders the host configuration view func (m OnboardingModel) viewAddHosts() string { var b strings.Builder @@ -1237,6 +1948,120 @@ func (m OnboardingModel) listVMs() tea.Cmd { } } +func (m OnboardingModel) listSandboxHostVMs() tea.Cmd { + return func() tea.Msg { + var vms []VMInfo + + // Query local if InfraLocal or 
InfraBoth + if m.infraChoice == InfraLocal || m.infraChoice == InfraBoth { + cmd := exec.Command("virsh", "-c", "qemu:///system", "list", "--all", "--name") + output, err := cmd.Output() + if err == nil { + for _, name := range strings.Split(string(output), "\n") { + name = strings.TrimSpace(name) + if name != "" { + vms = append(vms, VMInfo{ + Name: name, + Host: "local", + State: "available", + }) + } + } + } + } + + // Query remote hosts if InfraRemote or InfraBoth + if m.infraChoice == InfraRemote || m.infraChoice == InfraBoth { + for _, host := range m.cfg.Hosts { + uri := fmt.Sprintf("qemu+ssh://%s@%s/system", host.SSHUser, host.Address) + if host.SSHUser == "" { + uri = fmt.Sprintf("qemu+ssh://root@%s/system", host.Address) + } + cmd := exec.Command("virsh", "-c", uri, "list", "--all", "--name") + output, err := cmd.Output() + if err == nil { + for _, name := range strings.Split(string(output), "\n") { + name = strings.TrimSpace(name) + if name != "" { + vms = append(vms, VMInfo{ + Name: name, + Host: host.Name, + State: "available", + }) + } + } + } + } + } + + if len(vms) == 0 { + return sandboxHostVMListDoneMsg{err: fmt.Errorf("no VMs found on configured hosts")} + } + return sandboxHostVMListDoneMsg{vms: vms} + } +} + +func (m OnboardingModel) discoverSandboxHostVMIP(vm VMInfo) tea.Cmd { + return func() tea.Msg { + var uri, jump, sshUser string + + if vm.Host == "" || vm.Host == "local" { + // Local VM + uri = "qemu:///system" + sshUser = "root" + } else { + // Remote VM - find the host config + var host *config.HostConfig + for i := range m.cfg.Hosts { + if m.cfg.Hosts[i].Name == vm.Host { + host = &m.cfg.Hosts[i] + break + } + } + if host == nil { + return sandboxHostIPDoneMsg{err: fmt.Errorf("host %q not found in config", vm.Host)} + } + + hostUser := host.SSHUser + if hostUser == "" { + hostUser = "root" + } + uri = fmt.Sprintf("qemu+ssh://%s@%s/system", hostUser, host.Address) + jump = fmt.Sprintf("%s@%s", hostUser, host.Address) + sshUser = 
host.SSHVMUser + if sshUser == "" { + sshUser = "root" + } + } + + // Try agent source first, then lease + ctx := context.Background() + cmd := exec.CommandContext(ctx, "virsh", "-c", uri, "domifaddr", vm.Name, "--source", "agent") + var stdout bytes.Buffer + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + cmd = exec.CommandContext(ctx, "virsh", "-c", uri, "domifaddr", vm.Name, "--source", "lease") + stdout.Reset() + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return sandboxHostIPDoneMsg{err: fmt.Errorf("cannot discover IP: %v", err), name: vm.Name} + } + } + + ip := parseIPFromVirshOutput(stdout.String()) + if ip == "" { + return sandboxHostIPDoneMsg{err: fmt.Errorf("could not discover VM IP address"), name: vm.Name} + } + + return sandboxHostIPDoneMsg{ + ip: ip, + user: sshUser, + jump: jump, + name: vm.Name, + } + } +} + func (m OnboardingModel) testAPIKey() tea.Cmd { return func() tea.Msg { // For now, just validate the format @@ -1420,8 +2245,7 @@ func (m OnboardingModel) runSourcePrepare() tea.Cmd { func makeSSHRunFunc(ip, user, proxyJump string) readonly.SSHRunFunc { return func(ctx context.Context, command string) (string, string, int, error) { args := []string{ - "-o", "StrictHostKeyChecking=no", - "-o", "UserKnownHostsFile=/dev/null", + "-o", "StrictHostKeyChecking=accept-new", "-o", "ConnectTimeout=15", "-o", "BatchMode=yes", } @@ -1512,6 +2336,13 @@ func (m OnboardingModel) advanceDemo() (tea.Model, tea.Cmd) { Role: "tool", Tool: &tr, }) + // Track docs progress for demo tool completions + if step.ToolName == "create_sandbox" && !step.ToolError { + m.postDocsProgress(3) // Step 3: Create a sandbox + } + if step.ToolName == "run_command" && !step.ToolError { + m.postDocsProgress(4) // Step 4: Run commands + } m.demoIndex++ } @@ -1526,6 +2357,182 @@ func (m OnboardingModel) advanceDemo() (tea.Model, tea.Cmd) { }) } +// makeDaemonRunFunc creates the appropriate RunFunc for daemon commands +// based on the sandbox host 
configuration. +func (m OnboardingModel) makeDaemonRunFunc() hostexec.RunFunc { + if m.sandboxHostIsLocal { + return hostexec.NewLocal() + } + if m.sandboxHostAddr != "" { + user := m.sandboxHostUser + if user == "" { + user = "root" + } + port := m.sandboxHostPort + if port == 0 { + port = 22 + } + if m.sandboxHostProxyJump != "" { + return hostexec.NewSSHWithJump(m.sandboxHostAddr, user, port, m.sandboxHostProxyJump) + } + return hostexec.NewSSH(m.sandboxHostAddr, user, port) + } + return hostexec.NewLocal() +} + +// sandboxHostLabel returns a human-readable label for the target host. +func (m OnboardingModel) sandboxHostLabel() string { + if m.sandboxHostIsLocal || m.sandboxHostAddr == "" { + return "local" + } + user := m.sandboxHostUser + if user == "" { + user = "root" + } + if m.sandboxHostVMName != "" { + return fmt.Sprintf("%s on %s (%s)", m.sandboxHostVMName, m.sandboxHostAddr, user) + } + return fmt.Sprintf("%s (%s)", m.sandboxHostAddr, user) +} + +// initSandboxHostInputs creates the text inputs for remote sandbox host details. 
+func (m OnboardingModel) initSandboxHostInputs() []textinput.Model { + addrInput := textinput.New() + addrInput.Placeholder = "192.168.1.50 or hostname" + addrInput.CharLimit = 100 + addrInput.Width = 30 + + userInput := textinput.New() + userInput.Placeholder = "root" + userInput.CharLimit = 50 + userInput.Width = 30 + + portInput := textinput.New() + portInput.Placeholder = "22" + portInput.CharLimit = 5 + portInput.Width = 10 + + return []textinput.Model{addrInput, userInput, portInput} +} + +func (m OnboardingModel) detectDaemonOS() tea.Cmd { + return func() tea.Msg { + run := m.makeDaemonRunFunc() + distro, err := setup.DetectOS(context.Background(), run) + return daemonDetectOSDoneMsg{distro: distro, err: err} + } +} + +func (m OnboardingModel) runDaemonGuidedStep() tea.Cmd { + return func() tea.Msg { + if m.daemonGuidedStep >= len(m.daemonGuidedSteps) { + return daemonGuidedStepDoneMsg{result: setup.StepResult{Name: "done", Success: true}} + } + + step := m.daemonGuidedSteps[m.daemonGuidedStep] + run := m.makeDaemonRunFunc() + sudoRun := hostexec.WithSudo(run) + + result := setup.StepResult{Name: step.Name} + + // Check if already done + done, err := step.Check(context.Background(), run) + if err == nil && done { + result.Skipped = true + result.Success = true + return daemonGuidedStepDoneMsg{result: result} + } + + // Execute + if err := step.Execute(context.Background(), sudoRun); err != nil { + result.Error = err.Error() + return daemonGuidedStepDoneMsg{result: result} + } + + result.Success = true + return daemonGuidedStepDoneMsg{result: result} + } +} + +func (m OnboardingModel) runDaemonDoctor() tea.Cmd { + return func() tea.Msg { + run := m.makeDaemonRunFunc() + results := doctor.RunAll(context.Background(), run) + return daemonDoctorDoneMsg{results: results} + } +} + +func openBrowser(url string) { + var cmd *exec.Cmd + switch runtime.GOOS { + case "darwin": + cmd = exec.Command("open", url) + case "windows": + cmd = exec.Command("rundll32", 
"url.dll,FileProtocolHandler", url) + default: + cmd = exec.Command("xdg-open", url) + } + _ = cmd.Run() +} + +func (m OnboardingModel) registerDocsSession() tea.Cmd { + return func() tea.Msg { + docsURL := m.docsAPIURL + "/docs/quickstart" + var sessionCode string + + // Try to register session - best effort + body := `{"storage_key":"quickstart"}` + apiURL := m.docsAPIURL + "/v1/docs-progress/register" + req, err := http.NewRequest("POST", apiURL, strings.NewReader(body)) + if err == nil { + req.Header.Set("Content-Type", "application/json") + client := &http.Client{Timeout: 5 * time.Second} + resp, err := client.Do(req) + if err == nil { + var result struct { + SessionCode string `json:"session_code"` + } + if err := json.NewDecoder(resp.Body).Decode(&result); err == nil { + sessionCode = result.SessionCode + } + _ = resp.Body.Close() + } + } + + if sessionCode != "" { + docsURL += "?code=" + sessionCode + } + + // Always open browser regardless of registration success + openBrowser(docsURL) + + return docsSessionRegisteredMsg{code: sessionCode} + } +} + +// postDocsProgress sends a step completion to the docs progress API. +// Fire-and-forget: errors are silently ignored. 
+func (m OnboardingModel) postDocsProgress(stepIndex int) { + if m.docsSetupCode == "" { + return + } + go func() { + body := fmt.Sprintf(`{"session_code":%q,"step_index":%d}`, m.docsSetupCode, stepIndex) + url := m.docsAPIURL + "/v1/docs-progress/complete" + req, err := http.NewRequest("POST", url, strings.NewReader(body)) + if err != nil { + return + } + req.Header.Set("Content-Type", "application/json") + client := &http.Client{Timeout: 5 * time.Second} + resp, err := client.Do(req) + if err != nil { + return + } + _ = resp.Body.Close() + }() +} + // IsComplete returns true if onboarding is finished func (m OnboardingModel) IsComplete() bool { return m.step == StepComplete diff --git a/fluid/internal/tui/playbooks.go b/fluid-cli/internal/tui/playbooks.go similarity index 100% rename from fluid/internal/tui/playbooks.go rename to fluid-cli/internal/tui/playbooks.go diff --git a/fluid/internal/tui/settings.go b/fluid-cli/internal/tui/settings.go similarity index 100% rename from fluid/internal/tui/settings.go rename to fluid-cli/internal/tui/settings.go diff --git a/fluid/internal/tui/styles.go b/fluid-cli/internal/tui/styles.go similarity index 100% rename from fluid/internal/tui/styles.go rename to fluid-cli/internal/tui/styles.go diff --git a/fluid-cli/internal/updater/updater.go b/fluid-cli/internal/updater/updater.go new file mode 100644 index 00000000..0a21df9f --- /dev/null +++ b/fluid-cli/internal/updater/updater.go @@ -0,0 +1,262 @@ +package updater + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +const maxBinarySize = 500 * 1024 * 1024 // 500MB limit for tar entry reads + +const ( + releasesURL = "https://api.github.com/repos/aspectrr/fluid.sh/releases/latest" + cacheFile = ".last-update-check" + cacheTTL = 24 * time.Hour +) + +type githubRelease struct { + TagName string `json:"tag_name"` + Assets 
[]githubAsset `json:"assets"` +} + +type githubAsset struct { + Name string `json:"name"` + BrowserDownloadURL string `json:"browser_download_url"` +} + +// CheckLatest queries GitHub API for the latest release. +// Returns (latestVersion, downloadURL, needsUpdate, error). +func CheckLatest(currentVersion string) (string, string, bool, error) { + req, err := http.NewRequest("GET", releasesURL, nil) + if err != nil { + return "", "", false, err + } + req.Header.Set("Accept", "application/vnd.github+json") + + client := &http.Client{Timeout: 10 * time.Second} + resp, err := client.Do(req) + if err != nil { + return "", "", false, fmt.Errorf("fetch latest release: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return "", "", false, fmt.Errorf("github API returned %d", resp.StatusCode) + } + + var release githubRelease + if err := json.NewDecoder(resp.Body).Decode(&release); err != nil { + return "", "", false, fmt.Errorf("decode release: %w", err) + } + + latest := strings.TrimPrefix(release.TagName, "v") + current := strings.TrimPrefix(currentVersion, "v") + + if current == "dev" || current == "" { + // Dev builds always report as up to date + return latest, "", false, nil + } + + if latest == current { + return latest, "", false, nil + } + + // Find the right asset for this OS/arch + assetName := fmt.Sprintf("fluid_%s_%s_%s.tar.gz", latest, runtime.GOOS, runtime.GOARCH) + var downloadURL string + for _, asset := range release.Assets { + if asset.Name == assetName { + downloadURL = asset.BrowserDownloadURL + break + } + } + + if downloadURL == "" { + return latest, "", false, fmt.Errorf("no release asset found for %s/%s", runtime.GOOS, runtime.GOARCH) + } + + return latest, downloadURL, true, nil +} + +// Update downloads the release archive from downloadURL and replaces the current executable. 
+func Update(downloadURL string) error { + client := &http.Client{Timeout: 120 * time.Second} + + // Download the tar.gz to a temp file so we can checksum it + resp, err := client.Get(downloadURL) + if err != nil { + return fmt.Errorf("download release: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("download returned %d", resp.StatusCode) + } + + archiveData, err := io.ReadAll(io.LimitReader(resp.Body, maxBinarySize)) + if err != nil { + return fmt.Errorf("read archive: %w", err) + } + + // Download and verify checksum + checksumURL := strings.TrimSuffix(downloadURL, filepath.Base(downloadURL)) + "checksums.txt" + if err := verifyChecksum(client, checksumURL, filepath.Base(downloadURL), archiveData); err != nil { + return fmt.Errorf("checksum verification: %w", err) + } + + // Extract the "fluid" binary from the tar.gz archive + gz, err := gzip.NewReader(bytes.NewReader(archiveData)) + if err != nil { + return fmt.Errorf("open gzip: %w", err) + } + defer func() { _ = gz.Close() }() + + tr := tar.NewReader(gz) + var binaryData []byte + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("read tar: %w", err) + } + // Look for the fluid binary (may be at root or in a subdirectory) + base := filepath.Base(hdr.Name) + if base == "fluid" && hdr.Typeflag == tar.TypeReg { + binaryData, err = io.ReadAll(io.LimitReader(tr, maxBinarySize)) + if err != nil { + return fmt.Errorf("read binary from archive: %w", err) + } + break + } + } + + if binaryData == nil { + return fmt.Errorf("fluid binary not found in archive") + } + + // Get current executable path + execPath, err := os.Executable() + if err != nil { + return fmt.Errorf("get executable path: %w", err) + } + execPath, err = filepath.EvalSymlinks(execPath) + if err != nil { + return fmt.Errorf("resolve symlinks: %w", err) + } + + // Write to temp file in same directory (for atomic rename) + dir := 
filepath.Dir(execPath) + tmp, err := os.CreateTemp(dir, "fluid-update-*") + if err != nil { + return fmt.Errorf("create temp file: %w", err) + } + tmpPath := tmp.Name() + + if _, err := tmp.Write(binaryData); err != nil { + _ = tmp.Close() + _ = os.Remove(tmpPath) + return fmt.Errorf("write temp binary: %w", err) + } + if err := tmp.Chmod(0o755); err != nil { + _ = tmp.Close() + _ = os.Remove(tmpPath) + return fmt.Errorf("chmod temp binary: %w", err) + } + _ = tmp.Close() + + // Atomic rename over the current executable + if err := os.Rename(tmpPath, execPath); err != nil { + _ = os.Remove(tmpPath) + return fmt.Errorf("replace binary: %w", err) + } + + return nil +} + +// CacheDir returns the fluid config directory path for caching update checks. +func CacheDir() string { + home, err := os.UserHomeDir() + if err != nil { + return "" + } + return filepath.Join(home, ".fluid") +} + +// ShouldCheck returns true if enough time has passed since the last update check. +func ShouldCheck() bool { + dir := CacheDir() + if dir == "" { + return false + } + path := filepath.Join(dir, cacheFile) + info, err := os.Stat(path) + if err != nil { + return true // No cache file, should check + } + return time.Since(info.ModTime()) > cacheTTL +} + +// MarkChecked updates the cache file timestamp. +func MarkChecked() { + dir := CacheDir() + if dir == "" { + return + } + path := filepath.Join(dir, cacheFile) + _ = os.MkdirAll(dir, 0o755) + _ = os.WriteFile(path, []byte(time.Now().Format(time.RFC3339)), 0o644) +} + +// verifyChecksum downloads checksums.txt from the release and verifies the archive SHA256. 
+func verifyChecksum(client *http.Client, checksumURL, assetName string, data []byte) error { + resp, err := client.Get(checksumURL) + if err != nil { + return fmt.Errorf("download checksums: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("checksums download returned %d", resp.StatusCode) + } + + body, err := io.ReadAll(io.LimitReader(resp.Body, 1024*1024)) // 1MB limit for checksums file + if err != nil { + return fmt.Errorf("read checksums: %w", err) + } + + // Parse checksums file: each line is "sha256hash filename" + var expectedHash string + for _, line := range strings.Split(string(body), "\n") { + parts := strings.Fields(line) + if len(parts) == 2 && parts[1] == assetName { + expectedHash = parts[0] + break + } + } + + if expectedHash == "" { + return fmt.Errorf("no checksum found for %s in checksums.txt", assetName) + } + + actualHash := sha256.Sum256(data) + actualHex := hex.EncodeToString(actualHash[:]) + + if actualHex != expectedHash { + return fmt.Errorf("SHA256 mismatch: expected %s, got %s", expectedHash, actualHex) + } + + return nil +} diff --git a/fluid-remote/internal/workflow/errors.go b/fluid-cli/internal/workflow/errors.go similarity index 100% rename from fluid-remote/internal/workflow/errors.go rename to fluid-cli/internal/workflow/errors.go diff --git a/fluid-daemon/.gitignore b/fluid-daemon/.gitignore new file mode 100644 index 00000000..e660fd93 --- /dev/null +++ b/fluid-daemon/.gitignore @@ -0,0 +1 @@ +bin/ diff --git a/fluid-daemon/AGENTS.md b/fluid-daemon/AGENTS.md new file mode 100644 index 00000000..fcff6e5c --- /dev/null +++ b/fluid-daemon/AGENTS.md @@ -0,0 +1,109 @@ +# Fluid Daemon - Development Guide + +Background service that manages VM sandboxes on a sandbox host. One daemon runs per sandbox host, but each daemon can connect to multiple libvirt hosts over SSH for source VM access. 
Multiple daemons are typically needed for heavily NATed enterprise networks or separate data centers. Exposes a gRPC API for the CLI and optionally connects upstream to the control plane. + +## Architecture + +``` +fluid CLI (TUI/MCP) + | + v (gRPC :9091) +fluid-daemon + | + +--- libvirt/KVM (sandbox VMs) + +--- SQLite (local state) + +--- SSH CA (ephemeral certs) + +--- Janitor (TTL cleanup) + | + v (optional gRPC stream) +control-plane +``` + +## Tech Stack + +- **Language**: Go +- **VM Backend**: QEMU microVMs via libvirt +- **State**: SQLite +- **Networking**: Bridge + TAP devices +- **SSH**: Internal CA with ephemeral certificates + +## Project Structure + +``` +fluid-daemon/ + cmd/fluid-daemon/main.go # Entry point + internal/ + agent/ # Control plane gRPC client + reconnect + config/ # Configuration loading + daemon/ # Main daemon orchestration + image/ # Image extraction and caching + janitor/ # TTL-based sandbox cleanup + microvm/ # MicroVM manager (overlay, boot) + network/ # Bridge + TAP device management + provider/ # VM provider abstraction + readonly/ # Read-only source VM access + sourcevm/ # Source VM manager + sshca/ # SSH Certificate Authority + sshkeys/ # SSH key management + state/ # SQLite state store + Makefile +``` + +## Quick Start + +```bash +# Build +go build -o bin/fluid-daemon ./cmd/fluid-daemon + +# Run +sudo ./bin/fluid-daemon serve + +# Run with systemd +sudo systemctl enable --now fluid-daemon +``` + +## Configuration + +Default config: `~/.config/fluid/daemon.yaml` + +```yaml +listen: + grpc: ":9091" + +backend: qemu + +storage: + images: /var/lib/fluid/images + overlays: /var/lib/fluid/overlays + state: /var/lib/fluid/state.db + +network: + bridge: fluid0 + subnet: 10.0.0.0/24 + +# Optional: connect to control plane +# control_plane: +# address: "cp.fluid.sh:9090" +# token: "your-host-token" +``` + +## Development + +### Prerequisites + +- Go 1.24+ +- libvirt/KVM +- Root access (for network/VM management) + +### Testing + +```bash 
+go test ./... -v +go test ./... -coverprofile=coverage.out +``` + +### Build + +```bash +go build -o bin/fluid-daemon ./cmd/fluid-daemon +``` diff --git a/fluid-daemon/Makefile b/fluid-daemon/Makefile new file mode 100644 index 00000000..33d3800a --- /dev/null +++ b/fluid-daemon/Makefile @@ -0,0 +1,60 @@ +BINARY_NAME=fluid-daemon +BUILD_DIR=bin + +.PHONY: all build run clean fmt vet lint test test-coverage check deps tidy install help + +all: fmt vet test build + +build: + go build -o $(BUILD_DIR)/$(BINARY_NAME) ./cmd/fluid-daemon + +run: build + ./$(BUILD_DIR)/$(BINARY_NAME) + +clean: + rm -rf $(BUILD_DIR) + rm -f coverage.out coverage.html + +fmt: + go fmt ./... + +vet: + go vet ./... + +lint: + golangci-lint run --allow-parallel-runners ./... + +test: + go test ./... -v + +test-coverage: + go test ./... -coverprofile=coverage.out + go tool cover -html=coverage.out -o coverage.html + +check: fmt vet lint test + +deps: + go mod download + +tidy: + go mod tidy + +install: + go install ./cmd/fluid-daemon + +help: + @echo "Available targets:" + @echo " all - Run fmt, vet, test, and build (default)" + @echo " build - Build the fluid-daemon binary" + @echo " run - Build and run the daemon" + @echo " clean - Clean build artifacts" + @echo " fmt - Format code" + @echo " vet - Run go vet" + @echo " lint - Run golangci-lint" + @echo " test - Run tests" + @echo " test-coverage - Run tests with coverage" + @echo " check - Run all code quality checks" + @echo " deps - Download dependencies" + @echo " tidy - Tidy and verify dependencies" + @echo " install - Install to GOPATH/bin" + @echo " help - Show this help message" diff --git a/fluid-daemon/cmd/fluid-daemon/main.go b/fluid-daemon/cmd/fluid-daemon/main.go new file mode 100644 index 00000000..0974e4b3 --- /dev/null +++ b/fluid-daemon/cmd/fluid-daemon/main.go @@ -0,0 +1,251 @@ +package main + +import ( + "context" + "flag" + "fmt" + "log/slog" + "net" + "os" + "os/signal" + "path/filepath" + "syscall" + + 
"google.golang.org/grpc" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" + + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/agent" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/config" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/daemon" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/id" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/image" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/janitor" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/microvm" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/network" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/provider" + lxcProvider "github.com/aspectrr/fluid.sh/fluid-daemon/internal/provider/lxc" + microvmProvider "github.com/aspectrr/fluid.sh/fluid-daemon/internal/provider/microvm" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/snapshotpull" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/state" +) + +const version = "0.1.0" + +func main() { + logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ + Level: slog.LevelInfo, + })) + slog.SetDefault(logger) + + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + if err := run(ctx, logger); err != nil { + logger.Error("fatal error", "error", err) + os.Exit(1) + } +} + +func run(ctx context.Context, logger *slog.Logger) error { + configPath := flag.String("config", "", "path to config file") + flag.Parse() + + // Load config + cfgPath := *configPath + if cfgPath == "" { + home, _ := os.UserHomeDir() + cfgPath = filepath.Join(home, ".fluid", "daemon.yaml") + } + + cfg, err := config.Load(cfgPath) + if err != nil { + return err + } + + // Ensure host ID + if cfg.HostID == "" { + hostID, err := id.GenerateRaw() + if err != nil { + return fmt.Errorf("generate host ID: %w", err) + } + cfg.HostID = hostID + _ = config.Save(cfgPath, cfg) + logger.Info("generated host ID", "host_id", cfg.HostID) + } + + 
logger.Info("fluid-daemon starting", + "host_id", cfg.HostID, + "config", cfgPath, + "provider", cfg.Provider, + ) + + // Initialize SQLite state store + st, err := state.NewStore(cfg.State.DBPath) + if err != nil { + return err + } + defer func() { _ = st.Close() }() + logger.Info("state store initialized", "db_path", cfg.State.DBPath) + + // Initialize provider based on config + var prov provider.SandboxProvider + + switch cfg.Provider { + case "lxc": + prov, err = initLXCProvider(cfg, logger) + if err != nil { + return err + } + logger.Info("LXC provider initialized", + "host", cfg.LXC.Host, + "node", cfg.LXC.Node, + ) + default: // "microvm" or empty (default) + prov, err = initMicroVMProvider(ctx, cfg, logger) + if err != nil { + return err + } + } + + // Recover state from any running sandboxes + if err := prov.RecoverState(ctx); err != nil { + logger.Warn("state recovery failed", "error", err) + } + + // Initialize janitor + destroyFn := func(ctx context.Context, sandboxID string) error { + if err := prov.DestroySandbox(ctx, sandboxID); err != nil { + return err + } + return st.DeleteSandbox(ctx, sandboxID) + } + + jan := janitor.New(st, destroyFn, cfg.Janitor.DefaultTTL, logger) + go jan.Start(ctx, cfg.Janitor.Interval) + + // Initialize snapshot puller + imgStore, err := image.NewStore(cfg.Image.BaseDir, logger) + if err != nil { + return fmt.Errorf("init image store for puller: %w", err) + } + puller := snapshotpull.NewPuller(imgStore, st.DB(), logger) + + // Start DaemonService gRPC server (inbound from CLI) + if cfg.Daemon.Enabled { + daemonSrv := daemon.NewServer(prov, st, puller, cfg.HostID, version, logger) + grpcServer := grpc.NewServer() + fluidv1.RegisterDaemonServiceServer(grpcServer, daemonSrv) + + lis, err := net.Listen("tcp", cfg.Daemon.ListenAddr) + if err != nil { + return fmt.Errorf("listen %s: %w", cfg.Daemon.ListenAddr, err) + } + logger.Info("daemon gRPC server listening", "addr", cfg.Daemon.ListenAddr) + + go func() { + if err := 
grpcServer.Serve(lis); err != nil { + logger.Error("daemon gRPC server error", "error", err) + } + }() + go func() { + <-ctx.Done() + grpcServer.GracefulStop() + }() + } + + // Initialize gRPC agent client + agentClient := agent.NewClient( + agent.Config{ + HostID: cfg.HostID, + Version: version, + Address: cfg.ControlPlane.Address, + Insecure: cfg.ControlPlane.Insecure, + CertFile: cfg.ControlPlane.CertFile, + KeyFile: cfg.ControlPlane.KeyFile, + CAFile: cfg.ControlPlane.CAFile, + }, + prov, + st, + puller, + logger, + ) + + logger.Info("sandbox-host ready", + "host_id", cfg.HostID, + "control_plane", cfg.ControlPlane.Address, + "provider", cfg.Provider, + ) + + // Start gRPC agent in background (reconnects automatically) + agentErrCh := make(chan error, 1) + go func() { + agentErrCh <- agentClient.Run(ctx) + }() + + // Wait for shutdown signal or agent fatal error + select { + case <-ctx.Done(): + logger.Info("sandbox-host shutting down") + case err := <-agentErrCh: + if err != nil && ctx.Err() == nil { + logger.Error("agent error", "error", err) + return err + } + } + + return nil +} + +func initMicroVMProvider(ctx context.Context, cfg *config.Config, logger *slog.Logger) (provider.SandboxProvider, error) { + // Initialize microVM manager + vmMgr, err := microvm.NewManager(cfg.MicroVM.QEMUBinary, cfg.MicroVM.WorkDir, logger) + if err != nil { + logger.Warn("microVM manager initialization failed (qemu not available)", "error", err) + vmMgr = nil + } else { + logger.Info("microVM manager initialized", "work_dir", cfg.MicroVM.WorkDir) + } + + // Initialize network manager + netMgr := network.NewNetworkManager( + cfg.Network.DefaultBridge, + cfg.Network.BridgeMap, + cfg.Network.DHCPMode, + logger, + ) + logger.Info("network manager initialized", + "default_bridge", cfg.Network.DefaultBridge, + "dhcp_mode", cfg.Network.DHCPMode, + ) + + // Initialize image store + imgStore, err := image.NewStore(cfg.Image.BaseDir, logger) + if err != nil { + return nil, err + } + 
images, _ := imgStore.ListNames() + logger.Info("image store initialized", + "base_dir", cfg.Image.BaseDir, + "images", len(images), + ) + + return microvmProvider.New(vmMgr, netMgr, imgStore, nil, logger), nil +} + +func initLXCProvider(cfg *config.Config, logger *slog.Logger) (provider.SandboxProvider, error) { + lxcCfg := lxcProvider.Config{ + Host: cfg.LXC.Host, + TokenID: cfg.LXC.TokenID, + Secret: cfg.LXC.Secret, + Node: cfg.LXC.Node, + Storage: cfg.LXC.Storage, + Bridge: cfg.LXC.Bridge, + VMIDStart: cfg.LXC.VMIDStart, + VMIDEnd: cfg.LXC.VMIDEnd, + VerifySSL: cfg.LXC.VerifySSL, + Timeout: cfg.LXC.Timeout, + } + + return lxcProvider.New(lxcCfg, logger) +} diff --git a/fluid-daemon/go.mod b/fluid-daemon/go.mod new file mode 100644 index 00000000..490bef57 --- /dev/null +++ b/fluid-daemon/go.mod @@ -0,0 +1,34 @@ +module github.com/aspectrr/fluid.sh/fluid-daemon + +go 1.24.0 + +toolchain go1.24.4 + +require ( + github.com/aspectrr/fluid.sh/proto/gen/go v0.0.0-00010101000000-000000000000 + github.com/glebarez/sqlite v1.11.0 + github.com/google/uuid v1.6.0 + google.golang.org/grpc v1.79.1 + gopkg.in/yaml.v3 v3.0.1 + gorm.io/gorm v1.31.1 +) + +require ( + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/glebarez/go-sqlite v1.21.2 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jinzhu/now v1.1.5 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/text v0.32.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/protobuf v1.36.10 // indirect + modernc.org/libc v1.22.5 // indirect + modernc.org/mathutil v1.5.0 // indirect + modernc.org/memory v1.5.0 // indirect + modernc.org/sqlite v1.23.1 // indirect +) + +replace github.com/aspectrr/fluid.sh/proto/gen/go => ../proto/gen/go diff 
--git a/fluid-daemon/go.sum b/fluid-daemon/go.sum new file mode 100644 index 00000000..156e4a48 --- /dev/null +++ b/fluid-daemon/go.sum @@ -0,0 +1,70 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo= +github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k= +github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw= +github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jinzhu/inflection v1.0.0 
h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= +golang.org/x/net v0.48.0 
h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= +google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg= +gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs= +modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE= +modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= +modernc.org/mathutil 
v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/sqlite v1.23.1 h1:nrSBg4aRQQwq59JpvGEQ15tNxoO5pX/kUjcRNwSAGQM= +modernc.org/sqlite v1.23.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk= diff --git a/fluid-daemon/internal/agent/client.go b/fluid-daemon/internal/agent/client.go new file mode 100644 index 00000000..42e40fd8 --- /dev/null +++ b/fluid-daemon/internal/agent/client.go @@ -0,0 +1,760 @@ +// Package agent implements the gRPC client that connects the sandbox host +// to the control plane. It handles registration, heartbeat, and dispatching +// of commands received from the control plane to the sandbox provider. +package agent + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "log/slog" + "os" + "strings" + "sync" + "time" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" + + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/provider" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/snapshotpull" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/sshconfig" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/state" + + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" +) + +// Client connects to the control plane via gRPC bidirectional streaming. +type Client struct { + hostID string + hostname string + version string + cpAddr string + insec bool + certFile string + keyFile string + caFile string + + prov provider.SandboxProvider + localStore *state.Store + puller *snapshotpull.Puller + logger *slog.Logger + + // stream is the active bidirectional stream to the control plane. 
+ mu sync.Mutex + stream fluidv1.HostService_ConnectClient + conn *grpc.ClientConn + + // sendMu serializes writes to the gRPC stream. + sendMu sync.Mutex + + // handlerSem bounds the number of concurrent command handler goroutines. + handlerSem chan struct{} +} + +// Config holds configuration for the gRPC agent client. +type Config struct { + HostID string + Hostname string + Version string + Address string + Insecure bool + CertFile string + KeyFile string + CAFile string +} + +// NewClient creates a new agent client. +func NewClient( + cfg Config, + prov provider.SandboxProvider, + localStore *state.Store, + puller *snapshotpull.Puller, + logger *slog.Logger, +) *Client { + hostname := cfg.Hostname + if hostname == "" { + hostname, _ = os.Hostname() + } + + return &Client{ + hostID: cfg.HostID, + hostname: hostname, + version: cfg.Version, + cpAddr: cfg.Address, + insec: cfg.Insecure, + certFile: cfg.CertFile, + keyFile: cfg.KeyFile, + caFile: cfg.CAFile, + prov: prov, + localStore: localStore, + puller: puller, + logger: logger.With("component", "agent"), + handlerSem: make(chan struct{}, 64), + } +} + +// sendMessage serializes writes to the gRPC stream. +func (c *Client) sendMessage(stream fluidv1.HostService_ConnectClient, msg *fluidv1.HostMessage) error { + c.sendMu.Lock() + defer c.sendMu.Unlock() + return stream.Send(msg) +} + +// Run connects to the control plane and runs the message loop. It reconnects +// automatically on failure using exponential backoff. Blocks until ctx is done. +func (c *Client) Run(ctx context.Context) error { + return RunWithReconnect(ctx, c.logger, c.connectAndServe) +} + +// connectAndServe establishes a single connection, registers, and runs the +// message loop. Returns an error when the connection drops. 
+func (c *Client) connectAndServe(ctx context.Context) error { + opts := []grpc.DialOption{} + if c.insec { + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } else { + tlsCreds, err := c.buildTLSCredentials() + if err != nil { + return fmt.Errorf("build TLS credentials: %w", err) + } + opts = append(opts, grpc.WithTransportCredentials(tlsCreds)) + } + + conn, err := grpc.NewClient(c.cpAddr, opts...) + if err != nil { + return fmt.Errorf("dial control plane %s: %w", c.cpAddr, err) + } + defer func() { _ = conn.Close() }() + + c.mu.Lock() + c.conn = conn + c.mu.Unlock() + + client := fluidv1.NewHostServiceClient(conn) + stream, err := client.Connect(ctx) + if err != nil { + return fmt.Errorf("open stream: %w", err) + } + + c.mu.Lock() + c.stream = stream + c.mu.Unlock() + + defer func() { + c.mu.Lock() + c.stream = nil + c.conn = nil + c.mu.Unlock() + }() + + if err := c.register(stream); err != nil { + return err + } + + heartbeatCtx, heartbeatCancel := context.WithCancel(ctx) + defer heartbeatCancel() + go c.heartbeatLoop(heartbeatCtx, stream) + + return c.recvLoop(ctx, stream) +} + +// register sends the HostRegistration message and waits for RegistrationAck. 
+func (c *Client) register(stream fluidv1.HostService_ConnectClient) error { + reg := c.buildRegistration() + + reqID := uuid.New().String() + msg := &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_Registration{ + Registration: reg, + }, + } + + c.logger.Info("sending registration", + "host_id", c.hostID, + "hostname", c.hostname, + ) + + if err := c.sendMessage(stream, msg); err != nil { + return fmt.Errorf("send registration: %w", err) + } + + resp, err := stream.Recv() + if err != nil { + return fmt.Errorf("recv registration ack: %w", err) + } + + ack := resp.GetRegistrationAck() + if ack == nil { + return fmt.Errorf("expected RegistrationAck, got different message type") + } + + if !ack.GetAccepted() { + return fmt.Errorf("registration rejected: %s", ack.GetReason()) + } + + if assigned := ack.GetAssignedHostId(); assigned != "" && assigned != c.hostID { + c.logger.Info("host ID reassigned by control plane", "old", c.hostID, "new", assigned) + c.hostID = assigned + } + + c.logger.Info("registered with control plane", "host_id", c.hostID) + return nil +} + +// buildRegistration constructs the HostRegistration message via the provider. +func (c *Client) buildRegistration() *fluidv1.HostRegistration { + reg := &fluidv1.HostRegistration{ + HostId: c.hostID, + Hostname: c.hostname, + Version: c.version, + } + + if c.prov != nil { + caps, err := c.prov.Capabilities(context.Background()) + if err == nil { + reg.TotalCpus = int32(caps.TotalCPUs) + reg.AvailableCpus = int32(caps.AvailableCPUs) + reg.BaseImages = caps.BaseImages + } + + vms, err := c.prov.ListSourceVMs(context.Background()) + if err == nil { + for _, vm := range vms { + reg.SourceVms = append(reg.SourceVms, &fluidv1.SourceVMInfo{ + Name: vm.Name, + State: vm.State, + IpAddress: vm.IPAddress, + Prepared: vm.Prepared, + }) + } + } + } + + return reg +} + +// heartbeatLoop sends periodic heartbeats to the control plane. 
+func (c *Client) heartbeatLoop(ctx context.Context, stream fluidv1.HostService_ConnectClient) { + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + hb := &fluidv1.Heartbeat{} + + if c.prov != nil { + caps, err := c.prov.Capabilities(ctx) + if err == nil { + hb.AvailableCpus = int32(caps.AvailableCPUs) + } + hb.ActiveSandboxes = int32(c.prov.ActiveSandboxCount()) + vms, err := c.prov.ListSourceVMs(ctx) + if err == nil { + hb.SourceVmCount = int32(len(vms)) + } + } + + msg := &fluidv1.HostMessage{ + Payload: &fluidv1.HostMessage_Heartbeat{ + Heartbeat: hb, + }, + } + + if err := c.sendMessage(stream, msg); err != nil { + c.logger.Error("send heartbeat failed", "error", err) + return + } + } + } +} + +// recvLoop receives and dispatches ControlMessages from the control plane. +func (c *Client) recvLoop(ctx context.Context, stream fluidv1.HostService_ConnectClient) error { + for { + msg, err := stream.Recv() + if err != nil { + if err == io.EOF { + c.logger.Info("stream closed by control plane") + return nil + } + return fmt.Errorf("recv: %w", err) + } + + select { + case c.handlerSem <- struct{}{}: + go func() { + defer func() { <-c.handlerSem }() + c.handleCommand(ctx, stream, msg) + }() + default: + c.logger.Warn("too many concurrent command handlers, dropping", "request_id", msg.GetRequestId()) + } + } +} + +// handleCommand dispatches a ControlMessage to the appropriate handler. 
+func (c *Client) handleCommand(ctx context.Context, stream fluidv1.HostService_ConnectClient, msg *fluidv1.ControlMessage) { + reqID := msg.GetRequestId() + + var resp *fluidv1.HostMessage + + switch cmd := msg.Payload.(type) { + case *fluidv1.ControlMessage_CreateSandbox: + resp = c.handleCreateSandbox(ctx, reqID, cmd.CreateSandbox) + case *fluidv1.ControlMessage_DestroySandbox: + resp = c.handleDestroySandbox(ctx, reqID, cmd.DestroySandbox) + case *fluidv1.ControlMessage_StartSandbox: + resp = c.handleStartSandbox(ctx, reqID, cmd.StartSandbox) + case *fluidv1.ControlMessage_StopSandbox: + resp = c.handleStopSandbox(ctx, reqID, cmd.StopSandbox) + case *fluidv1.ControlMessage_RunCommand: + resp = c.handleRunCommand(ctx, reqID, cmd.RunCommand) + case *fluidv1.ControlMessage_CreateSnapshot: + resp = c.handleCreateSnapshot(ctx, reqID, cmd.CreateSnapshot) + case *fluidv1.ControlMessage_PrepareSourceVm: + resp = c.handlePrepareSourceVM(ctx, reqID, cmd.PrepareSourceVm) + case *fluidv1.ControlMessage_RunSourceCommand: + resp = c.handleRunSourceCommand(ctx, reqID, cmd.RunSourceCommand) + case *fluidv1.ControlMessage_ReadSourceFile: + resp = c.handleReadSourceFile(ctx, reqID, cmd.ReadSourceFile) + case *fluidv1.ControlMessage_ListSourceVms: + resp = c.handleListSourceVMs(ctx, reqID) + case *fluidv1.ControlMessage_ValidateSourceVm: + resp = c.handleValidateSourceVM(ctx, reqID, cmd.ValidateSourceVm) + case *fluidv1.ControlMessage_DiscoverHosts: + resp = c.handleDiscoverHosts(ctx, reqID, cmd.DiscoverHosts) + default: + c.logger.Warn("unknown command type", "request_id", reqID) + resp = errorResponse(reqID, "", "unknown command type") + } + + if resp != nil { + if err := c.sendMessage(stream, resp); err != nil { + c.logger.Error("send response failed", "request_id", reqID, "error", err) + } + } +} + +// --------------------------------------------------------------------------- +// Sandbox command handlers +// 
--------------------------------------------------------------------------- + +func (c *Client) handleCreateSandbox(ctx context.Context, reqID string, cmd *fluidv1.CreateSandboxCommand) *fluidv1.HostMessage { + sandboxID := cmd.GetSandboxId() + c.logger.Info("creating sandbox", "sandbox_id", sandboxID, "base_image", cmd.GetBaseImage()) + + // Snapshot-pull if source host connection is provided + baseImage := cmd.GetBaseImage() + if conn := cmd.GetSourceHostConnection(); conn != nil && cmd.GetSourceVm() != "" && c.puller != nil { + var backend snapshotpull.SnapshotBackend + switch conn.GetType() { + case "libvirt": + backend = snapshotpull.NewLibvirtBackend( + conn.GetSshHost(), int(conn.GetSshPort()), + conn.GetSshUser(), conn.GetSshIdentityFile(), c.logger) + case "proxmox": + backend = snapshotpull.NewProxmoxBackend( + conn.GetProxmoxHost(), conn.GetProxmoxTokenId(), + conn.GetProxmoxSecret(), conn.GetProxmoxNode(), + conn.GetProxmoxVerifySsl(), c.logger) + } + if backend != nil { + mode := "cached" + if cmd.GetSnapshotMode() == fluidv1.SnapshotMode_SNAPSHOT_MODE_FRESH { + mode = "fresh" + } + pullResult, err := c.puller.Pull(ctx, snapshotpull.PullRequest{ + SourceHost: conn.GetSshHost(), + VMName: cmd.GetSourceVm(), + SnapshotMode: mode, + }, backend) + if err != nil { + return errorResponse(reqID, sandboxID, fmt.Sprintf("pull snapshot: %v", err)) + } + baseImage = pullResult.ImageName + c.logger.Info("snapshot pulled", "image", baseImage, "cached", pullResult.Cached) + } + } + + result, err := c.prov.CreateSandbox(ctx, provider.CreateRequest{ + SandboxID: sandboxID, + Name: cmd.GetName(), + BaseImage: baseImage, + SourceVM: cmd.GetSourceVm(), + Network: cmd.GetNetwork(), + VCPUs: int(cmd.GetVcpus()), + MemoryMB: int(cmd.GetMemoryMb()), + TTLSeconds: int(cmd.GetTtlSeconds()), + AgentID: cmd.GetAgentId(), + SSHPublicKey: cmd.GetSshPublicKey(), + }) + if err != nil { + return errorResponse(reqID, sandboxID, fmt.Sprintf("create sandbox: %v", err)) + } + + // 
Persist to local state + localSandbox := &state.Sandbox{ + ID: sandboxID, + Name: result.Name, + BaseImage: baseImage, + State: result.State, + IPAddress: result.IPAddress, + MACAddress: result.MACAddress, + TAPDevice: "", + Bridge: result.Bridge, + VCPUs: int(cmd.GetVcpus()), + MemoryMB: int(cmd.GetMemoryMb()), + TTLSeconds: int(cmd.GetTtlSeconds()), + AgentID: cmd.GetAgentId(), + } + if err := c.localStore.CreateSandbox(ctx, localSandbox); err != nil { + c.logger.Error("failed to persist sandbox locally", "sandbox_id", sandboxID, "error", err) + } + + c.logger.Info("sandbox created", + "sandbox_id", sandboxID, + "ip", result.IPAddress, + "bridge", result.Bridge, + ) + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_SandboxCreated{ + SandboxCreated: &fluidv1.SandboxCreated{ + SandboxId: sandboxID, + Name: result.Name, + State: result.State, + IpAddress: result.IPAddress, + MacAddress: result.MACAddress, + Bridge: result.Bridge, + Pid: int32(result.PID), + }, + }, + } +} + +func (c *Client) handleDestroySandbox(ctx context.Context, reqID string, cmd *fluidv1.DestroySandboxCommand) *fluidv1.HostMessage { + sandboxID := cmd.GetSandboxId() + c.logger.Info("destroying sandbox", "sandbox_id", sandboxID) + + if err := c.prov.DestroySandbox(ctx, sandboxID); err != nil { + c.logger.Error("destroy sandbox failed", "sandbox_id", sandboxID, "error", err) + return errorResponse(reqID, sandboxID, fmt.Sprintf("destroy failed: %s", err.Error())) + } + + if err := c.localStore.DeleteSandbox(ctx, sandboxID); err != nil { + c.logger.Error("delete local sandbox state failed", "sandbox_id", sandboxID, "error", err) + } + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_SandboxDestroyed{ + SandboxDestroyed: &fluidv1.SandboxDestroyed{ + SandboxId: sandboxID, + }, + }, + } +} + +func (c *Client) handleStartSandbox(ctx context.Context, reqID string, cmd *fluidv1.StartSandboxCommand) *fluidv1.HostMessage { + sandboxID := 
cmd.GetSandboxId() + + result, err := c.prov.StartSandbox(ctx, sandboxID) + if err != nil { + return errorResponse(reqID, sandboxID, fmt.Sprintf("start sandbox: %v", err)) + } + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_SandboxStarted{ + SandboxStarted: &fluidv1.SandboxStarted{ + SandboxId: sandboxID, + State: result.State, + IpAddress: result.IPAddress, + }, + }, + } +} + +func (c *Client) handleStopSandbox(ctx context.Context, reqID string, cmd *fluidv1.StopSandboxCommand) *fluidv1.HostMessage { + sandboxID := cmd.GetSandboxId() + + if err := c.prov.StopSandbox(ctx, sandboxID, cmd.GetForce()); err != nil { + return errorResponse(reqID, sandboxID, fmt.Sprintf("stop: %v", err)) + } + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_SandboxStopped{ + SandboxStopped: &fluidv1.SandboxStopped{ + SandboxId: sandboxID, + State: "STOPPED", + }, + }, + } +} + +func (c *Client) handleRunCommand(ctx context.Context, reqID string, cmd *fluidv1.RunCommandCommand) *fluidv1.HostMessage { + sandboxID := cmd.GetSandboxId() + command := cmd.GetCommand() + + c.logger.Info("running command", "sandbox_id", sandboxID, "command", command) + + timeout := time.Duration(cmd.GetTimeoutSeconds()) * time.Second + + result, err := c.prov.RunCommand(ctx, sandboxID, command, timeout) + if err != nil { + return errorResponse(reqID, sandboxID, fmt.Sprintf("run command: %v", err)) + } + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_CommandResult{ + CommandResult: &fluidv1.CommandResult{ + SandboxId: sandboxID, + Stdout: result.Stdout, + Stderr: result.Stderr, + ExitCode: int32(result.ExitCode), + DurationMs: result.DurationMS, + }, + }, + } +} + +func (c *Client) handleCreateSnapshot(ctx context.Context, reqID string, cmd *fluidv1.SnapshotCommand) *fluidv1.HostMessage { + sandboxID := cmd.GetSandboxId() + name := cmd.GetSnapshotName() + + result, err := c.prov.CreateSnapshot(ctx, 
sandboxID, name) + if err != nil { + return errorResponse(reqID, sandboxID, fmt.Sprintf("create snapshot: %v", err)) + } + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_SnapshotCreated{ + SnapshotCreated: &fluidv1.SnapshotCreated{ + SandboxId: sandboxID, + SnapshotId: result.SnapshotID, + SnapshotName: result.SnapshotName, + }, + }, + } +} + +// --------------------------------------------------------------------------- +// Source VM command handlers +// --------------------------------------------------------------------------- + +func (c *Client) handlePrepareSourceVM(ctx context.Context, reqID string, cmd *fluidv1.PrepareSourceVMCommand) *fluidv1.HostMessage { + vmName := cmd.GetSourceVm() + + result, err := c.prov.PrepareSourceVM(ctx, vmName, cmd.GetSshUser(), cmd.GetSshKeyPath()) + if err != nil { + return errorResponse(reqID, "", fmt.Sprintf("prepare source VM %s: %v", vmName, err)) + } + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_SourceVmPrepared{ + SourceVmPrepared: &fluidv1.SourceVMPrepared{ + SourceVm: result.SourceVM, + IpAddress: result.IPAddress, + Prepared: result.Prepared, + UserCreated: result.UserCreated, + ShellInstalled: result.ShellInstalled, + CaKeyInstalled: result.CAKeyInstalled, + SshdConfigured: result.SSHDConfigured, + PrincipalsCreated: result.PrincipalsCreated, + SshdRestarted: result.SSHDRestarted, + }, + }, + } +} + +func (c *Client) handleRunSourceCommand(ctx context.Context, reqID string, cmd *fluidv1.RunSourceCommandCommand) *fluidv1.HostMessage { + vmName := cmd.GetSourceVm() + command := cmd.GetCommand() + + timeout := time.Duration(cmd.GetTimeoutSeconds()) * time.Second + + result, err := c.prov.RunSourceCommand(ctx, vmName, command, timeout) + if err != nil { + return errorResponse(reqID, "", fmt.Sprintf("run source command: %v", err)) + } + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_SourceCommandResult{ + 
SourceCommandResult: &fluidv1.SourceCommandResult{ + SourceVm: vmName, + ExitCode: int32(result.ExitCode), + Stdout: result.Stdout, + Stderr: result.Stderr, + }, + }, + } +} + +func (c *Client) handleReadSourceFile(ctx context.Context, reqID string, cmd *fluidv1.ReadSourceFileCommand) *fluidv1.HostMessage { + vmName := cmd.GetSourceVm() + + content, err := c.prov.ReadSourceFile(ctx, vmName, cmd.GetPath()) + if err != nil { + return errorResponse(reqID, "", fmt.Sprintf("read source file: %v", err)) + } + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_SourceFileResult{ + SourceFileResult: &fluidv1.SourceFileResult{ + SourceVm: vmName, + Path: cmd.GetPath(), + Content: content, + }, + }, + } +} + +func (c *Client) handleListSourceVMs(ctx context.Context, reqID string) *fluidv1.HostMessage { + vms, err := c.prov.ListSourceVMs(ctx) + if err != nil { + return errorResponse(reqID, "", fmt.Sprintf("list VMs: %v", err)) + } + + entries := make([]*fluidv1.SourceVMListEntry, len(vms)) + for i, vm := range vms { + entries[i] = &fluidv1.SourceVMListEntry{ + Name: vm.Name, + State: vm.State, + IpAddress: vm.IPAddress, + Prepared: vm.Prepared, + } + } + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_SourceVmsList{ + SourceVmsList: &fluidv1.SourceVMsList{ + Vms: entries, + }, + }, + } +} + +func (c *Client) handleValidateSourceVM(ctx context.Context, reqID string, cmd *fluidv1.ValidateSourceVMCommand) *fluidv1.HostMessage { + vmName := cmd.GetSourceVm() + + result, err := c.prov.ValidateSourceVM(ctx, vmName) + if err != nil { + return errorResponse(reqID, "", fmt.Sprintf("validate source VM: %v", err)) + } + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_SourceVmValidation{ + SourceVmValidation: &fluidv1.SourceVMValidation{ + SourceVm: result.VMName, + Valid: result.Valid, + State: result.State, + MacAddress: result.MACAddress, + IpAddress: result.IPAddress, + HasNetwork: 
result.HasNetwork, + Warnings: result.Warnings, + Errors: result.Errors, + }, + }, + } +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +func (c *Client) handleDiscoverHosts(ctx context.Context, reqID string, cmd *fluidv1.DiscoverHostsCommand) *fluidv1.HostMessage { + c.logger.Info("discovering hosts from SSH config") + + hosts, err := sshconfig.Parse(strings.NewReader(cmd.GetSshConfigContent())) + if err != nil { + return errorResponse(reqID, "", fmt.Sprintf("parse ssh config: %v", err)) + } + + probeResults := sshconfig.ProbeAll(ctx, hosts) + + discovered := make([]*fluidv1.DiscoveredHost, 0, len(probeResults)) + for _, pr := range probeResults { + discovered = append(discovered, &fluidv1.DiscoveredHost{ + Name: pr.Host.Name, + Hostname: pr.Host.HostName, + User: pr.Host.User, + Port: int32(pr.Host.Port), + IdentityFile: pr.Host.IdentityFile, + Reachable: pr.Reachable, + HasLibvirt: pr.HasLibvirt, + HasProxmox: pr.HasProxmox, + Vms: pr.VMs, + Error: pr.Error, + }) + } + + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_DiscoverHostsResult{ + DiscoverHostsResult: &fluidv1.DiscoverHostsResult{ + Hosts: discovered, + }, + }, + } +} + +// buildTLSCredentials constructs gRPC transport credentials from the client's +// TLS configuration. If cert/key are provided, loads a client certificate. +// If a CA file is provided, uses it for server verification. Otherwise falls +// back to the system certificate pool. 
+func (c *Client) buildTLSCredentials() (credentials.TransportCredentials, error) { + tlsCfg := &tls.Config{} + + if c.certFile != "" && c.keyFile != "" { + cert, err := tls.LoadX509KeyPair(c.certFile, c.keyFile) + if err != nil { + return nil, fmt.Errorf("load client cert/key: %w", err) + } + tlsCfg.Certificates = []tls.Certificate{cert} + } + + if c.caFile != "" { + caPEM, err := os.ReadFile(c.caFile) + if err != nil { + return nil, fmt.Errorf("read CA file: %w", err) + } + pool := x509.NewCertPool() + if !pool.AppendCertsFromPEM(caPEM) { + return nil, fmt.Errorf("failed to parse CA certificate") + } + tlsCfg.RootCAs = pool + } + + return credentials.NewTLS(tlsCfg), nil +} + +// errorResponse builds an ErrorReport HostMessage. +func errorResponse(reqID, sandboxID, errMsg string) *fluidv1.HostMessage { + return &fluidv1.HostMessage{ + RequestId: reqID, + Payload: &fluidv1.HostMessage_ErrorReport{ + ErrorReport: &fluidv1.ErrorReport{ + Error: errMsg, + SandboxId: sandboxID, + }, + }, + } +} diff --git a/fluid-daemon/internal/agent/reconnect.go b/fluid-daemon/internal/agent/reconnect.go new file mode 100644 index 00000000..def5426c --- /dev/null +++ b/fluid-daemon/internal/agent/reconnect.go @@ -0,0 +1,70 @@ +package agent + +import ( + "context" + "log/slog" + "math" + "time" +) + +// connectFunc is a function that establishes a connection and runs until +// it fails or the context is done. +type connectFunc func(ctx context.Context) error + +// RunWithReconnect calls connectFn in a loop with exponential backoff. +// It returns only when ctx is cancelled. 
+func RunWithReconnect(ctx context.Context, logger *slog.Logger, connectFn connectFunc) error { + const ( + initialBackoff = 1 * time.Second + maxBackoff = 60 * time.Second + backoffFactor = 2.0 + ) + + backoff := initialBackoff + attempt := 0 + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + attempt++ + logger.Info("connecting to control plane", "attempt", attempt) + + connStart := time.Now() + err := connectFn(ctx) + if err == nil { + // Clean disconnect (e.g., context cancelled during serve). + return nil + } + + // Check if the context was cancelled (normal shutdown). + if ctx.Err() != nil { + return ctx.Err() + } + + logger.Error("connection lost", "error", err, "attempt", attempt, "backoff", backoff) + + // Reset backoff after a successful connection that lasted > 5 minutes. + // This means the connection was stable, so next failure should start + // with a short backoff. + if time.Since(connStart) > 5*time.Minute { + backoff = initialBackoff + } + + // Wait with backoff before reconnecting. + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(backoff): + } + + // Increase backoff with cap. 
+ backoff = time.Duration(math.Min( + float64(backoff)*backoffFactor, + float64(maxBackoff), + )) + } +} diff --git a/fluid-daemon/internal/agent/reconnect_test.go b/fluid-daemon/internal/agent/reconnect_test.go new file mode 100644 index 00000000..7d499a20 --- /dev/null +++ b/fluid-daemon/internal/agent/reconnect_test.go @@ -0,0 +1,130 @@ +package agent + +import ( + "context" + "errors" + "log/slog" + "math" + "sync/atomic" + "testing" + "time" +) + +func TestRunWithReconnect_ContextCancelled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // cancel immediately + + connectFn := func(ctx context.Context) error { + return errors.New("should not be called meaningfully") + } + + err := RunWithReconnect(ctx, slog.Default(), connectFn) + if !errors.Is(err, context.Canceled) { + t.Fatalf("expected context.Canceled, got %v", err) + } +} + +func TestRunWithReconnect_SuccessfulConnect(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + connectFn := func(ctx context.Context) error { + return nil // success on first call + } + + err := RunWithReconnect(ctx, slog.Default(), connectFn) + if err != nil { + t.Fatalf("expected nil, got %v", err) + } +} + +func TestRunWithReconnect_RetriesOnError(t *testing.T) { + var callCount atomic.Int32 + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + connectFn := func(ctx context.Context) error { + n := callCount.Add(1) + if n < 3 { + return errors.New("connection failed") + } + return nil // succeed on 3rd attempt + } + + // Override timing: we cancel context after connectFn succeeds, so + // backoff waits are the bottleneck. The first two failures will incur + // backoff waits of 1s and 2s respectively from the production code. + // We use a generous timeout above to accommodate that. 
+ err := RunWithReconnect(ctx, slog.Default(), connectFn) + if err != nil { + t.Fatalf("expected nil after retries, got %v", err) + } + + got := callCount.Load() + if got != 3 { + t.Fatalf("expected 3 calls, got %d", got) + } +} + +func TestRunWithReconnect_BackoffCap(t *testing.T) { + // Verify the backoff math: starting at 1s, doubling, capped at 60s. + // This tests the algorithm without waiting for actual backoff durations. + const ( + initialBackoff = 1 * time.Second + maxBackoff = 60 * time.Second + backoffFactor = 2.0 + ) + + backoff := initialBackoff + expected := []time.Duration{ + 1 * time.Second, + 2 * time.Second, + 4 * time.Second, + 8 * time.Second, + 16 * time.Second, + 32 * time.Second, + 60 * time.Second, // capped + 60 * time.Second, // stays capped + } + + for i, want := range expected { + if backoff != want { + t.Fatalf("step %d: expected backoff %v, got %v", i, want, backoff) + } + // Apply the same backoff calculation as RunWithReconnect + backoff = time.Duration(math.Min( + float64(backoff)*backoffFactor, + float64(maxBackoff), + )) + } +} + +func TestRunWithReconnect_CancelDuringBackoff(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + var callCount atomic.Int32 + + connectFn := func(ctx context.Context) error { + n := callCount.Add(1) + if n == 1 { + // After first failure, cancel context so RunWithReconnect + // exits during the backoff wait. 
+ go func() { + time.Sleep(50 * time.Millisecond) + cancel() + }() + } + return errors.New("connection failed") + } + + err := RunWithReconnect(ctx, slog.Default(), connectFn) + if !errors.Is(err, context.Canceled) { + t.Fatalf("expected context.Canceled, got %v", err) + } + + got := callCount.Load() + if got < 1 { + t.Fatalf("expected at least 1 call, got %d", got) + } +} diff --git a/fluid-daemon/internal/config/config.go b/fluid-daemon/internal/config/config.go new file mode 100644 index 00000000..6c5ca8c7 --- /dev/null +++ b/fluid-daemon/internal/config/config.go @@ -0,0 +1,265 @@ +package config + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "gopkg.in/yaml.v3" +) + +// Config holds all configuration for the sandbox host daemon. +type Config struct { + // HostID is a persistent identifier for this host. Generated on first run. + HostID string `yaml:"host_id"` + + // Provider selects the sandbox provider: "microvm" (default) or "lxc". + Provider string `yaml:"provider"` + + // Daemon configures the inbound gRPC server for CLI access. + Daemon DaemonConfig `yaml:"daemon"` + + // ControlPlane configures the connection to the control plane. + ControlPlane ControlPlaneConfig `yaml:"control_plane"` + + // MicroVM configures QEMU microVM defaults. + MicroVM MicroVMConfig `yaml:"microvm"` + + // Network configures bridge and TAP networking. + Network NetworkConfig `yaml:"network"` + + // Image configures base image storage. + Image ImageConfig `yaml:"image"` + + // SSH configures SSH CA and key management. + SSH SSHConfig `yaml:"ssh"` + + // Libvirt configures libvirt access for source VM operations. + Libvirt LibvirtConfig `yaml:"libvirt"` + + // LXC configures Proxmox LXC container management (only used when provider: lxc). + LXC LXCConfig `yaml:"lxc"` + + // State configures local state storage. + State StateConfig `yaml:"state"` + + // Janitor configures TTL enforcement. 
+ Janitor JanitorConfig `yaml:"janitor"` +} + +// DaemonConfig configures the inbound gRPC server for direct CLI access. +type DaemonConfig struct { + // ListenAddr is the address the daemon gRPC server listens on. + ListenAddr string `yaml:"listen_addr"` + + // Enabled controls whether the daemon gRPC server starts. + Enabled bool `yaml:"enabled"` + + // TLSCertFile is the path to the TLS certificate for the daemon gRPC server. + TLSCertFile string `yaml:"tls_cert_file"` + + // TLSKeyFile is the path to the TLS key for the daemon gRPC server. + TLSKeyFile string `yaml:"tls_key_file"` +} + +// LXCConfig configures LXC provider settings for Proxmox. +type LXCConfig struct { + Host string `yaml:"host"` + TokenID string `yaml:"token_id"` + Secret string `yaml:"secret"` + Node string `yaml:"node"` + Storage string `yaml:"storage"` + Bridge string `yaml:"bridge"` + VMIDStart int `yaml:"vmid_start"` + VMIDEnd int `yaml:"vmid_end"` + VerifySSL bool `yaml:"verify_ssl"` + Timeout time.Duration `yaml:"timeout"` +} + +// ControlPlaneConfig configures the gRPC connection to the control plane. +type ControlPlaneConfig struct { + // Address is the control plane gRPC endpoint (host:port). + Address string `yaml:"address"` + + // Token is the authentication token for the control plane. + Token string `yaml:"token"` + + // TLS configures mTLS for the connection. + CertFile string `yaml:"cert_file"` + KeyFile string `yaml:"key_file"` + CAFile string `yaml:"ca_file"` + + // Insecure disables TLS (for development). + Insecure bool `yaml:"insecure"` +} + +// MicroVMConfig configures QEMU microVM defaults. +type MicroVMConfig struct { + // QEMUBinary is the path to qemu-system-x86_64. + QEMUBinary string `yaml:"qemu_binary"` + + // WorkDir is the directory for sandbox runtime data (overlays, PID files). + WorkDir string `yaml:"work_dir"` + + // DefaultVCPUs is the default number of vCPUs per sandbox. 
+ DefaultVCPUs int `yaml:"default_vcpus"` + + // DefaultMemoryMB is the default memory per sandbox in MB. + DefaultMemoryMB int `yaml:"default_memory_mb"` + + // CommandTimeout is the default command execution timeout. + CommandTimeout time.Duration `yaml:"command_timeout"` + + // IPDiscoveryTimeout is how long to wait for IP discovery. + IPDiscoveryTimeout time.Duration `yaml:"ip_discovery_timeout"` +} + +// NetworkConfig configures networking for sandboxes. +type NetworkConfig struct { + // DefaultBridge is the default bridge for sandboxes. + DefaultBridge string `yaml:"default_bridge"` + + // BridgeMap maps libvirt network names to local bridge names. + BridgeMap map[string]string `yaml:"bridge_map"` + + // DHCPMode determines IP discovery strategy: "libvirt", "arp", or "dnsmasq". + DHCPMode string `yaml:"dhcp_mode"` +} + +// ImageConfig configures base image storage and management. +type ImageConfig struct { + // BaseDir is the directory containing base QCOW2 images. + BaseDir string `yaml:"base_dir"` +} + +// SSHConfig configures SSH CA and key management. +type SSHConfig struct { + // CAKeyPath is the path to the SSH CA private key. + CAKeyPath string `yaml:"ca_key_path"` + + // CAPubKeyPath is the path to the SSH CA public key. + CAPubKeyPath string `yaml:"ca_pub_key_path"` + + // KeyDir is the directory for ephemeral SSH keys. + KeyDir string `yaml:"key_dir"` + + // CertTTL is the lifetime of issued SSH certificates. + CertTTL time.Duration `yaml:"cert_ttl"` + + // DefaultUser is the default SSH user for sandbox access. + DefaultUser string `yaml:"default_user"` + + // ProxyJump is an optional SSH proxy jump host. + ProxyJump string `yaml:"proxy_jump"` +} + +// LibvirtConfig configures libvirt access for source VM operations. +type LibvirtConfig struct { + // URI is the libvirt connection URI (e.g., "qemu:///system"). + URI string `yaml:"uri"` + + // Network is the default libvirt network name. 
+ Network string `yaml:"network"` +} + +// StateConfig configures local state storage. +type StateConfig struct { + // DBPath is the path to the SQLite database file. + DBPath string `yaml:"db_path"` +} + +// JanitorConfig configures TTL enforcement. +type JanitorConfig struct { + // Interval is how often the janitor runs. + Interval time.Duration `yaml:"interval"` + + // DefaultTTL is the default sandbox TTL if none is specified. + DefaultTTL time.Duration `yaml:"default_ttl"` +} + +// DefaultConfig returns a configuration with sensible defaults. +func DefaultConfig() Config { + home, _ := os.UserHomeDir() + fluidDir := filepath.Join(home, ".fluid") + + return Config{ + Daemon: DaemonConfig{ + ListenAddr: ":9091", + Enabled: true, + }, + ControlPlane: ControlPlaneConfig{ + Address: "localhost:9090", + Insecure: true, + }, + MicroVM: MicroVMConfig{ + QEMUBinary: "qemu-system-x86_64", + WorkDir: "/var/lib/fluid/sandboxes", + DefaultVCPUs: 2, + DefaultMemoryMB: 2048, + CommandTimeout: 5 * time.Minute, + IPDiscoveryTimeout: 2 * time.Minute, + }, + Network: NetworkConfig{ + DefaultBridge: "virbr0", + BridgeMap: map[string]string{ + "default": "virbr0", + }, + DHCPMode: "arp", + }, + Image: ImageConfig{ + BaseDir: "/var/lib/fluid/images", + }, + SSH: SSHConfig{ + CAKeyPath: filepath.Join(fluidDir, "ssh_ca"), + CAPubKeyPath: filepath.Join(fluidDir, "ssh_ca.pub"), + KeyDir: filepath.Join(fluidDir, "keys"), + CertTTL: 30 * time.Minute, + DefaultUser: "sandbox", + }, + Libvirt: LibvirtConfig{ + URI: "qemu:///system", + Network: "default", + }, + State: StateConfig{ + DBPath: filepath.Join(fluidDir, "sandbox-host.db"), + }, + Janitor: JanitorConfig{ + Interval: 1 * time.Minute, + DefaultTTL: 24 * time.Hour, + }, + } +} + +// Load reads configuration from a YAML file, falling back to defaults. 
+func Load(path string) (*Config, error) { + cfg := DefaultConfig() + + data, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return &cfg, nil + } + return nil, fmt.Errorf("read config: %w", err) + } + + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("parse config: %w", err) + } + + return &cfg, nil +} + +// Save writes the configuration to a YAML file. +func Save(path string, cfg *Config) error { + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + return fmt.Errorf("create config dir: %w", err) + } + + data, err := yaml.Marshal(cfg) + if err != nil { + return fmt.Errorf("marshal config: %w", err) + } + + return os.WriteFile(path, data, 0o644) +} diff --git a/fluid-daemon/internal/config/config_test.go b/fluid-daemon/internal/config/config_test.go new file mode 100644 index 00000000..3452f21a --- /dev/null +++ b/fluid-daemon/internal/config/config_test.go @@ -0,0 +1,344 @@ +package config + +import ( + "os" + "path/filepath" + "testing" + "time" +) + +func TestDefaultConfig(t *testing.T) { + cfg := DefaultConfig() + + // ControlPlane defaults + if cfg.ControlPlane.Address != "localhost:9090" { + t.Errorf("ControlPlane.Address = %q, want %q", cfg.ControlPlane.Address, "localhost:9090") + } + if !cfg.ControlPlane.Insecure { + t.Error("ControlPlane.Insecure = false, want true") + } + + // MicroVM defaults + if cfg.MicroVM.QEMUBinary != "qemu-system-x86_64" { + t.Errorf("MicroVM.QEMUBinary = %q, want %q", cfg.MicroVM.QEMUBinary, "qemu-system-x86_64") + } + if cfg.MicroVM.WorkDir != "/var/lib/fluid/sandboxes" { + t.Errorf("MicroVM.WorkDir = %q, want %q", cfg.MicroVM.WorkDir, "/var/lib/fluid/sandboxes") + } + if cfg.MicroVM.DefaultVCPUs != 2 { + t.Errorf("MicroVM.DefaultVCPUs = %d, want %d", cfg.MicroVM.DefaultVCPUs, 2) + } + if cfg.MicroVM.DefaultMemoryMB != 2048 { + t.Errorf("MicroVM.DefaultMemoryMB = %d, want %d", cfg.MicroVM.DefaultMemoryMB, 2048) + } + if cfg.MicroVM.CommandTimeout != 5*time.Minute { 
+ t.Errorf("MicroVM.CommandTimeout = %v, want %v", cfg.MicroVM.CommandTimeout, 5*time.Minute) + } + if cfg.MicroVM.IPDiscoveryTimeout != 2*time.Minute { + t.Errorf("MicroVM.IPDiscoveryTimeout = %v, want %v", cfg.MicroVM.IPDiscoveryTimeout, 2*time.Minute) + } + + // Network defaults + if cfg.Network.DefaultBridge != "virbr0" { + t.Errorf("Network.DefaultBridge = %q, want %q", cfg.Network.DefaultBridge, "virbr0") + } + if cfg.Network.DHCPMode != "arp" { + t.Errorf("Network.DHCPMode = %q, want %q", cfg.Network.DHCPMode, "arp") + } + if v, ok := cfg.Network.BridgeMap["default"]; !ok || v != "virbr0" { + t.Errorf("Network.BridgeMap[\"default\"] = %q (ok=%v), want %q", v, ok, "virbr0") + } + + // Image defaults + if cfg.Image.BaseDir != "/var/lib/fluid/images" { + t.Errorf("Image.BaseDir = %q, want %q", cfg.Image.BaseDir, "/var/lib/fluid/images") + } + + // SSH defaults + home, _ := os.UserHomeDir() + fluidDir := filepath.Join(home, ".fluid") + if cfg.SSH.CAKeyPath != filepath.Join(fluidDir, "ssh_ca") { + t.Errorf("SSH.CAKeyPath = %q, want %q", cfg.SSH.CAKeyPath, filepath.Join(fluidDir, "ssh_ca")) + } + if cfg.SSH.CAPubKeyPath != filepath.Join(fluidDir, "ssh_ca.pub") { + t.Errorf("SSH.CAPubKeyPath = %q, want %q", cfg.SSH.CAPubKeyPath, filepath.Join(fluidDir, "ssh_ca.pub")) + } + if cfg.SSH.KeyDir != filepath.Join(fluidDir, "keys") { + t.Errorf("SSH.KeyDir = %q, want %q", cfg.SSH.KeyDir, filepath.Join(fluidDir, "keys")) + } + if cfg.SSH.CertTTL != 30*time.Minute { + t.Errorf("SSH.CertTTL = %v, want %v", cfg.SSH.CertTTL, 30*time.Minute) + } + if cfg.SSH.DefaultUser != "sandbox" { + t.Errorf("SSH.DefaultUser = %q, want %q", cfg.SSH.DefaultUser, "sandbox") + } + + // Libvirt defaults + if cfg.Libvirt.URI != "qemu:///system" { + t.Errorf("Libvirt.URI = %q, want %q", cfg.Libvirt.URI, "qemu:///system") + } + if cfg.Libvirt.Network != "default" { + t.Errorf("Libvirt.Network = %q, want %q", cfg.Libvirt.Network, "default") + } + + // State defaults + if cfg.State.DBPath != 
filepath.Join(fluidDir, "sandbox-host.db") { + t.Errorf("State.DBPath = %q, want %q", cfg.State.DBPath, filepath.Join(fluidDir, "sandbox-host.db")) + } + + // Janitor defaults + if cfg.Janitor.Interval != 1*time.Minute { + t.Errorf("Janitor.Interval = %v, want %v", cfg.Janitor.Interval, 1*time.Minute) + } + if cfg.Janitor.DefaultTTL != 24*time.Hour { + t.Errorf("Janitor.DefaultTTL = %v, want %v", cfg.Janitor.DefaultTTL, 24*time.Hour) + } +} + +func TestLoad_MissingFile(t *testing.T) { + path := filepath.Join(t.TempDir(), "nonexistent.yaml") + + cfg, err := Load(path) + if err != nil { + t.Fatalf("Load(%q) returned error: %v", path, err) + } + + // Should return defaults + defaults := DefaultConfig() + if cfg.MicroVM.DefaultVCPUs != defaults.MicroVM.DefaultVCPUs { + t.Errorf("DefaultVCPUs = %d, want default %d", cfg.MicroVM.DefaultVCPUs, defaults.MicroVM.DefaultVCPUs) + } + if cfg.Network.DefaultBridge != defaults.Network.DefaultBridge { + t.Errorf("DefaultBridge = %q, want default %q", cfg.Network.DefaultBridge, defaults.Network.DefaultBridge) + } + if cfg.ControlPlane.Address != defaults.ControlPlane.Address { + t.Errorf("ControlPlane.Address = %q, want default %q", cfg.ControlPlane.Address, defaults.ControlPlane.Address) + } +} + +func TestLoad_ValidYAML(t *testing.T) { + tests := []struct { + name string + yaml string + check func(t *testing.T, cfg *Config) + }{ + { + name: "override control plane address", + yaml: `control_plane: + address: "cp.example.com:443" + insecure: false +`, + check: func(t *testing.T, cfg *Config) { + if cfg.ControlPlane.Address != "cp.example.com:443" { + t.Errorf("ControlPlane.Address = %q, want %q", cfg.ControlPlane.Address, "cp.example.com:443") + } + if cfg.ControlPlane.Insecure { + t.Error("ControlPlane.Insecure = true, want false") + } + }, + }, + { + name: "override microvm settings", + yaml: `microvm: + default_vcpus: 4 + default_memory_mb: 4096 + qemu_binary: /usr/local/bin/qemu-system-x86_64 +`, + check: func(t *testing.T, 
cfg *Config) { + if cfg.MicroVM.DefaultVCPUs != 4 { + t.Errorf("DefaultVCPUs = %d, want %d", cfg.MicroVM.DefaultVCPUs, 4) + } + if cfg.MicroVM.DefaultMemoryMB != 4096 { + t.Errorf("DefaultMemoryMB = %d, want %d", cfg.MicroVM.DefaultMemoryMB, 4096) + } + if cfg.MicroVM.QEMUBinary != "/usr/local/bin/qemu-system-x86_64" { + t.Errorf("QEMUBinary = %q, want %q", cfg.MicroVM.QEMUBinary, "/usr/local/bin/qemu-system-x86_64") + } + }, + }, + { + name: "override network bridge", + yaml: `network: + default_bridge: br0 + dhcp_mode: dnsmasq + bridge_map: + production: br0 +`, + check: func(t *testing.T, cfg *Config) { + if cfg.Network.DefaultBridge != "br0" { + t.Errorf("DefaultBridge = %q, want %q", cfg.Network.DefaultBridge, "br0") + } + if cfg.Network.DHCPMode != "dnsmasq" { + t.Errorf("DHCPMode = %q, want %q", cfg.Network.DHCPMode, "dnsmasq") + } + if v, ok := cfg.Network.BridgeMap["production"]; !ok || v != "br0" { + t.Errorf("BridgeMap[\"production\"] = %q (ok=%v), want %q", v, ok, "br0") + } + }, + }, + { + name: "override janitor settings", + yaml: `janitor: + interval: 5m + default_ttl: 48h +`, + check: func(t *testing.T, cfg *Config) { + if cfg.Janitor.Interval != 5*time.Minute { + t.Errorf("Janitor.Interval = %v, want %v", cfg.Janitor.Interval, 5*time.Minute) + } + if cfg.Janitor.DefaultTTL != 48*time.Hour { + t.Errorf("Janitor.DefaultTTL = %v, want %v", cfg.Janitor.DefaultTTL, 48*time.Hour) + } + }, + }, + { + name: "partial override preserves defaults", + yaml: `microvm: + default_vcpus: 8 +`, + check: func(t *testing.T, cfg *Config) { + if cfg.MicroVM.DefaultVCPUs != 8 { + t.Errorf("DefaultVCPUs = %d, want %d", cfg.MicroVM.DefaultVCPUs, 8) + } + // Non-overridden fields keep defaults + if cfg.Network.DefaultBridge != "virbr0" { + t.Errorf("DefaultBridge = %q, want default %q", cfg.Network.DefaultBridge, "virbr0") + } + if cfg.MicroVM.DefaultMemoryMB != 2048 { + t.Errorf("DefaultMemoryMB = %d, want default %d", cfg.MicroVM.DefaultMemoryMB, 2048) + } + }, + }, + } 
+ + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "config.yaml") + + if err := os.WriteFile(path, []byte(tt.yaml), 0o644); err != nil { + t.Fatalf("write test yaml: %v", err) + } + + cfg, err := Load(path) + if err != nil { + t.Fatalf("Load(%q) returned error: %v", path, err) + } + + tt.check(t, cfg) + }) + } +} + +func TestLoad_InvalidYAML(t *testing.T) { + tests := []struct { + name string + content string + }{ + { + name: "malformed yaml", + content: ":\n\t- :\n\t\t invalid: [", + }, + { + name: "tabs instead of spaces", + content: "control_plane:\n\taddress: bad", + }, + { + name: "wrong type for integer field", + content: "microvm:\n default_vcpus: not_a_number", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "bad.yaml") + + if err := os.WriteFile(path, []byte(tt.content), 0o644); err != nil { + t.Fatalf("write test yaml: %v", err) + } + + _, err := Load(path) + if err == nil { + t.Fatalf("Load(%q) expected error for invalid YAML, got nil", path) + } + }) + } +} + +func TestSave_RoundTrip(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "subdir", "config.yaml") + + original := DefaultConfig() + original.HostID = "test-host-001" + original.ControlPlane.Address = "cp.example.com:9090" + original.ControlPlane.Insecure = false + original.MicroVM.DefaultVCPUs = 4 + original.MicroVM.DefaultMemoryMB = 8192 + original.Network.DefaultBridge = "br-custom" + original.Network.BridgeMap = map[string]string{ + "default": "virbr0", + "custom": "br-custom", + } + original.SSH.DefaultUser = "admin" + original.Libvirt.URI = "qemu+ssh://user@host/system" + original.Janitor.DefaultTTL = 12 * time.Hour + + // Save + if err := Save(path, &original); err != nil { + t.Fatalf("Save() error: %v", err) + } + + // Verify file was created (Save creates intermediate dirs) + if _, err := os.Stat(path); err != nil { + 
t.Fatalf("saved file does not exist: %v", err) + } + + // Load back + loaded, err := Load(path) + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + // Verify all overridden fields round-trip correctly + if loaded.HostID != original.HostID { + t.Errorf("HostID = %q, want %q", loaded.HostID, original.HostID) + } + if loaded.ControlPlane.Address != original.ControlPlane.Address { + t.Errorf("ControlPlane.Address = %q, want %q", loaded.ControlPlane.Address, original.ControlPlane.Address) + } + if loaded.ControlPlane.Insecure != original.ControlPlane.Insecure { + t.Errorf("ControlPlane.Insecure = %v, want %v", loaded.ControlPlane.Insecure, original.ControlPlane.Insecure) + } + if loaded.MicroVM.DefaultVCPUs != original.MicroVM.DefaultVCPUs { + t.Errorf("DefaultVCPUs = %d, want %d", loaded.MicroVM.DefaultVCPUs, original.MicroVM.DefaultVCPUs) + } + if loaded.MicroVM.DefaultMemoryMB != original.MicroVM.DefaultMemoryMB { + t.Errorf("DefaultMemoryMB = %d, want %d", loaded.MicroVM.DefaultMemoryMB, original.MicroVM.DefaultMemoryMB) + } + if loaded.Network.DefaultBridge != original.Network.DefaultBridge { + t.Errorf("DefaultBridge = %q, want %q", loaded.Network.DefaultBridge, original.Network.DefaultBridge) + } + if loaded.Network.DHCPMode != original.Network.DHCPMode { + t.Errorf("DHCPMode = %q, want %q", loaded.Network.DHCPMode, original.Network.DHCPMode) + } + if v, ok := loaded.Network.BridgeMap["custom"]; !ok || v != "br-custom" { + t.Errorf("BridgeMap[\"custom\"] = %q (ok=%v), want %q", v, ok, "br-custom") + } + if loaded.SSH.DefaultUser != original.SSH.DefaultUser { + t.Errorf("SSH.DefaultUser = %q, want %q", loaded.SSH.DefaultUser, original.SSH.DefaultUser) + } + if loaded.Libvirt.URI != original.Libvirt.URI { + t.Errorf("Libvirt.URI = %q, want %q", loaded.Libvirt.URI, original.Libvirt.URI) + } + if loaded.Janitor.DefaultTTL != original.Janitor.DefaultTTL { + t.Errorf("Janitor.DefaultTTL = %v, want %v", loaded.Janitor.DefaultTTL, original.Janitor.DefaultTTL) + 
} + if loaded.MicroVM.CommandTimeout != original.MicroVM.CommandTimeout { + t.Errorf("CommandTimeout = %v, want %v", loaded.MicroVM.CommandTimeout, original.MicroVM.CommandTimeout) + } + if loaded.SSH.CertTTL != original.SSH.CertTTL { + t.Errorf("SSH.CertTTL = %v, want %v", loaded.SSH.CertTTL, original.SSH.CertTTL) + } +} diff --git a/fluid-daemon/internal/daemon/server.go b/fluid-daemon/internal/daemon/server.go new file mode 100644 index 00000000..f45b97c4 --- /dev/null +++ b/fluid-daemon/internal/daemon/server.go @@ -0,0 +1,504 @@ +// Package daemon implements the DaemonService gRPC server for direct CLI access. +package daemon + +import ( + "context" + "fmt" + "log/slog" + "os" + "strings" + "time" + + fluidv1 "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1" + + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/provider" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/snapshotpull" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/sshconfig" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/state" + + genid "github.com/aspectrr/fluid.sh/fluid-daemon/internal/id" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Server implements the DaemonServiceServer interface. +type Server struct { + fluidv1.UnimplementedDaemonServiceServer + + prov provider.SandboxProvider + store *state.Store + puller *snapshotpull.Puller + hostID string + version string + logger *slog.Logger +} + +// NewServer creates a new DaemonService server. 
+func NewServer(prov provider.SandboxProvider, store *state.Store, puller *snapshotpull.Puller, hostID, version string, logger *slog.Logger) *Server { + return &Server{ + prov: prov, + store: store, + puller: puller, + hostID: hostID, + version: version, + logger: logger.With("component", "daemon-service"), + } +} + +func (s *Server) CreateSandbox(ctx context.Context, req *fluidv1.CreateSandboxCommand) (*fluidv1.SandboxCreated, error) { + s.logger.Info("CreateSandbox", "base_image", req.GetBaseImage(), "source_vm", req.GetSourceVm(), "name", req.GetName()) + + sandboxID := req.GetSandboxId() + if sandboxID == "" { + var err error + sandboxID, err = genid.Generate("sbx-") + if err != nil { + return nil, status.Errorf(codes.Internal, "generate sandbox ID: %v", err) + } + } + + vcpus := int(req.GetVcpus()) + if vcpus == 0 { + vcpus = 2 + } + memMB := int(req.GetMemoryMb()) + if memMB == 0 { + memMB = 2048 + } + + // If a source host connection is provided, snapshot+pull the image first + baseImage := req.GetBaseImage() + if conn := req.GetSourceHostConnection(); conn != nil && req.GetSourceVm() != "" && s.puller != nil { + var backend snapshotpull.SnapshotBackend + switch conn.GetType() { + case "libvirt": + backend = snapshotpull.NewLibvirtBackend( + conn.GetSshHost(), int(conn.GetSshPort()), + conn.GetSshUser(), conn.GetSshIdentityFile(), s.logger) + case "proxmox": + backend = snapshotpull.NewProxmoxBackend( + conn.GetProxmoxHost(), conn.GetProxmoxTokenId(), + conn.GetProxmoxSecret(), conn.GetProxmoxNode(), + conn.GetProxmoxVerifySsl(), s.logger) + } + if backend != nil { + mode := "cached" + if req.GetSnapshotMode() == fluidv1.SnapshotMode_SNAPSHOT_MODE_FRESH { + mode = "fresh" + } + pullResult, err := s.puller.Pull(ctx, snapshotpull.PullRequest{ + SourceHost: conn.GetSshHost(), + VMName: req.GetSourceVm(), + SnapshotMode: mode, + }, backend) + if err != nil { + return nil, status.Errorf(codes.Internal, "pull snapshot: %v", err) + } + baseImage = 
pullResult.ImageName + s.logger.Info("snapshot pulled", "image", baseImage, "cached", pullResult.Cached) + } + } + + result, err := s.prov.CreateSandbox(ctx, provider.CreateRequest{ + SandboxID: sandboxID, + Name: req.GetName(), + BaseImage: baseImage, + SourceVM: req.GetSourceVm(), + Network: req.GetNetwork(), + VCPUs: vcpus, + MemoryMB: memMB, + TTLSeconds: int(req.GetTtlSeconds()), + AgentID: req.GetAgentId(), + SSHPublicKey: req.GetSshPublicKey(), + }) + if err != nil { + s.logger.Error("CreateSandbox failed", "error", err) + return nil, status.Errorf(codes.Internal, "create sandbox: %v", err) + } + + // Persist to state store + now := time.Now().UTC() + sb := &state.Sandbox{ + ID: result.SandboxID, + Name: result.Name, + AgentID: req.GetAgentId(), + BaseImage: baseImage, + Bridge: result.Bridge, + MACAddress: result.MACAddress, + IPAddress: result.IPAddress, + State: result.State, + PID: result.PID, + VCPUs: vcpus, + MemoryMB: memMB, + TTLSeconds: int(req.GetTtlSeconds()), + CreatedAt: now, + UpdatedAt: now, + } + if err := s.store.CreateSandbox(ctx, sb); err != nil { + s.logger.Warn("failed to persist sandbox state", "sandbox_id", result.SandboxID, "error", err) + } + + return &fluidv1.SandboxCreated{ + SandboxId: result.SandboxID, + Name: result.Name, + State: result.State, + IpAddress: result.IPAddress, + MacAddress: result.MACAddress, + Bridge: result.Bridge, + Pid: int32(result.PID), + }, nil +} + +func (s *Server) GetSandbox(ctx context.Context, req *fluidv1.GetSandboxRequest) (*fluidv1.SandboxInfo, error) { + id := req.GetSandboxId() + if id == "" { + return nil, status.Error(codes.InvalidArgument, "sandbox_id is required") + } + + sb, err := s.store.GetSandbox(ctx, id) + if err != nil { + return nil, status.Errorf(codes.NotFound, "sandbox not found: %v", err) + } + + return sandboxToInfo(sb), nil +} + +func (s *Server) ListSandboxes(ctx context.Context, _ *fluidv1.ListSandboxesRequest) (*fluidv1.ListSandboxesResponse, error) { + sandboxes, err := 
s.store.ListSandboxes(ctx) + if err != nil { + return nil, status.Errorf(codes.Internal, "list sandboxes: %v", err) + } + + infos := make([]*fluidv1.SandboxInfo, 0, len(sandboxes)) + for _, sb := range sandboxes { + infos = append(infos, sandboxToInfo(sb)) + } + + return &fluidv1.ListSandboxesResponse{ + Sandboxes: infos, + Count: int32(len(infos)), + }, nil +} + +func (s *Server) DestroySandbox(ctx context.Context, req *fluidv1.DestroySandboxCommand) (*fluidv1.SandboxDestroyed, error) { + id := req.GetSandboxId() + if id == "" { + return nil, status.Error(codes.InvalidArgument, "sandbox_id is required") + } + + if err := s.prov.DestroySandbox(ctx, id); err != nil { + s.logger.Error("DestroySandbox failed", "sandbox_id", id, "error", err) + return nil, status.Errorf(codes.Internal, "destroy sandbox: %v", err) + } + + if err := s.store.DeleteSandbox(ctx, id); err != nil { + s.logger.Warn("failed to delete sandbox from store", "sandbox_id", id, "error", err) + } + + return &fluidv1.SandboxDestroyed{SandboxId: id}, nil +} + +func (s *Server) StartSandbox(ctx context.Context, req *fluidv1.StartSandboxCommand) (*fluidv1.SandboxStarted, error) { + id := req.GetSandboxId() + if id == "" { + return nil, status.Error(codes.InvalidArgument, "sandbox_id is required") + } + + result, err := s.prov.StartSandbox(ctx, id) + if err != nil { + return nil, status.Errorf(codes.Internal, "start sandbox: %v", err) + } + + // Update state + if sb, err := s.store.GetSandbox(ctx, id); err == nil { + sb.State = result.State + sb.IPAddress = result.IPAddress + sb.UpdatedAt = time.Now().UTC() + if err := s.store.UpdateSandbox(ctx, sb); err != nil { + s.logger.Warn("failed to update sandbox state", "sandbox_id", id, "error", err) + } + } + + return &fluidv1.SandboxStarted{ + SandboxId: id, + State: result.State, + IpAddress: result.IPAddress, + }, nil +} + +func (s *Server) StopSandbox(ctx context.Context, req *fluidv1.StopSandboxCommand) (*fluidv1.SandboxStopped, error) { + id := 
req.GetSandboxId() + if id == "" { + return nil, status.Error(codes.InvalidArgument, "sandbox_id is required") + } + + if err := s.prov.StopSandbox(ctx, id, req.GetForce()); err != nil { + return nil, status.Errorf(codes.Internal, "stop sandbox: %v", err) + } + + // Update state + if sb, err := s.store.GetSandbox(ctx, id); err == nil { + sb.State = "STOPPED" + sb.UpdatedAt = time.Now().UTC() + if err := s.store.UpdateSandbox(ctx, sb); err != nil { + s.logger.Warn("failed to update sandbox state", "sandbox_id", id, "error", err) + } + } + + return &fluidv1.SandboxStopped{ + SandboxId: id, + State: "STOPPED", + }, nil +} + +func (s *Server) RunCommand(ctx context.Context, req *fluidv1.RunCommandCommand) (*fluidv1.CommandResult, error) { + id := req.GetSandboxId() + if id == "" { + return nil, status.Error(codes.InvalidArgument, "sandbox_id is required") + } + if req.GetCommand() == "" { + return nil, status.Error(codes.InvalidArgument, "command is required") + } + + timeout := time.Duration(req.GetTimeoutSeconds()) * time.Second + if timeout == 0 { + timeout = 5 * time.Minute + } + + result, err := s.prov.RunCommand(ctx, id, req.GetCommand(), timeout) + if err != nil { + return nil, status.Errorf(codes.Internal, "run command: %v", err) + } + + // Record command in state + cmdID, _ := genid.GenerateRaw() + cmdRecord := &state.Command{ + ID: cmdID, + SandboxID: id, + Command: req.GetCommand(), + Stdout: result.Stdout, + Stderr: result.Stderr, + ExitCode: result.ExitCode, + DurationMS: result.DurationMS, + StartedAt: time.Now().UTC().Add(-time.Duration(result.DurationMS) * time.Millisecond), + EndedAt: time.Now().UTC(), + } + _ = s.store.CreateCommand(ctx, cmdRecord) + + return &fluidv1.CommandResult{ + SandboxId: id, + Stdout: result.Stdout, + Stderr: result.Stderr, + ExitCode: int32(result.ExitCode), + DurationMs: result.DurationMS, + }, nil +} + +func (s *Server) CreateSnapshot(ctx context.Context, req *fluidv1.SnapshotCommand) (*fluidv1.SnapshotCreated, error) { + 
id := req.GetSandboxId() + if id == "" { + return nil, status.Error(codes.InvalidArgument, "sandbox_id is required") + } + + name := req.GetSnapshotName() + if name == "" { + name = fmt.Sprintf("snap-%d", time.Now().Unix()) + } + + result, err := s.prov.CreateSnapshot(ctx, id, name) + if err != nil { + return nil, status.Errorf(codes.Internal, "create snapshot: %v", err) + } + + return &fluidv1.SnapshotCreated{ + SandboxId: id, + SnapshotId: result.SnapshotID, + SnapshotName: result.SnapshotName, + }, nil +} + +func (s *Server) ListSourceVMs(ctx context.Context, _ *fluidv1.ListSourceVMsCommand) (*fluidv1.SourceVMsList, error) { + vms, err := s.prov.ListSourceVMs(ctx) + if err != nil { + return nil, status.Errorf(codes.Internal, "list source VMs: %v", err) + } + + entries := make([]*fluidv1.SourceVMListEntry, 0, len(vms)) + for _, vm := range vms { + entries = append(entries, &fluidv1.SourceVMListEntry{ + Name: vm.Name, + State: vm.State, + IpAddress: vm.IPAddress, + Prepared: vm.Prepared, + }) + } + + return &fluidv1.SourceVMsList{Vms: entries}, nil +} + +func (s *Server) ValidateSourceVM(ctx context.Context, req *fluidv1.ValidateSourceVMCommand) (*fluidv1.SourceVMValidation, error) { + if req.GetSourceVm() == "" { + return nil, status.Error(codes.InvalidArgument, "source_vm is required") + } + + result, err := s.prov.ValidateSourceVM(ctx, req.GetSourceVm()) + if err != nil { + return nil, status.Errorf(codes.Internal, "validate source VM: %v", err) + } + + return &fluidv1.SourceVMValidation{ + SourceVm: result.VMName, + Valid: result.Valid, + State: result.State, + MacAddress: result.MACAddress, + IpAddress: result.IPAddress, + HasNetwork: result.HasNetwork, + Warnings: result.Warnings, + Errors: result.Errors, + }, nil +} + +func (s *Server) PrepareSourceVM(ctx context.Context, req *fluidv1.PrepareSourceVMCommand) (*fluidv1.SourceVMPrepared, error) { + if req.GetSourceVm() == "" { + return nil, status.Error(codes.InvalidArgument, "source_vm is required") + } + + 
result, err := s.prov.PrepareSourceVM(ctx, req.GetSourceVm(), req.GetSshUser(), req.GetSshKeyPath()) + if err != nil { + return nil, status.Errorf(codes.Internal, "prepare source VM: %v", err) + } + + return &fluidv1.SourceVMPrepared{ + SourceVm: result.SourceVM, + IpAddress: result.IPAddress, + Prepared: result.Prepared, + UserCreated: result.UserCreated, + ShellInstalled: result.ShellInstalled, + CaKeyInstalled: result.CAKeyInstalled, + SshdConfigured: result.SSHDConfigured, + PrincipalsCreated: result.PrincipalsCreated, + SshdRestarted: result.SSHDRestarted, + }, nil +} + +func (s *Server) RunSourceCommand(ctx context.Context, req *fluidv1.RunSourceCommandCommand) (*fluidv1.SourceCommandResult, error) { + if req.GetSourceVm() == "" { + return nil, status.Error(codes.InvalidArgument, "source_vm is required") + } + if req.GetCommand() == "" { + return nil, status.Error(codes.InvalidArgument, "command is required") + } + + timeout := time.Duration(req.GetTimeoutSeconds()) * time.Second + if timeout == 0 { + timeout = 5 * time.Minute + } + + result, err := s.prov.RunSourceCommand(ctx, req.GetSourceVm(), req.GetCommand(), timeout) + if err != nil { + return nil, status.Errorf(codes.Internal, "run source command: %v", err) + } + + return &fluidv1.SourceCommandResult{ + SourceVm: req.GetSourceVm(), + ExitCode: int32(result.ExitCode), + Stdout: result.Stdout, + Stderr: result.Stderr, + }, nil +} + +func (s *Server) ReadSourceFile(ctx context.Context, req *fluidv1.ReadSourceFileCommand) (*fluidv1.SourceFileResult, error) { + if req.GetSourceVm() == "" { + return nil, status.Error(codes.InvalidArgument, "source_vm is required") + } + if req.GetPath() == "" { + return nil, status.Error(codes.InvalidArgument, "path is required") + } + + content, err := s.prov.ReadSourceFile(ctx, req.GetSourceVm(), req.GetPath()) + if err != nil { + return nil, status.Errorf(codes.Internal, "read source file: %v", err) + } + + return &fluidv1.SourceFileResult{ + SourceVm: req.GetSourceVm(), 
+ Path: req.GetPath(), + Content: content, + }, nil +} + +func (s *Server) GetHostInfo(ctx context.Context, _ *fluidv1.GetHostInfoRequest) (*fluidv1.HostInfoResponse, error) { + hostname, _ := os.Hostname() + + caps, err := s.prov.Capabilities(ctx) + if err != nil { + s.logger.Warn("failed to get capabilities", "error", err) + } + + resp := &fluidv1.HostInfoResponse{ + HostId: s.hostID, + Hostname: hostname, + Version: s.version, + ActiveSandboxes: int32(s.prov.ActiveSandboxCount()), + } + + if caps != nil { + resp.TotalCpus = int32(caps.TotalCPUs) + resp.TotalMemoryMb = int64(caps.TotalMemoryMB) + resp.BaseImages = caps.BaseImages + } + + return resp, nil +} + +func (s *Server) Health(_ context.Context, _ *fluidv1.HealthRequest) (*fluidv1.HealthResponse, error) { + return &fluidv1.HealthResponse{Status: "ok"}, nil +} + +func (s *Server) DiscoverHosts(ctx context.Context, req *fluidv1.DiscoverHostsCommand) (*fluidv1.DiscoverHostsResult, error) { + s.logger.Info("DiscoverHosts", "config_length", len(req.GetSshConfigContent())) + + hosts, err := sshconfig.Parse(strings.NewReader(req.GetSshConfigContent())) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "parse ssh config: %v", err) + } + + if len(hosts) == 0 { + return &fluidv1.DiscoverHostsResult{}, nil + } + + probeResults := sshconfig.ProbeAll(ctx, hosts) + + discovered := make([]*fluidv1.DiscoveredHost, 0, len(probeResults)) + for _, pr := range probeResults { + discovered = append(discovered, &fluidv1.DiscoveredHost{ + Name: pr.Host.Name, + Hostname: pr.Host.HostName, + User: pr.Host.User, + Port: int32(pr.Host.Port), + IdentityFile: pr.Host.IdentityFile, + Reachable: pr.Reachable, + HasLibvirt: pr.HasLibvirt, + HasProxmox: pr.HasProxmox, + Vms: pr.VMs, + Error: pr.Error, + }) + } + + return &fluidv1.DiscoverHostsResult{Hosts: discovered}, nil +} + +// sandboxToInfo converts a state.Sandbox to a proto SandboxInfo. 
+func sandboxToInfo(sb *state.Sandbox) *fluidv1.SandboxInfo { + return &fluidv1.SandboxInfo{ + SandboxId: sb.ID, + Name: sb.Name, + State: sb.State, + IpAddress: sb.IPAddress, + BaseImage: sb.BaseImage, + AgentId: sb.AgentID, + Vcpus: int32(sb.VCPUs), + MemoryMb: int32(sb.MemoryMB), + CreatedAt: sb.CreatedAt.Format(time.RFC3339), + } +} diff --git a/fluid-daemon/internal/id/id.go b/fluid-daemon/internal/id/id.go new file mode 100644 index 00000000..0da8b66e --- /dev/null +++ b/fluid-daemon/internal/id/id.go @@ -0,0 +1,23 @@ +package id + +import ( + "crypto/rand" + "encoding/hex" + "fmt" +) + +func Generate(prefix string) (string, error) { + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("crypto/rand failed: %w", err) + } + return prefix + hex.EncodeToString(b), nil +} + +func GenerateRaw() (string, error) { + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("crypto/rand failed: %w", err) + } + return hex.EncodeToString(b), nil +} diff --git a/fluid-daemon/internal/id/id_test.go b/fluid-daemon/internal/id/id_test.go new file mode 100644 index 00000000..33975c0e --- /dev/null +++ b/fluid-daemon/internal/id/id_test.go @@ -0,0 +1,67 @@ +package id + +import ( + "encoding/hex" + "strings" + "testing" +) + +func TestGenerate_PrefixAndLength(t *testing.T) { + prefix := "TST-" + got, err := Generate(prefix) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !strings.HasPrefix(got, prefix) { + t.Errorf("expected prefix %q, got %q", prefix, got) + } + if len(got) != len(prefix)+16 { + t.Errorf("expected length %d, got %d (%q)", len(prefix)+16, len(got), got) + } +} + +func TestGenerate_ValidHex(t *testing.T) { + prefix := "X-" + got, err := Generate(prefix) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + hexPart := got[len(prefix):] + if _, err := hex.DecodeString(hexPart); err != nil { + t.Errorf("hex part %q is not valid hex: %v", hexPart, err) + } +} + +func 
TestGenerateRaw_Length(t *testing.T) { + got, err := GenerateRaw() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(got) != 16 { + t.Errorf("expected length 16, got %d (%q)", len(got), got) + } +} + +func TestGenerateRaw_ValidHex(t *testing.T) { + got, err := GenerateRaw() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if _, err := hex.DecodeString(got); err != nil { + t.Errorf("%q is not valid hex: %v", got, err) + } +} + +func TestGenerate_NoCollisions(t *testing.T) { + seen := make(map[string]struct{}, 10000) + for i := 0; i < 10000; i++ { + id, err := Generate("T-") + if err != nil { + t.Fatalf("unexpected error at iteration %d: %v", i, err) + } + if _, ok := seen[id]; ok { + t.Fatalf("collision at iteration %d: %s", i, id) + } + seen[id] = struct{}{} + } +} diff --git a/fluid-daemon/internal/image/extract.go b/fluid-daemon/internal/image/extract.go new file mode 100644 index 00000000..7b4513ef --- /dev/null +++ b/fluid-daemon/internal/image/extract.go @@ -0,0 +1,209 @@ +package image + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" +) + +// ExtractKernel extracts a vmlinux kernel from a QCOW2 base image. +// The kernel is mounted via NBD, copied out, and saved alongside the image. +// +// Strategy: +// 1. Mount the QCOW2 via qemu-nbd +// 2. Mount the root partition +// 3. Copy /boot/vmlinuz-* or /vmlinuz +// 4. Decompress if needed (extract-vmlinux) +// 5. 
Save as .vmlinux +func ExtractKernel(ctx context.Context, imagePath string) (string, error) { + baseName := strings.TrimSuffix(filepath.Base(imagePath), ".qcow2") + outputPath := filepath.Join(filepath.Dir(imagePath), baseName+".vmlinux") + + if fileExists(outputPath) { + return outputPath, nil + } + + // Check for virt-ls/virt-cat (libguestfs) - easier approach + if _, err := exec.LookPath("virt-cat"); err == nil { + return extractKernelGuestfs(ctx, imagePath, outputPath) + } + + // Fallback to manual NBD mount + return extractKernelNBD(ctx, imagePath, outputPath) +} + +// extractKernelGuestfs uses libguestfs tools to extract the kernel. +func extractKernelGuestfs(ctx context.Context, imagePath, outputPath string) (string, error) { + // List /boot to find kernel + cmd := exec.CommandContext(ctx, "virt-ls", "-a", imagePath, "/boot/") + output, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("virt-ls /boot/: %w", err) + } + + // Find vmlinuz file + var kernelFile string + for _, line := range strings.Split(string(output), "\n") { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "vmlinuz") { + kernelFile = "/boot/" + line + break + } + } + + if kernelFile == "" { + return "", fmt.Errorf("no vmlinuz found in /boot/ of %s", imagePath) + } + + // Extract kernel + kernelCompressed := outputPath + ".compressed" + cmd = exec.CommandContext(ctx, "virt-cat", "-a", imagePath, kernelFile) + kernelData, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("virt-cat %s: %w", kernelFile, err) + } + + if err := os.WriteFile(kernelCompressed, kernelData, 0o644); err != nil { + return "", fmt.Errorf("write compressed kernel: %w", err) + } + defer func() { _ = os.Remove(kernelCompressed) }() + + // Try to decompress (extract-vmlinux script or direct use) + if err := decompressKernel(ctx, kernelCompressed, outputPath); err != nil { + // If decompression fails, try using the compressed kernel directly + // (microvm may support compressed kernels) + 
if err := os.Rename(kernelCompressed, outputPath); err != nil { + return "", fmt.Errorf("rename kernel: %w", err) + } + } + + return outputPath, nil +} + +// extractKernelNBD uses qemu-nbd to mount and extract the kernel. +func extractKernelNBD(ctx context.Context, imagePath, outputPath string) (string, error) { + // This requires root and available NBD device + nbdDev := "/dev/nbd0" + + // Load nbd module + _ = exec.CommandContext(ctx, "modprobe", "nbd", "max_part=8").Run() + + // Connect image + cmd := exec.CommandContext(ctx, "qemu-nbd", "--connect="+nbdDev, imagePath) + if output, err := cmd.CombinedOutput(); err != nil { + return "", fmt.Errorf("qemu-nbd connect: %w: %s", err, string(output)) + } + defer func() { + _ = exec.CommandContext(ctx, "qemu-nbd", "--disconnect", nbdDev).Run() + }() + + // Mount root partition + mountDir, err := os.MkdirTemp("", "fluid-extract-") + if err != nil { + return "", fmt.Errorf("create mount dir: %w", err) + } + defer func() { _ = os.RemoveAll(mountDir) }() + + // Try partition 1 first, then the device itself + mounted := false + for _, dev := range []string{nbdDev + "p1", nbdDev} { + cmd = exec.CommandContext(ctx, "mount", "-o", "ro", dev, mountDir) + if err := cmd.Run(); err == nil { + mounted = true + defer func() { + _ = exec.CommandContext(ctx, "umount", mountDir).Run() + }() + break + } + } + + if !mounted { + return "", fmt.Errorf("could not mount any partition from %s", imagePath) + } + + // Find kernel + bootDir := filepath.Join(mountDir, "boot") + entries, err := os.ReadDir(bootDir) + if err != nil { + return "", fmt.Errorf("read /boot: %w", err) + } + + var kernelPath string + for _, entry := range entries { + if strings.HasPrefix(entry.Name(), "vmlinuz") { + kernelPath = filepath.Join(bootDir, entry.Name()) + break + } + } + + if kernelPath == "" { + return "", fmt.Errorf("no vmlinuz found in /boot/ of %s", imagePath) + } + + // Copy and decompress + kernelData, err := os.ReadFile(kernelPath) + if err != nil { + 
return "", fmt.Errorf("read kernel: %w", err) + } + + kernelCompressed := outputPath + ".compressed" + if err := os.WriteFile(kernelCompressed, kernelData, 0o644); err != nil { + return "", fmt.Errorf("write compressed kernel: %w", err) + } + defer func() { _ = os.Remove(kernelCompressed) }() + + if err := decompressKernel(ctx, kernelCompressed, outputPath); err != nil { + if err := os.Rename(kernelCompressed, outputPath); err != nil { + return "", fmt.Errorf("rename kernel: %w", err) + } + } + + return outputPath, nil +} + +// decompressKernel attempts to decompress a compressed kernel using extract-vmlinux. +func decompressKernel(ctx context.Context, inputPath, outputPath string) error { + // Try extract-vmlinux script (ships with linux kernel source) + extractScript, err := exec.LookPath("extract-vmlinux") + if err != nil { + // Try common locations + for _, path := range []string{ + "/usr/src/linux-headers-*/scripts/extract-vmlinux", + "/usr/lib/linux-tools-*/extract-vmlinux", + } { + matches, _ := filepath.Glob(path) + if len(matches) > 0 { + extractScript = matches[0] + break + } + } + } + + if extractScript != "" { + cmd := exec.CommandContext(ctx, extractScript, inputPath) + output, err := cmd.Output() + if err == nil && len(output) > 0 { + return os.WriteFile(outputPath, output, 0o644) + } + } + + // Manual decompression attempts + // Try gzip + cmd := exec.CommandContext(ctx, "zcat", inputPath) + output, err := cmd.Output() + if err == nil && len(output) > 0 { + return os.WriteFile(outputPath, output, 0o644) + } + + // Try xz + cmd = exec.CommandContext(ctx, "xzcat", inputPath) + output, err = cmd.Output() + if err == nil && len(output) > 0 { + return os.WriteFile(outputPath, output, 0o644) + } + + return fmt.Errorf("could not decompress kernel (tried extract-vmlinux, gzip, xz)") +} diff --git a/fluid-daemon/internal/image/store.go b/fluid-daemon/internal/image/store.go new file mode 100644 index 00000000..0753b17a --- /dev/null +++ 
b/fluid-daemon/internal/image/store.go @@ -0,0 +1,131 @@ +// Package image manages base QCOW2 images and kernel extraction. +package image + +import ( + "fmt" + "log/slog" + "os" + "path/filepath" + "strings" +) + +// Store manages base QCOW2 images available for sandbox creation. +type Store struct { + baseDir string + logger *slog.Logger +} + +// ImageInfo describes a base image. +type ImageInfo struct { + Name string // filename without extension + Path string // full path to QCOW2 file + SizeMB int64 // file size in MB + HasKernel bool // whether a kernel has been extracted +} + +// NewStore creates an image store for the given base directory. +func NewStore(baseDir string, logger *slog.Logger) (*Store, error) { + if logger == nil { + logger = slog.Default() + } + + if err := os.MkdirAll(baseDir, 0o755); err != nil { + return nil, fmt.Errorf("create image dir: %w", err) + } + + return &Store{ + baseDir: baseDir, + logger: logger.With("component", "image"), + }, nil +} + +// List returns all available base images. +func (s *Store) List() ([]ImageInfo, error) { + entries, err := os.ReadDir(s.baseDir) + if err != nil { + return nil, fmt.Errorf("read image dir: %w", err) + } + + var images []ImageInfo + for _, entry := range entries { + if entry.IsDir() || !strings.HasSuffix(entry.Name(), ".qcow2") { + continue + } + + info, err := entry.Info() + if err != nil { + continue + } + + name := strings.TrimSuffix(entry.Name(), ".qcow2") + fullPath := filepath.Join(s.baseDir, entry.Name()) + + // Check for extracted kernel + kernelPath := filepath.Join(s.baseDir, name+".vmlinux") + hasKernel := fileExists(kernelPath) + + images = append(images, ImageInfo{ + Name: name, + Path: fullPath, + SizeMB: info.Size() / (1024 * 1024), + HasKernel: hasKernel, + }) + } + + return images, nil +} + +// ListNames returns just the names of available base images. 
+func (s *Store) ListNames() ([]string, error) { + images, err := s.List() + if err != nil { + return nil, err + } + + names := make([]string, len(images)) + for i, img := range images { + names[i] = img.Name + } + return names, nil +} + +// GetImagePath returns the full path to a base image by name. +func (s *Store) GetImagePath(name string) (string, error) { + path := filepath.Join(s.baseDir, name+".qcow2") + if !fileExists(path) { + // Try without adding .qcow2 (in case name already has extension) + if fileExists(filepath.Join(s.baseDir, name)) { + return filepath.Join(s.baseDir, name), nil + } + return "", fmt.Errorf("base image %q not found in %s", name, s.baseDir) + } + return path, nil +} + +// GetKernelPath returns the path to the extracted kernel for a base image. +func (s *Store) GetKernelPath(name string) (string, error) { + path := filepath.Join(s.baseDir, name+".vmlinux") + if !fileExists(path) { + return "", fmt.Errorf("kernel for %q not found (run kernel extraction first)", name) + } + return path, nil +} + +// HasImage checks if a base image exists. +func (s *Store) HasImage(name string) bool { + _, err := s.GetImagePath(name) + return err == nil +} + +// BaseDir returns the base image directory. 
+func (s *Store) BaseDir() string { + return s.baseDir +} + +func fileExists(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + return !info.IsDir() +} diff --git a/fluid-daemon/internal/image/store_test.go b/fluid-daemon/internal/image/store_test.go new file mode 100644 index 00000000..bf22c09b --- /dev/null +++ b/fluid-daemon/internal/image/store_test.go @@ -0,0 +1,231 @@ +package image + +import ( + "log/slog" + "os" + "path/filepath" + "testing" +) + +func createFile(t *testing.T, path string, sizeBytes int) { + t.Helper() + data := make([]byte, sizeBytes) + if err := os.WriteFile(path, data, 0o644); err != nil { + t.Fatalf("failed to create file %s: %v", path, err) + } +} + +func TestNewStore_CreatesDir(t *testing.T) { + base := filepath.Join(t.TempDir(), "images", "nested") + + s, err := NewStore(base, slog.Default()) + if err != nil { + t.Fatalf("NewStore failed: %v", err) + } + + info, err := os.Stat(s.BaseDir()) + if err != nil { + t.Fatalf("expected directory to exist: %v", err) + } + if !info.IsDir() { + t.Fatalf("expected %s to be a directory", s.BaseDir()) + } +} + +func TestList_Empty(t *testing.T) { + base := t.TempDir() + + s, err := NewStore(base, slog.Default()) + if err != nil { + t.Fatalf("NewStore failed: %v", err) + } + + images, err := s.List() + if err != nil { + t.Fatalf("List failed: %v", err) + } + if len(images) != 0 { + t.Errorf("expected 0 images, got %d", len(images)) + } +} + +func TestList_WithImages(t *testing.T) { + base := t.TempDir() + + // Create two qcow2 files, one with a kernel. + createFile(t, filepath.Join(base, "ubuntu.qcow2"), 2*1024*1024) + createFile(t, filepath.Join(base, "debian.qcow2"), 1*1024*1024) + createFile(t, filepath.Join(base, "ubuntu.vmlinux"), 100) + + // Also create a non-qcow2 file that should be ignored. 
+ createFile(t, filepath.Join(base, "notes.txt"), 10) + + s, err := NewStore(base, slog.Default()) + if err != nil { + t.Fatalf("NewStore failed: %v", err) + } + + images, err := s.List() + if err != nil { + t.Fatalf("List failed: %v", err) + } + + if len(images) != 2 { + t.Fatalf("expected 2 images, got %d", len(images)) + } + + byName := make(map[string]ImageInfo) + for _, img := range images { + byName[img.Name] = img + } + + // Check ubuntu entry. + ubuntu, ok := byName["ubuntu"] + if !ok { + t.Fatal("expected image named 'ubuntu'") + } + if !ubuntu.HasKernel { + t.Error("expected ubuntu to have kernel") + } + if ubuntu.SizeMB != 2 { + t.Errorf("expected ubuntu SizeMB=2, got %d", ubuntu.SizeMB) + } + if ubuntu.Path != filepath.Join(base, "ubuntu.qcow2") { + t.Errorf("unexpected path: %s", ubuntu.Path) + } + + // Check debian entry. + debian, ok := byName["debian"] + if !ok { + t.Fatal("expected image named 'debian'") + } + if debian.HasKernel { + t.Error("expected debian to NOT have kernel") + } + if debian.SizeMB != 1 { + t.Errorf("expected debian SizeMB=1, got %d", debian.SizeMB) + } +} + +func TestListNames(t *testing.T) { + base := t.TempDir() + + createFile(t, filepath.Join(base, "alpha.qcow2"), 100) + createFile(t, filepath.Join(base, "beta.qcow2"), 100) + + s, err := NewStore(base, slog.Default()) + if err != nil { + t.Fatalf("NewStore failed: %v", err) + } + + names, err := s.ListNames() + if err != nil { + t.Fatalf("ListNames failed: %v", err) + } + + if len(names) != 2 { + t.Fatalf("expected 2 names, got %d", len(names)) + } + + nameSet := make(map[string]bool) + for _, n := range names { + nameSet[n] = true + } + if !nameSet["alpha"] || !nameSet["beta"] { + t.Errorf("expected alpha and beta, got %v", names) + } +} + +func TestGetImagePath(t *testing.T) { + base := t.TempDir() + + createFile(t, filepath.Join(base, "myimage.qcow2"), 100) + + s, err := NewStore(base, slog.Default()) + if err != nil { + t.Fatalf("NewStore failed: %v", err) + } + + path, 
err := s.GetImagePath("myimage") + if err != nil { + t.Fatalf("GetImagePath failed: %v", err) + } + + expected := filepath.Join(base, "myimage.qcow2") + if path != expected { + t.Errorf("expected %s, got %s", expected, path) + } +} + +func TestGetImagePath_Missing(t *testing.T) { + base := t.TempDir() + + s, err := NewStore(base, slog.Default()) + if err != nil { + t.Fatalf("NewStore failed: %v", err) + } + + _, err = s.GetImagePath("nonexistent") + if err == nil { + t.Fatal("expected error for missing image, got nil") + } +} + +func TestGetKernelPath(t *testing.T) { + base := t.TempDir() + + createFile(t, filepath.Join(base, "myimage.qcow2"), 100) + createFile(t, filepath.Join(base, "myimage.vmlinux"), 100) + + s, err := NewStore(base, slog.Default()) + if err != nil { + t.Fatalf("NewStore failed: %v", err) + } + + path, err := s.GetKernelPath("myimage") + if err != nil { + t.Fatalf("GetKernelPath failed: %v", err) + } + + expected := filepath.Join(base, "myimage.vmlinux") + if path != expected { + t.Errorf("expected %s, got %s", expected, path) + } + + // Missing kernel should error. 
+ _, err = s.GetKernelPath("nope") + if err == nil { + t.Fatal("expected error for missing kernel, got nil") + } +} + +func TestHasImage(t *testing.T) { + base := t.TempDir() + + createFile(t, filepath.Join(base, "present.qcow2"), 100) + + s, err := NewStore(base, slog.Default()) + if err != nil { + t.Fatalf("NewStore failed: %v", err) + } + + if !s.HasImage("present") { + t.Error("expected HasImage to return true for existing image") + } + if s.HasImage("absent") { + t.Error("expected HasImage to return false for missing image") + } +} + +func TestBaseDir(t *testing.T) { + base := t.TempDir() + + s, err := NewStore(base, slog.Default()) + if err != nil { + t.Fatalf("NewStore failed: %v", err) + } + + if s.BaseDir() != base { + t.Errorf("expected BaseDir=%s, got %s", base, s.BaseDir()) + } +} diff --git a/fluid-remote/internal/janitor/janitor.go b/fluid-daemon/internal/janitor/janitor.go similarity index 69% rename from fluid-remote/internal/janitor/janitor.go rename to fluid-daemon/internal/janitor/janitor.go index 699f0d92..bb08de6b 100644 --- a/fluid-remote/internal/janitor/janitor.go +++ b/fluid-daemon/internal/janitor/janitor.go @@ -1,4 +1,4 @@ -// Package janitor provides background cleanup of expired sandboxes. +// Package janitor provides background cleanup of expired sandboxes on the host. package janitor import ( @@ -6,26 +6,28 @@ import ( "log/slog" "time" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/store" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/vm" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/state" ) -// Janitor is a background service that periodically cleans up expired sandboxes. +// DestroyFunc is called to destroy an expired sandbox. +type DestroyFunc func(ctx context.Context, sandboxID string) error + +// Janitor periodically cleans up expired sandboxes. 
type Janitor struct { - store store.Store - vmService *vm.Service + store *state.Store + destroyFn DestroyFunc logger *slog.Logger defaultTTL time.Duration } // New creates a new Janitor service. -func New(st store.Store, svc *vm.Service, defaultTTL time.Duration, logger *slog.Logger) *Janitor { +func New(st *state.Store, destroyFn DestroyFunc, defaultTTL time.Duration, logger *slog.Logger) *Janitor { if logger == nil { logger = slog.Default() } return &Janitor{ store: st, - vmService: svc, + destroyFn: destroyFn, logger: logger.With("component", "janitor"), defaultTTL: defaultTTL, } @@ -38,7 +40,7 @@ func (j *Janitor) Start(ctx context.Context, interval time.Duration) { "default_ttl", j.defaultTTL, ) - // Run once immediately at startup + // Run once immediately j.cleanup(ctx) ticker := time.NewTicker(interval) @@ -72,24 +74,19 @@ func (j *Janitor) cleanup(ctx context.Context) { for _, sb := range expired { j.logger.Info("destroying expired sandbox", "id", sb.ID, - "name", sb.SandboxName, + "name", sb.Name, "ttl_seconds", sb.TTLSeconds, "created_at", sb.CreatedAt, "age", time.Since(sb.CreatedAt), ) - if _, err := j.vmService.DestroySandbox(ctx, sb.ID); err != nil { + if err := j.destroyFn(ctx, sb.ID); err != nil { j.logger.Error("failed to destroy expired sandbox", "id", sb.ID, - "name", sb.SandboxName, "error", err, ) - // Continue trying to destroy others even if one fails } else { - j.logger.Info("destroyed expired sandbox", - "id", sb.ID, - "name", sb.SandboxName, - ) + j.logger.Info("destroyed expired sandbox", "id", sb.ID) } } } diff --git a/fluid-daemon/internal/janitor/janitor_test.go b/fluid-daemon/internal/janitor/janitor_test.go new file mode 100644 index 00000000..19e22b8f --- /dev/null +++ b/fluid-daemon/internal/janitor/janitor_test.go @@ -0,0 +1,164 @@ +package janitor + +import ( + "context" + "errors" + "log/slog" + "sync" + "testing" + "time" + + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/state" +) + +func newTestStore(t *testing.T) 
*state.Store { + t.Helper() + st, err := state.NewStore(":memory:") + if err != nil { + t.Fatalf("failed to create in-memory store: %v", err) + } + t.Cleanup(func() { _ = st.Close() }) + return st +} + +func insertExpiredSandbox(t *testing.T, st *state.Store, id string, ttlSeconds int, createdAt time.Time) { + t.Helper() + sb := &state.Sandbox{ + ID: id, + Name: "test-" + id, + AgentID: "agent-1", + BaseImage: "ubuntu", + State: "RUNNING", + TTLSeconds: ttlSeconds, + CreatedAt: createdAt, + UpdatedAt: createdAt, + } + if err := st.CreateSandbox(context.Background(), sb); err != nil { + t.Fatalf("failed to insert sandbox: %v", err) + } +} + +func TestJanitor_CleanupExpired(t *testing.T) { + st := newTestStore(t) + + // Insert a sandbox that expired 10 seconds ago (TTL=1s, created 11s ago). + insertExpiredSandbox(t, st, "SBX-expired", 1, time.Now().UTC().Add(-11*time.Second)) + + var mu sync.Mutex + destroyed := make([]string, 0) + + destroyFn := func(_ context.Context, sandboxID string) error { + mu.Lock() + defer mu.Unlock() + destroyed = append(destroyed, sandboxID) + return nil + } + + j := New(st, destroyFn, 5*time.Minute, slog.Default()) + + ctx, cancel := context.WithCancel(context.Background()) + + // Start janitor in background; it runs cleanup immediately. + done := make(chan struct{}) + go func() { + j.Start(ctx, 50*time.Millisecond) + close(done) + }() + + // Give it time to run the immediate cleanup. + time.Sleep(200 * time.Millisecond) + cancel() + <-done + + mu.Lock() + defer mu.Unlock() + + if len(destroyed) == 0 { + t.Fatal("expected destroyFn to be called for expired sandbox, but it was not") + } + + found := false + for _, id := range destroyed { + if id == "SBX-expired" { + found = true + break + } + } + if !found { + t.Errorf("expected SBX-expired in destroyed list, got %v", destroyed) + } +} + +func TestJanitor_NoExpired(t *testing.T) { + st := newTestStore(t) + + // Insert a sandbox that is NOT expired (TTL=1h, created just now). 
+ insertExpiredSandbox(t, st, "SBX-fresh", 3600, time.Now().UTC()) + + called := false + destroyFn := func(_ context.Context, _ string) error { + called = true + return nil + } + + j := New(st, destroyFn, 5*time.Minute, slog.Default()) + + ctx, cancel := context.WithCancel(context.Background()) + + done := make(chan struct{}) + go func() { + j.Start(ctx, 50*time.Millisecond) + close(done) + }() + + time.Sleep(200 * time.Millisecond) + cancel() + <-done + + if called { + t.Error("expected destroyFn NOT to be called when no sandboxes are expired") + } +} + +func TestJanitor_DestroyError(t *testing.T) { + st := newTestStore(t) + + // Insert two expired sandboxes. + insertExpiredSandbox(t, st, "SBX-fail", 1, time.Now().UTC().Add(-11*time.Second)) + insertExpiredSandbox(t, st, "SBX-ok", 1, time.Now().UTC().Add(-11*time.Second)) + + var mu sync.Mutex + calls := make([]string, 0) + + destroyFn := func(_ context.Context, sandboxID string) error { + mu.Lock() + defer mu.Unlock() + calls = append(calls, sandboxID) + if sandboxID == "SBX-fail" { + return errors.New("simulated destroy failure") + } + return nil + } + + j := New(st, destroyFn, 5*time.Minute, slog.Default()) + + ctx, cancel := context.WithCancel(context.Background()) + + done := make(chan struct{}) + go func() { + j.Start(ctx, 50*time.Millisecond) + close(done) + }() + + time.Sleep(200 * time.Millisecond) + cancel() + <-done + + mu.Lock() + defer mu.Unlock() + + // Both sandboxes should have been attempted regardless of the error on the first. 
+ if len(calls) < 2 { + t.Errorf("expected destroyFn to be called for both sandboxes, got calls: %v", calls) + } +} diff --git a/fluid-daemon/internal/microvm/manager.go b/fluid-daemon/internal/microvm/manager.go new file mode 100644 index 00000000..c9175f0c --- /dev/null +++ b/fluid-daemon/internal/microvm/manager.go @@ -0,0 +1,450 @@ +package microvm + +import ( + "context" + "crypto/rand" + "encoding/json" + "fmt" + "log/slog" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "time" +) + +// SandboxState represents the lifecycle state of a microVM sandbox. +type SandboxState string + +const ( + StateCreating SandboxState = "CREATING" + StateRunning SandboxState = "RUNNING" + StateStopped SandboxState = "STOPPED" + StateError SandboxState = "ERROR" +) + +// SandboxInfo holds runtime information about a managed microVM. +type SandboxInfo struct { + ID string + Name string + PID int + State SandboxState + OverlayDir string + TAPDevice string + MACAddress string + Bridge string + VCPUs int + MemoryMB int +} + +// Manager manages QEMU microVM processes. +type Manager struct { + mu sync.RWMutex + vms map[string]*SandboxInfo // sandbox_id -> info + qemuBin string + workDir string + logger *slog.Logger +} + +// NewManager creates a new microVM manager. +func NewManager(qemuBin, workDir string, logger *slog.Logger) (*Manager, error) { + if logger == nil { + logger = slog.Default() + } + + // Resolve qemu binary + bin, err := exec.LookPath(qemuBin) + if err != nil { + return nil, fmt.Errorf("qemu binary not found: %w", err) + } + + if err := os.MkdirAll(workDir, 0o755); err != nil { + return nil, fmt.Errorf("create work dir: %w", err) + } + + m := &Manager{ + vms: make(map[string]*SandboxInfo), + qemuBin: bin, + workDir: workDir, + logger: logger.With("component", "microvm"), + } + + return m, nil +} + +// WorkDir returns the working directory for sandbox data. 
+func (m *Manager) WorkDir() string { + return m.workDir +} + +// RecoverState scans the work directory for PID files and rebuilds in-memory state. +// Called on daemon restart to reconnect with running QEMU processes. +func (m *Manager) RecoverState(ctx context.Context) error { + m.mu.Lock() + defer m.mu.Unlock() + + entries, err := os.ReadDir(m.workDir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return fmt.Errorf("read work dir: %w", err) + } + + for _, entry := range entries { + if !entry.IsDir() { + continue + } + + sandboxID := entry.Name() + pidFile := filepath.Join(m.workDir, sandboxID, "qemu.pid") + + pidBytes, err := os.ReadFile(pidFile) + if err != nil { + m.logger.Warn("no PID file, skipping", "sandbox_id", sandboxID) + continue + } + + pid, err := strconv.Atoi(strings.TrimSpace(string(pidBytes))) + if err != nil { + m.logger.Warn("invalid PID file", "sandbox_id", sandboxID, "error", err) + continue + } + + // Check if process is still alive + proc, err := os.FindProcess(pid) + if err != nil { + m.logger.Warn("process not found", "sandbox_id", sandboxID, "pid", pid) + continue + } + + // Send signal 0 to check if process exists + if err := proc.Signal(syscall.Signal(0)); err != nil { + m.logger.Info("process dead, cleaning up", "sandbox_id", sandboxID, "pid", pid) + continue + } + + // Read metadata + meta, err := readMetadata(m.workDir, sandboxID) + if err != nil { + m.logger.Warn("failed to read metadata, sandbox state may be incomplete", + "sandbox_id", sandboxID, "error", err) + } + + info := &SandboxInfo{ + ID: sandboxID, + Name: meta.Name, + PID: pid, + State: StateRunning, + OverlayDir: filepath.Join(m.workDir, sandboxID), + TAPDevice: meta.TAPDevice, + MACAddress: meta.MACAddress, + Bridge: meta.Bridge, + VCPUs: meta.VCPUs, + MemoryMB: meta.MemoryMB, + } + m.vms[sandboxID] = info + m.logger.Info("recovered sandbox", "sandbox_id", sandboxID, "pid", pid) + } + + return nil +} + +// LaunchConfig contains parameters for launching 
a microVM. +type LaunchConfig struct { + SandboxID string + Name string + OverlayPath string + KernelPath string + TAPDevice string + MACAddress string + Bridge string + VCPUs int + MemoryMB int + RootDevice string // kernel root= device, defaults to /dev/vda + CloudInitISO string // optional +} + +// Launch starts a QEMU microVM process with the given configuration. +func (m *Manager) Launch(ctx context.Context, cfg LaunchConfig) (*SandboxInfo, error) { + m.mu.Lock() + defer m.mu.Unlock() + + if _, exists := m.vms[cfg.SandboxID]; exists { + return nil, fmt.Errorf("sandbox %s already exists", cfg.SandboxID) + } + + if !filepath.IsAbs(cfg.KernelPath) { + return nil, fmt.Errorf("kernel path must be absolute: %s", cfg.KernelPath) + } + if !filepath.IsAbs(cfg.OverlayPath) { + return nil, fmt.Errorf("overlay path must be absolute: %s", cfg.OverlayPath) + } + + sandboxDir := filepath.Join(m.workDir, cfg.SandboxID) + if err := os.MkdirAll(sandboxDir, 0o755); err != nil { + return nil, fmt.Errorf("create sandbox dir: %w", err) + } + + success := false + defer func() { + if !success { + _ = os.RemoveAll(sandboxDir) + } + }() + + pidFile := filepath.Join(sandboxDir, "qemu.pid") + + rootDev := cfg.RootDevice + if rootDev == "" { + rootDev = "/dev/vda" + } + + // Build QEMU command args + args := []string{ + "-M", "microvm", "-enable-kvm", "-cpu", "host", + "-m", strconv.Itoa(cfg.MemoryMB), + "-smp", strconv.Itoa(cfg.VCPUs), + "-kernel", cfg.KernelPath, + "-append", fmt.Sprintf("console=ttyS0 root=%s rw quiet", rootDev), + "-drive", fmt.Sprintf("id=root,file=%s,format=qcow2,if=none", cfg.OverlayPath), + "-device", "virtio-blk-device,drive=root", + "-netdev", fmt.Sprintf("tap,id=net0,ifname=%s,script=no,downscript=no", cfg.TAPDevice), + "-device", fmt.Sprintf("virtio-net-device,netdev=net0,mac=%s", cfg.MACAddress), + "-serial", "stdio", + "-nographic", "-nodefaults", + "-daemonize", + "-pidfile", pidFile, + } + + // Add cloud-init ISO if provided + if cfg.CloudInitISO != "" { + 
args = append(args, + "-drive", fmt.Sprintf("id=cidata,file=%s,format=raw,if=none", cfg.CloudInitISO), + "-device", "virtio-blk-device,drive=cidata", + ) + } + + m.logger.Info("launching microVM", + "sandbox_id", cfg.SandboxID, + "kernel", cfg.KernelPath, + "overlay", cfg.OverlayPath, + "tap", cfg.TAPDevice, + "mac", cfg.MACAddress, + "vcpus", cfg.VCPUs, + "memory_mb", cfg.MemoryMB, + ) + + cmd := exec.CommandContext(ctx, m.qemuBin, args...) + output, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("qemu launch failed: %w: %s", err, string(output)) + } + + // Read PID from pidfile (QEMU writes it after daemonizing) + var pid int + for i := 0; i < 10; i++ { + pidBytes, err := os.ReadFile(pidFile) + if err == nil { + pid, err = strconv.Atoi(strings.TrimSpace(string(pidBytes))) + if err == nil { + break + } + } + time.Sleep(100 * time.Millisecond) + } + + if pid == 0 { + return nil, fmt.Errorf("failed to read QEMU PID from %s", pidFile) + } + + info := &SandboxInfo{ + ID: cfg.SandboxID, + Name: cfg.Name, + PID: pid, + State: StateRunning, + OverlayDir: sandboxDir, + TAPDevice: cfg.TAPDevice, + MACAddress: cfg.MACAddress, + Bridge: cfg.Bridge, + VCPUs: cfg.VCPUs, + MemoryMB: cfg.MemoryMB, + } + + // Persist metadata for recovery (log but don't fail - VM is already running) + if err := writeMetadata(m.workDir, cfg.SandboxID, sandboxMetadata{ + Name: cfg.Name, + TAPDevice: cfg.TAPDevice, + MACAddress: cfg.MACAddress, + Bridge: cfg.Bridge, + VCPUs: cfg.VCPUs, + MemoryMB: cfg.MemoryMB, + }); err != nil { + m.logger.Warn("failed to write metadata", "sandbox_id", cfg.SandboxID, "error", err) + } + + m.vms[cfg.SandboxID] = info + success = true + m.logger.Info("microVM launched", "sandbox_id", cfg.SandboxID, "pid", pid) + + return info, nil +} + +// Stop sends SIGTERM to the QEMU process for graceful shutdown. 
+func (m *Manager) Stop(ctx context.Context, sandboxID string, force bool) error { + m.mu.Lock() + defer m.mu.Unlock() + + info, ok := m.vms[sandboxID] + if !ok { + return fmt.Errorf("sandbox %s not found", sandboxID) + } + + proc, err := os.FindProcess(info.PID) + if err != nil { + return fmt.Errorf("find process %d: %w", info.PID, err) + } + + sig := syscall.SIGTERM + if force { + sig = syscall.SIGKILL + } + + if err := proc.Signal(sig); err != nil { + return fmt.Errorf("signal process %d: %w", info.PID, err) + } + + // Wait briefly for the process to exit. + done := make(chan struct{}) + go func() { + _, _ = proc.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(5 * time.Second): + } + + info.State = StateStopped + m.logger.Info("microVM stopped", "sandbox_id", sandboxID, "pid", info.PID, "force", force) + return nil +} + +// Destroy stops the QEMU process and removes all associated resources. +func (m *Manager) Destroy(ctx context.Context, sandboxID string) error { + m.mu.Lock() + defer m.mu.Unlock() + + info, ok := m.vms[sandboxID] + if !ok { + // Even if not tracked, try to clean up disk + _ = RemoveOverlay(m.workDir, sandboxID) + return nil + } + + // Kill the process + proc, err := os.FindProcess(info.PID) + if err == nil { + _ = proc.Signal(syscall.SIGKILL) + // Wait briefly for process to exit + done := make(chan struct{}) + go func() { + _, _ = proc.Wait() + close(done) + }() + + select { + case <-done: + case <-time.After(5 * time.Second): + m.logger.Warn("process did not exit after SIGKILL", "sandbox_id", sandboxID, "pid", info.PID) + } + } + + delete(m.vms, sandboxID) + m.logger.Info("microVM destroyed", "sandbox_id", sandboxID) + + // Overlay and TAP cleanup happens at a higher layer + return nil +} + +// Get returns info about a sandbox. The returned SandboxInfo is a copy +// and is safe to use without holding the manager lock. 
+func (m *Manager) Get(sandboxID string) (*SandboxInfo, error) { + m.mu.Lock() + defer m.mu.Unlock() + + info, ok := m.vms[sandboxID] + if !ok { + return nil, fmt.Errorf("sandbox %s not found", sandboxID) + } + + // Check if process is still alive + proc, err := os.FindProcess(info.PID) + if err != nil { + info.State = StateError + } else if err := proc.Signal(syscall.Signal(0)); err != nil { + info.State = StateStopped + } + + cp := *info + return &cp, nil +} + +// List returns all tracked sandboxes. Each returned SandboxInfo is a copy +// and is safe to use without holding the manager lock. +func (m *Manager) List() []*SandboxInfo { + m.mu.RLock() + defer m.mu.RUnlock() + + result := make([]*SandboxInfo, 0, len(m.vms)) + for _, info := range m.vms { + cp := *info + result = append(result, &cp) + } + return result +} + +// GenerateMACAddress generates a random MAC address with QEMU/KVM prefix 52:54:00. +func GenerateMACAddress() string { + buf := make([]byte, 3) + _, _ = rand.Read(buf) + return fmt.Sprintf("52:54:00:%02x:%02x:%02x", buf[0], buf[1], buf[2]) +} + +// sandboxMetadata is persisted to disk for recovery on daemon restart. 
+type sandboxMetadata struct { + Name string `json:"name"` + TAPDevice string `json:"tap_device"` + MACAddress string `json:"mac_address"` + Bridge string `json:"bridge"` + VCPUs int `json:"vcpus"` + MemoryMB int `json:"memory_mb"` +} + +func writeMetadata(workDir, sandboxID string, meta sandboxMetadata) error { + path := filepath.Join(workDir, sandboxID, "metadata.json") + data, err := json.Marshal(meta) + if err != nil { + return fmt.Errorf("marshal metadata: %w", err) + } + if err := os.WriteFile(path, data, 0o644); err != nil { + return fmt.Errorf("write metadata: %w", err) + } + return nil +} + +func readMetadata(workDir, sandboxID string) (sandboxMetadata, error) { + path := filepath.Join(workDir, sandboxID, "metadata.json") + data, err := os.ReadFile(path) + if err != nil { + return sandboxMetadata{}, err + } + var meta sandboxMetadata + if err := json.Unmarshal(data, &meta); err != nil { + return sandboxMetadata{}, err + } + return meta, nil +} diff --git a/fluid-daemon/internal/microvm/manager_test.go b/fluid-daemon/internal/microvm/manager_test.go new file mode 100644 index 00000000..b0da5719 --- /dev/null +++ b/fluid-daemon/internal/microvm/manager_test.go @@ -0,0 +1,88 @@ +package microvm + +import ( + "context" + "log/slog" + "os" + "testing" +) + +func TestGenerateMACAddress(t *testing.T) { + mac := GenerateMACAddress() + if mac == "" { + t.Error("MAC address should not be empty") + } + if len(mac) != 17 { // XX:XX:XX:XX:XX:XX + t.Errorf("MAC address should be 17 chars, got %d: %s", len(mac), mac) + } + if mac[:8] != "52:54:00" { + t.Errorf("MAC should have QEMU prefix 52:54:00, got %s", mac[:8]) + } + + // Generate two and verify they differ + mac2 := GenerateMACAddress() + if mac == mac2 { + t.Error("two generated MACs should differ (random)") + } +} + +func TestWriteReadMetadata(t *testing.T) { + workDir := t.TempDir() + sandboxID := "test-sandbox" + if err := os.MkdirAll(workDir+"/"+sandboxID, 0o755); err != nil { + t.Fatal(err) + } + + want := 
sandboxMetadata{ + Name: "test", + TAPDevice: "fluid-abc123", + MACAddress: "52:54:00:aa:bb:cc", + Bridge: "fluid0", + VCPUs: 2, + MemoryMB: 2048, + } + + if err := writeMetadata(workDir, sandboxID, want); err != nil { + t.Fatalf("writeMetadata: %v", err) + } + + got, err := readMetadata(workDir, sandboxID) + if err != nil { + t.Fatalf("readMetadata: %v", err) + } + + if got != want { + t.Errorf("metadata mismatch:\n got: %+v\nwant: %+v", got, want) + } +} + +func TestRecoverState_EmptyDir(t *testing.T) { + workDir := t.TempDir() + + // This will fail because qemu binary won't be found on macOS, + // so we test the recovery logic directly + m := &Manager{ + vms: make(map[string]*SandboxInfo), + workDir: workDir, + qemuBin: "/bin/true", + logger: nil, + } + + // Set up a nil-safe logger + if m.logger == nil { + m.logger = defaultLogger() + } + + // Empty dir should recover without error + if err := m.RecoverState(context.TODO()); err != nil { + t.Errorf("RecoverState on empty dir: %v", err) + } + + if len(m.vms) != 0 { + t.Errorf("expected 0 VMs, got %d", len(m.vms)) + } +} + +func defaultLogger() *slog.Logger { + return slog.Default() +} diff --git a/fluid-daemon/internal/microvm/overlay.go b/fluid-daemon/internal/microvm/overlay.go new file mode 100644 index 00000000..ea42134b --- /dev/null +++ b/fluid-daemon/internal/microvm/overlay.go @@ -0,0 +1,41 @@ +// Package microvm manages QEMU microVM lifecycle - process launch, tracking, cleanup. +package microvm + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" +) + +// CreateOverlay creates a QCOW2 overlay disk backed by a base image. +// The overlay is created at workDir//disk.qcow2. 
+func CreateOverlay(ctx context.Context, baseImagePath, workDir, sandboxID string) (string, error) { + sandboxDir := filepath.Join(workDir, sandboxID) + if err := os.MkdirAll(sandboxDir, 0o755); err != nil { + return "", fmt.Errorf("create sandbox dir: %w", err) + } + + overlayPath := filepath.Join(sandboxDir, "disk.qcow2") + + cmd := exec.CommandContext(ctx, "qemu-img", "create", + "-f", "qcow2", + "-b", baseImagePath, + "-F", "qcow2", + overlayPath, + ) + + output, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("qemu-img create overlay: %w: %s", err, string(output)) + } + + return overlayPath, nil +} + +// RemoveOverlay removes the sandbox directory and all its contents (overlay, PID file, etc). +func RemoveOverlay(workDir, sandboxID string) error { + sandboxDir := filepath.Join(workDir, sandboxID) + return os.RemoveAll(sandboxDir) +} diff --git a/fluid-daemon/internal/microvm/overlay_test.go b/fluid-daemon/internal/microvm/overlay_test.go new file mode 100644 index 00000000..3eae4d22 --- /dev/null +++ b/fluid-daemon/internal/microvm/overlay_test.go @@ -0,0 +1,40 @@ +package microvm + +import ( + "context" + "os" + "path/filepath" + "testing" +) + +func TestRemoveOverlay(t *testing.T) { + workDir := t.TempDir() + sandboxID := "test-sandbox" + + // Create sandbox dir with files + sandboxDir := filepath.Join(workDir, sandboxID) + if err := os.MkdirAll(sandboxDir, 0o755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(sandboxDir, "disk.qcow2"), []byte("test"), 0o644); err != nil { + t.Fatal(err) + } + + // Remove + if err := RemoveOverlay(workDir, sandboxID); err != nil { + t.Fatal(err) + } + + // Verify removed + if _, err := os.Stat(sandboxDir); !os.IsNotExist(err) { + t.Error("sandbox dir should be removed") + } +} + +func TestCreateOverlay_MissingBase(t *testing.T) { + workDir := t.TempDir() + _, err := CreateOverlay(context.Background(), "/nonexistent/base.qcow2", workDir, "test-id") + if err == nil { + 
t.Error("expected error for missing base image") + } +} diff --git a/fluid-daemon/internal/network/bridge.go b/fluid-daemon/internal/network/bridge.go new file mode 100644 index 00000000..ba375a85 --- /dev/null +++ b/fluid-daemon/internal/network/bridge.go @@ -0,0 +1,155 @@ +package network + +import ( + "context" + "fmt" + "log/slog" + "os/exec" + "regexp" + "strings" +) + +var validBridge = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`) + +// NetworkManager handles bridge resolution and TAP management. +type NetworkManager struct { + defaultBridge string + bridgeMap map[string]string // libvirt network name -> local bridge name + dhcpMode string + logger *slog.Logger +} + +// NewNetworkManager creates a network manager with the given configuration. +func NewNetworkManager(defaultBridge string, bridgeMap map[string]string, dhcpMode string, logger *slog.Logger) *NetworkManager { + if logger == nil { + logger = slog.Default() + } + if bridgeMap == nil { + bridgeMap = make(map[string]string) + } + return &NetworkManager{ + defaultBridge: defaultBridge, + bridgeMap: bridgeMap, + dhcpMode: dhcpMode, + logger: logger.With("component", "network"), + } +} + +// ResolveBridge determines which bridge to attach a sandbox's TAP to. +// Priority: explicit request > source VM's network > default bridge. +func (n *NetworkManager) ResolveBridge(ctx context.Context, sourceVMName, requestedNetwork string) (string, error) { + var bridge string + + // 1. 
If explicit network requested, look up in bridge_map + if requestedNetwork != "" { + if b, ok := n.bridgeMap[requestedNetwork]; ok { + n.logger.Info("resolved bridge from requested network", "network", requestedNetwork, "bridge", b) + bridge = b + } else if strings.HasPrefix(requestedNetwork, "br") || strings.HasPrefix(requestedNetwork, "virbr") { + // If the requested network looks like a bridge name (not a libvirt network), use it directly + bridge = requestedNetwork + } else { + return "", fmt.Errorf("unknown network %q: not found in bridge_map", requestedNetwork) + } + } + + // 2. If source VM specified, query libvirt for its network + if bridge == "" && sourceVMName != "" { + b, err := n.resolveFromSourceVM(ctx, sourceVMName) + if err == nil && b != "" { + n.logger.Info("resolved bridge from source VM", "source_vm", sourceVMName, "bridge", b) + bridge = b + } + if err != nil { + n.logger.Warn("failed to resolve bridge from source VM, using default", + "source_vm", sourceVMName, "error", err) + } + } + + // 3. Fall back to default bridge + if bridge == "" { + bridge = n.defaultBridge + n.logger.Info("using default bridge", "bridge", bridge) + } + + // Validate bridge name contains only safe characters. + if !validBridge.MatchString(bridge) { + return "", fmt.Errorf("invalid bridge name %q: must match [a-zA-Z0-9_-]+", bridge) + } + + return bridge, nil +} + +// resolveFromSourceVM queries virsh to determine which bridge a source VM is connected to. 
+func (n *NetworkManager) resolveFromSourceVM(ctx context.Context, sourceVMName string) (string, error) { + // virsh domiflist returns network/bridge info + cmd := exec.CommandContext(ctx, "virsh", "domiflist", sourceVMName) + output, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("virsh domiflist: %w", err) + } + + // Parse output to find network name or bridge + // Format: Interface Type Source Model MAC + lines := strings.Split(string(output), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "---") || strings.HasPrefix(line, "Interface") { + continue + } + + fields := strings.Fields(line) + if len(fields) < 3 { + continue + } + + ifType := fields[1] + source := fields[2] + + // If type is "bridge", source is the bridge name directly + if ifType == "bridge" { + return source, nil + } + + // If type is "network", source is a libvirt network name + if ifType == "network" { + // Check bridge_map first + if bridge, ok := n.bridgeMap[source]; ok { + return bridge, nil + } + + // Resolve via virsh net-info + return n.resolveNetworkToBridge(ctx, source) + } + } + + return "", fmt.Errorf("no network interface found for VM %s", sourceVMName) +} + +// resolveNetworkToBridge uses virsh net-info to find the bridge for a libvirt network. 
+func (n *NetworkManager) resolveNetworkToBridge(ctx context.Context, networkName string) (string, error) { + cmd := exec.CommandContext(ctx, "virsh", "net-info", networkName) + output, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("virsh net-info %s: %w", networkName, err) + } + + // Parse "Bridge: virbr0" from output + lines := strings.Split(string(output), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "Bridge:") { + parts := strings.SplitN(line, ":", 2) + if len(parts) == 2 { + return strings.TrimSpace(parts[1]), nil + } + } + } + + return "", fmt.Errorf("no bridge found for network %s", networkName) +} + +// DHCPMode returns the configured DHCP mode. +func (n *NetworkManager) DHCPMode() string { + return n.dhcpMode +} diff --git a/fluid-daemon/internal/network/bridge_test.go b/fluid-daemon/internal/network/bridge_test.go new file mode 100644 index 00000000..f8994ab3 --- /dev/null +++ b/fluid-daemon/internal/network/bridge_test.go @@ -0,0 +1,108 @@ +package network + +import ( + "context" + "log/slog" + "testing" +) + +func TestNetworkManager_ResolveBridge_Default(t *testing.T) { + nm := NewNetworkManager("br0", nil, "dnsmasq", slog.Default()) + + bridge, err := nm.ResolveBridge(context.Background(), "", "") + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if bridge != "br0" { + t.Fatalf("expected %q, got %q", "br0", bridge) + } +} + +func TestNetworkManager_ResolveBridge_FromMap(t *testing.T) { + bridgeMap := map[string]string{ + "default": "virbr0", + "mgmt": "br-mgmt", + } + nm := NewNetworkManager("br0", bridgeMap, "dnsmasq", slog.Default()) + + tests := []struct { + name string + requestedNetwork string + wantBridge string + }{ + {"default network", "default", "virbr0"}, + {"mgmt network", "mgmt", "br-mgmt"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + bridge, err := nm.ResolveBridge(context.Background(), "", tt.requestedNetwork) + if err 
!= nil { + t.Fatalf("expected no error, got %v", err) + } + if bridge != tt.wantBridge { + t.Fatalf("expected %q, got %q", tt.wantBridge, bridge) + } + }) + } +} + +func TestNetworkManager_ResolveBridge_Explicit(t *testing.T) { + nm := NewNetworkManager("br0", nil, "dnsmasq", slog.Default()) + + tests := []struct { + name string + requestedNetwork string + }{ + {"virbr prefix", "virbr5"}, + {"br prefix", "br-custom"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + bridge, err := nm.ResolveBridge(context.Background(), "", tt.requestedNetwork) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if bridge != tt.requestedNetwork { + t.Fatalf("expected %q, got %q", tt.requestedNetwork, bridge) + } + }) + } +} + +func TestNetworkManager_ResolveBridge_UnknownNetwork(t *testing.T) { + bridgeMap := map[string]string{ + "default": "virbr0", + } + nm := NewNetworkManager("br0", bridgeMap, "dnsmasq", slog.Default()) + + _, err := nm.ResolveBridge(context.Background(), "", "nonexistent") + if err == nil { + t.Fatal("expected error for unknown network, got nil") + } + + want := `unknown network "nonexistent": not found in bridge_map` + if err.Error() != want { + t.Fatalf("expected error %q, got %q", want, err.Error()) + } +} + +func TestNetworkManager_DHCPMode(t *testing.T) { + tests := []struct { + mode string + }{ + {"dnsmasq"}, + {"static"}, + {""}, + } + + for _, tt := range tests { + t.Run(tt.mode, func(t *testing.T) { + nm := NewNetworkManager("br0", nil, tt.mode, slog.Default()) + if got := nm.DHCPMode(); got != tt.mode { + t.Fatalf("expected %q, got %q", tt.mode, got) + } + }) + } +} diff --git a/fluid-daemon/internal/network/ip.go b/fluid-daemon/internal/network/ip.go new file mode 100644 index 00000000..d09d7ac3 --- /dev/null +++ b/fluid-daemon/internal/network/ip.go @@ -0,0 +1,162 @@ +package network + +import ( + "context" + "fmt" + "log/slog" + "os" + "os/exec" + "path/filepath" + "strings" + "time" +) + +// DiscoverIP 
discovers the IP address assigned to a MAC address on a given bridge. +// It uses the configured DHCP mode to determine the discovery strategy. +func (n *NetworkManager) DiscoverIP(ctx context.Context, macAddress, bridge string, timeout time.Duration) (string, error) { + switch n.dhcpMode { + case "libvirt": + return discoverIPLibvirt(ctx, macAddress, bridge, timeout, n.logger) + case "arp": + return discoverIPARP(ctx, macAddress, bridge, timeout, n.logger) + case "dnsmasq": + return discoverIPDnsmasq(ctx, macAddress, bridge, timeout, n.logger) + default: + return discoverIPARP(ctx, macAddress, bridge, timeout, n.logger) + } +} + +// discoverIPLibvirt reads libvirt dnsmasq lease files to find IP for a MAC. +func discoverIPLibvirt(ctx context.Context, macAddress, bridge string, timeout time.Duration, logger *slog.Logger) (string, error) { + mac := strings.ToLower(macAddress) + deadline := time.Now().Add(timeout) + + // Sanitize bridge name to prevent path traversal. + safeBridge := filepath.Base(bridge) + + // Try common lease file locations + leaseFiles := []string{ + "/var/lib/libvirt/dnsmasq/default.leases", + "/var/lib/libvirt/dnsmasq/virbr0.leases", + fmt.Sprintf("/var/lib/libvirt/dnsmasq/%s.leases", safeBridge), + } + + for time.Now().Before(deadline) { + select { + case <-ctx.Done(): + return "", ctx.Err() + default: + } + + for _, leaseFile := range leaseFiles { + data, err := os.ReadFile(leaseFile) + if err != nil { + continue + } + + // Lease file format: timestamp MAC IP hostname client-id + for _, line := range strings.Split(string(data), "\n") { + fields := strings.Fields(line) + if len(fields) >= 3 && strings.EqualFold(fields[1], mac) { + logger.Info("discovered IP via libvirt lease", "mac", macAddress, "ip", fields[2]) + return fields[2], nil + } + } + } + + time.Sleep(2 * time.Second) + } + + return "", fmt.Errorf("IP discovery timed out for MAC %s (libvirt mode)", macAddress) +} + +// discoverIPARP polls the ARP table to find IP for a MAC. 
+func discoverIPARP(ctx context.Context, macAddress, bridge string, timeout time.Duration, logger *slog.Logger) (string, error) { + mac := strings.ToLower(macAddress) + deadline := time.Now().Add(timeout) + + for time.Now().Before(deadline) { + select { + case <-ctx.Done(): + return "", ctx.Err() + default: + } + + // Try ip neigh first + cmd := exec.CommandContext(ctx, "ip", "neigh", "show", "dev", bridge) + output, err := cmd.Output() + if err == nil { + // Format: IP lladdr MAC STATE + for _, line := range strings.Split(string(output), "\n") { + line = strings.TrimSpace(line) + if line == "" { + continue + } + fields := strings.Fields(line) + // Look for the MAC address in the line + for i, f := range fields { + if strings.EqualFold(f, mac) && i > 0 { + ip := fields[0] + logger.Info("discovered IP via ARP", "mac", macAddress, "ip", ip) + return ip, nil + } + } + } + } + + // Fallback: arp -an + cmd = exec.CommandContext(ctx, "arp", "-an") + output, err = cmd.Output() + if err == nil { + for _, line := range strings.Split(string(output), "\n") { + if strings.Contains(strings.ToLower(line), mac) { + // Format: ? (IP) at MAC [ether] on interface + start := strings.Index(line, "(") + end := strings.Index(line, ")") + if start >= 0 && end > start { + ip := line[start+1 : end] + logger.Info("discovered IP via arp", "mac", macAddress, "ip", ip) + return ip, nil + } + } + } + } + + time.Sleep(2 * time.Second) + } + + return "", fmt.Errorf("IP discovery timed out for MAC %s (arp mode)", macAddress) +} + +// discoverIPDnsmasq reads local dnsmasq lease file for IP discovery. +func discoverIPDnsmasq(ctx context.Context, macAddress, bridge string, timeout time.Duration, logger *slog.Logger) (string, error) { + mac := strings.ToLower(macAddress) + deadline := time.Now().Add(timeout) + + // Sanitize bridge name to prevent path traversal. 
+ safeBridge := filepath.Base(bridge) + leaseFile := fmt.Sprintf("/var/lib/fluid/dnsmasq/%s.leases", safeBridge) + + for time.Now().Before(deadline) { + select { + case <-ctx.Done(): + return "", ctx.Err() + default: + } + + data, err := os.ReadFile(leaseFile) + if err == nil { + for _, line := range strings.Split(string(data), "\n") { + fields := strings.Fields(line) + if len(fields) >= 3 && strings.EqualFold(fields[1], mac) { + logger.Info("discovered IP via dnsmasq lease", "mac", macAddress, "ip", fields[2]) + return fields[2], nil + } + } + } + + time.Sleep(2 * time.Second) + } + + return "", fmt.Errorf("IP discovery timed out for MAC %s (dnsmasq mode)", macAddress) +} diff --git a/fluid-daemon/internal/network/tap.go b/fluid-daemon/internal/network/tap.go new file mode 100644 index 00000000..3302f4f3 --- /dev/null +++ b/fluid-daemon/internal/network/tap.go @@ -0,0 +1,62 @@ +// Package network manages TAP devices and bridge networking for microVM sandboxes. +package network + +import ( + "context" + "fmt" + "log/slog" + "os/exec" + "strings" +) + +// CreateTAP creates a TAP device and attaches it to a bridge. +// TAP names use format "fl-<shortID>" where shortID is the first 9 chars of the sandbox ID (see TAPName). +func CreateTAP(ctx context.Context, tapName, bridge string, logger *slog.Logger) error { + // 1. Create TAP device + if err := runCmd(ctx, "ip", "tuntap", "add", "dev", tapName, "mode", "tap"); err != nil { + return fmt.Errorf("create tap %s: %w", tapName, err) + } + + // 2. Attach to bridge + if err := runCmd(ctx, "ip", "link", "set", tapName, "master", bridge); err != nil { + // Cleanup on failure + _ = DestroyTAP(ctx, tapName) + return fmt.Errorf("attach tap %s to bridge %s: %w", tapName, bridge, err) + } + + // 3. 
Bring up + if err := runCmd(ctx, "ip", "link", "set", tapName, "up"); err != nil { + _ = DestroyTAP(ctx, tapName) + return fmt.Errorf("bring up tap %s: %w", tapName, err) + } + + if logger != nil { + logger.Info("TAP created", "tap", tapName, "bridge", bridge) + } + return nil +} + +// DestroyTAP removes a TAP device. +func DestroyTAP(ctx context.Context, tapName string) error { + return runCmd(ctx, "ip", "link", "delete", tapName) +} + +// TAPName generates a TAP device name from a sandbox ID. +// Uses the first 9 characters of the sandbox ID (after any prefix). +// Stays within Linux 15-char interface name limit: "fl-" + 9 = 12. +func TAPName(sandboxID string) string { + id := strings.TrimPrefix(sandboxID, "SBX-") + if len(id) > 9 { + id = id[:9] + } + return "fl-" + strings.ToLower(id) +} + +func runCmd(ctx context.Context, name string, args ...string) error { + cmd := exec.CommandContext(ctx, name, args...) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("%s %s: %w: %s", name, strings.Join(args, " "), err, strings.TrimSpace(string(output))) + } + return nil +} diff --git a/fluid-daemon/internal/network/tap_test.go b/fluid-daemon/internal/network/tap_test.go new file mode 100644 index 00000000..08a12bbd --- /dev/null +++ b/fluid-daemon/internal/network/tap_test.go @@ -0,0 +1,24 @@ +package network + +import ( + "testing" +) + +func TestTAPName(t *testing.T) { + tests := []struct { + sandboxID string + want string + }{ + {"SBX-abc123def", "fl-abc123def"}, + {"SBX-xyz", "fl-xyz"}, + {"abc123def456", "fl-abc123def"}, + {"short", "fl-short"}, + } + + for _, tt := range tests { + got := TAPName(tt.sandboxID) + if got != tt.want { + t.Errorf("TAPName(%q) = %q, want %q", tt.sandboxID, got, tt.want) + } + } +} diff --git a/fluid/internal/proxmox/client.go b/fluid-daemon/internal/provider/lxc/client.go similarity index 64% rename from fluid/internal/proxmox/client.go rename to fluid-daemon/internal/provider/lxc/client.go index 
4ef5c9c6..90986b3a 100644 --- a/fluid/internal/proxmox/client.go +++ b/fluid-daemon/internal/provider/lxc/client.go @@ -1,4 +1,4 @@ -package proxmox +package lxc import ( "context" @@ -15,8 +15,8 @@ import ( "time" ) -// Client is a pure Go HTTP client for the Proxmox VE API. -// Authentication uses API tokens (no session/CSRF needed). +// Client is an HTTP client for the Proxmox VE LXC API. +// Authentication uses API tokens. type Client struct { baseURL string tokenID string @@ -27,7 +27,7 @@ type Client struct { maxRetries int } -// NewClient creates a new Proxmox API client. +// NewClient creates a new Proxmox LXC API client. func NewClient(cfg Config, logger *slog.Logger) *Client { if logger == nil { logger = slog.Default() @@ -38,7 +38,7 @@ func NewClient(cfg Config, logger *slog.Logger) *Client { }, } if !cfg.VerifySSL { - logger.Warn("TLS certificate verification is disabled - connections are vulnerable to MITM attacks") + logger.Warn("TLS certificate verification is disabled") } timeout := cfg.Timeout if timeout == 0 { @@ -59,8 +59,6 @@ func NewClient(cfg Config, logger *slog.Logger) *Client { } // do executes an HTTP request against the Proxmox API with retry logic. -// Retries on 5xx status codes and transient network errors with exponential backoff. -// Does not retry on 4xx errors, context cancellation, or context deadline exceeded. 
func (c *Client) do(ctx context.Context, method, path string, body url.Values) (json.RawMessage, error) { apiURL := fmt.Sprintf("%s/api2/json%s", c.baseURL, path) @@ -85,7 +83,6 @@ func (c *Client) do(ctx context.Context, method, path string, body url.Values) ( resp, err := c.httpClient.Do(req) if err != nil { - // Do not retry on context cancellation or deadline exceeded if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return nil, fmt.Errorf("request %s %s: %w", method, path, err) } @@ -109,7 +106,6 @@ func (c *Client) do(ctx context.Context, method, path string, body url.Values) ( return nil, fmt.Errorf("read response: %w", err) } - // Retry on 5xx status codes if resp.StatusCode >= 500 { lastErr = fmt.Errorf("API %s %s returned %d: %s", method, path, resp.StatusCode, string(respBody)) if attempt < c.maxRetries { @@ -125,12 +121,10 @@ func (c *Client) do(ctx context.Context, method, path string, body url.Values) ( continue } - // Non-retryable HTTP errors (4xx, etc.) if resp.StatusCode < 200 || resp.StatusCode >= 300 { return nil, fmt.Errorf("API %s %s returned %d: %s", method, path, resp.StatusCode, string(respBody)) } - // Parse the outer response envelope var envelope struct { Data json.RawMessage `json:"data"` Errors json.RawMessage `json:"errors,omitempty"` @@ -145,57 +139,64 @@ func (c *Client) do(ctx context.Context, method, path string, body url.Values) ( return nil, lastErr } -// ListVMs returns all QEMU VMs on the configured node. -func (c *Client) ListVMs(ctx context.Context) ([]VMListEntry, error) { - path := fmt.Sprintf("/nodes/%s/qemu", c.node) +// ListCTs returns all LXC containers on the configured node. 
+func (c *Client) ListCTs(ctx context.Context) ([]CTListEntry, error) { + path := fmt.Sprintf("/nodes/%s/lxc", c.node) data, err := c.do(ctx, http.MethodGet, path, nil) if err != nil { return nil, err } - var vms []VMListEntry - if err := json.Unmarshal(data, &vms); err != nil { - return nil, fmt.Errorf("unmarshal VM list: %w", err) + var cts []CTListEntry + if err := json.Unmarshal(data, &cts); err != nil { + return nil, fmt.Errorf("unmarshal CT list: %w", err) } - return vms, nil + return cts, nil } -// GetVMStatus returns the status of a VM by VMID. -func (c *Client) GetVMStatus(ctx context.Context, vmid int) (*VMStatus, error) { - path := fmt.Sprintf("/nodes/%s/qemu/%d/status/current", c.node, vmid) +// GetCTStatus returns the status of a container by VMID. +func (c *Client) GetCTStatus(ctx context.Context, vmid int) (*CTStatus, error) { + path := fmt.Sprintf("/nodes/%s/lxc/%d/status/current", c.node, vmid) data, err := c.do(ctx, http.MethodGet, path, nil) if err != nil { return nil, err } - var status VMStatus + var status CTStatus if err := json.Unmarshal(data, &status); err != nil { - return nil, fmt.Errorf("unmarshal VM status: %w", err) + return nil, fmt.Errorf("unmarshal CT status: %w", err) } return &status, nil } -// GetVMConfig returns the configuration of a VM by VMID. -func (c *Client) GetVMConfig(ctx context.Context, vmid int) (*VMConfig, error) { - path := fmt.Sprintf("/nodes/%s/qemu/%d/config", c.node, vmid) +// GetCTConfig returns the configuration of a container by VMID. +func (c *Client) GetCTConfig(ctx context.Context, vmid int) (*CTConfig, error) { + path := fmt.Sprintf("/nodes/%s/lxc/%d/config", c.node, vmid) data, err := c.do(ctx, http.MethodGet, path, nil) if err != nil { return nil, err } - var cfg VMConfig + var cfg CTConfig if err := json.Unmarshal(data, &cfg); err != nil { - return nil, fmt.Errorf("unmarshal VM config: %w", err) + return nil, fmt.Errorf("unmarshal CT config: %w", err) } return &cfg, nil } -// CloneVM clones a VM. 
Returns the UPID of the clone task. -func (c *Client) CloneVM(ctx context.Context, sourceVMID, newVMID int, name string, full bool) (string, error) { - path := fmt.Sprintf("/nodes/%s/qemu/%d/clone", c.node, sourceVMID) +// SetCTConfig updates container configuration parameters. +func (c *Client) SetCTConfig(ctx context.Context, vmid int, params url.Values) error { + path := fmt.Sprintf("/nodes/%s/lxc/%d/config", c.node, vmid) + _, err := c.do(ctx, http.MethodPut, path, params) + return err +} + +// CloneCT clones a container. Returns the UPID of the clone task. +func (c *Client) CloneCT(ctx context.Context, sourceVMID, newVMID int, hostname string, full bool) (string, error) { + path := fmt.Sprintf("/nodes/%s/lxc/%d/clone", c.node, sourceVMID) params := url.Values{ - "newid": {fmt.Sprintf("%d", newVMID)}, - "name": {name}, + "newid": {fmt.Sprintf("%d", newVMID)}, + "hostname": {hostname}, } if full { params.Set("full", "1") @@ -206,7 +207,6 @@ func (c *Client) CloneVM(ctx context.Context, sourceVMID, newVMID int, name stri return "", err } - // Response is a UPID string var upid string if err := json.Unmarshal(data, &upid); err != nil { return "", fmt.Errorf("unmarshal UPID: %w", err) @@ -214,16 +214,9 @@ func (c *Client) CloneVM(ctx context.Context, sourceVMID, newVMID int, name stri return upid, nil } -// SetVMConfig updates VM configuration parameters. -func (c *Client) SetVMConfig(ctx context.Context, vmid int, params url.Values) error { - path := fmt.Sprintf("/nodes/%s/qemu/%d/config", c.node, vmid) - _, err := c.do(ctx, http.MethodPut, path, params) - return err -} - -// StartVM starts a VM. Returns the UPID of the start task. -func (c *Client) StartVM(ctx context.Context, vmid int) (string, error) { - path := fmt.Sprintf("/nodes/%s/qemu/%d/status/start", c.node, vmid) +// StartCT starts a container. Returns the UPID. 
+func (c *Client) StartCT(ctx context.Context, vmid int) (string, error) { + path := fmt.Sprintf("/nodes/%s/lxc/%d/status/start", c.node, vmid) data, err := c.do(ctx, http.MethodPost, path, nil) if err != nil { return "", err @@ -236,9 +229,9 @@ func (c *Client) StartVM(ctx context.Context, vmid int) (string, error) { return upid, nil } -// StopVM stops a VM (hard stop). Returns the UPID. -func (c *Client) StopVM(ctx context.Context, vmid int) (string, error) { - path := fmt.Sprintf("/nodes/%s/qemu/%d/status/stop", c.node, vmid) +// StopCT force-stops a container. Returns the UPID. +func (c *Client) StopCT(ctx context.Context, vmid int) (string, error) { + path := fmt.Sprintf("/nodes/%s/lxc/%d/status/stop", c.node, vmid) data, err := c.do(ctx, http.MethodPost, path, nil) if err != nil { return "", err @@ -251,9 +244,9 @@ func (c *Client) StopVM(ctx context.Context, vmid int) (string, error) { return upid, nil } -// ShutdownVM gracefully shuts down a VM. Returns the UPID. -func (c *Client) ShutdownVM(ctx context.Context, vmid int) (string, error) { - path := fmt.Sprintf("/nodes/%s/qemu/%d/status/shutdown", c.node, vmid) +// ShutdownCT gracefully shuts down a container. Returns the UPID. +func (c *Client) ShutdownCT(ctx context.Context, vmid int) (string, error) { + path := fmt.Sprintf("/nodes/%s/lxc/%d/status/shutdown", c.node, vmid) data, err := c.do(ctx, http.MethodPost, path, nil) if err != nil { return "", err @@ -266,12 +259,12 @@ func (c *Client) ShutdownVM(ctx context.Context, vmid int) (string, error) { return upid, nil } -// DeleteVM deletes a VM and all its resources. Returns the UPID. -func (c *Client) DeleteVM(ctx context.Context, vmid int) (string, error) { - path := fmt.Sprintf("/nodes/%s/qemu/%d", c.node, vmid) +// DeleteCT deletes a container with purge. Returns the UPID. 
+func (c *Client) DeleteCT(ctx context.Context, vmid int) (string, error) { + path := fmt.Sprintf("/nodes/%s/lxc/%d", c.node, vmid) params := url.Values{ - "purge": {"1"}, - "destroy-unreferenced-disks": {"1"}, + "purge": {"1"}, + "force": {"1"}, } data, err := c.do(ctx, http.MethodDelete, path+"?"+params.Encode(), nil) if err != nil { @@ -285,15 +278,27 @@ func (c *Client) DeleteVM(ctx context.Context, vmid int) (string, error) { return upid, nil } -// CreateSnapshot creates a snapshot of a VM. Returns the UPID (or nil for sync). -func (c *Client) CreateSnapshot(ctx context.Context, vmid int, name, description string) (string, error) { - path := fmt.Sprintf("/nodes/%s/qemu/%d/snapshot", c.node, vmid) +// GetCTInterfaces returns network interfaces of a container. +func (c *Client) GetCTInterfaces(ctx context.Context, vmid int) ([]CTInterface, error) { + path := fmt.Sprintf("/nodes/%s/lxc/%d/interfaces", c.node, vmid) + data, err := c.do(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, err + } + + var ifaces []CTInterface + if err := json.Unmarshal(data, &ifaces); err != nil { + return nil, fmt.Errorf("unmarshal interfaces: %w", err) + } + return ifaces, nil +} + +// CreateSnapshot creates a snapshot of a container. +func (c *Client) CreateSnapshot(ctx context.Context, vmid int, name string) (string, error) { + path := fmt.Sprintf("/nodes/%s/lxc/%d/snapshot", c.node, vmid) params := url.Values{ "snapname": {name}, } - if description != "" { - params.Set("description", description) - } data, err := c.do(ctx, http.MethodPost, path, params) if err != nil { @@ -302,35 +307,11 @@ func (c *Client) CreateSnapshot(ctx context.Context, vmid int, name, description var upid string if err := json.Unmarshal(data, &upid); err != nil { - // Snapshot may return null data on sync completion return "", nil } return upid, nil } -// GetGuestAgentInterfaces returns network interfaces via the QEMU guest agent. 
-func (c *Client) GetGuestAgentInterfaces(ctx context.Context, vmid int) ([]NetworkInterface, error) { - path := fmt.Sprintf("/nodes/%s/qemu/%d/agent/network-get-interfaces", c.node, vmid) - data, err := c.do(ctx, http.MethodGet, path, nil) - if err != nil { - return nil, err - } - - // Proxmox wraps the result in a "result" field - var result struct { - Result []NetworkInterface `json:"result"` - } - if err := json.Unmarshal(data, &result); err != nil { - // Try direct unmarshal - var ifaces []NetworkInterface - if err2 := json.Unmarshal(data, &ifaces); err2 != nil { - return nil, fmt.Errorf("unmarshal interfaces: %w", err) - } - return ifaces, nil - } - return result.Result, nil -} - // GetNodeStatus returns the resource status of the configured node. func (c *Client) GetNodeStatus(ctx context.Context) (*NodeStatus, error) { path := fmt.Sprintf("/nodes/%s/status", c.node) @@ -362,7 +343,6 @@ func (c *Client) GetTaskStatus(ctx context.Context, upid string) (*TaskStatus, e } // WaitForTask polls a task until it completes or the context is cancelled. -// Returns an error if the task fails. func (c *Client) WaitForTask(ctx context.Context, upid string) error { if upid == "" { return nil @@ -392,14 +372,14 @@ func (c *Client) WaitForTask(ctx context.Context, upid string) error { // NextVMID finds the next available VMID in the configured range. 
func (c *Client) NextVMID(ctx context.Context, start, end int) (int, error) { - vms, err := c.ListVMs(ctx) + cts, err := c.ListCTs(ctx) if err != nil { - return 0, fmt.Errorf("list VMs for VMID allocation: %w", err) + return 0, fmt.Errorf("list CTs for VMID allocation: %w", err) } - used := make(map[int]bool, len(vms)) - for _, vm := range vms { - used[vm.VMID] = true + used := make(map[int]bool, len(cts)) + for _, ct := range cts { + used[ct.VMID] = true } for id := start; id <= end; id++ { @@ -409,12 +389,3 @@ func (c *Client) NextVMID(ctx context.Context, start, end int) (int, error) { } return 0, fmt.Errorf("no available VMID in range %d-%d", start, end) } - -// ResizeVM changes the VM's CPU and memory configuration. -func (c *Client) ResizeVM(ctx context.Context, vmid, cores, memoryMB int) error { - params := url.Values{ - "cores": {fmt.Sprintf("%d", cores)}, - "memory": {fmt.Sprintf("%d", memoryMB)}, - } - return c.SetVMConfig(ctx, vmid, params) -} diff --git a/fluid-daemon/internal/provider/lxc/client_test.go b/fluid-daemon/internal/provider/lxc/client_test.go new file mode 100644 index 00000000..0d6bcb0f --- /dev/null +++ b/fluid-daemon/internal/provider/lxc/client_test.go @@ -0,0 +1,562 @@ +package lxc + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" +) + +// proxmoxResponse wraps data in the Proxmox API envelope. 
+func proxmoxResponse(data any) []byte { + d, _ := json.Marshal(data) + resp := struct { + Data json.RawMessage `json:"data"` + }{Data: d} + b, _ := json.Marshal(resp) + return b +} + +func testClient(t *testing.T, handler http.Handler) (*Client, *httptest.Server) { + t.Helper() + srv := httptest.NewTLSServer(handler) + t.Cleanup(srv.Close) + + cfg := Config{ + Host: srv.URL, + TokenID: "test@pam!testtoken", + Secret: "test-secret", + Node: "pve", + VerifySSL: false, + Timeout: 10 * time.Second, + } + client := NewClient(cfg, nil) + // Override httpClient to use the test server's TLS client + client.httpClient = srv.Client() + client.httpClient.Timeout = 10 * time.Second + return client, srv +} + +func TestListCTs(t *testing.T) { + cts := []CTListEntry{ + {VMID: 100, Name: "web-server", Status: "running"}, + {VMID: 101, Name: "db-server", Status: "stopped", Template: 1}, + } + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + t.Errorf("expected GET, got %s", r.Method) + } + if !strings.HasSuffix(r.URL.Path, "/nodes/pve/lxc") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + // Check auth header + auth := r.Header.Get("Authorization") + if !strings.Contains(auth, "PVEAPIToken=") { + t.Errorf("missing API token in Authorization header: %s", auth) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(cts)) + })) + + result, err := client.ListCTs(context.Background()) + if err != nil { + t.Fatalf("ListCTs() error: %v", err) + } + if len(result) != 2 { + t.Fatalf("expected 2 CTs, got %d", len(result)) + } + if result[0].Name != "web-server" { + t.Errorf("result[0].Name = %q, want %q", result[0].Name, "web-server") + } + if result[1].Template != 1 { + t.Errorf("result[1].Template = %d, want 1", result[1].Template) + } +} + +func TestGetCTStatus(t *testing.T) { + status := CTStatus{ + VMID: 100, + Name: "web-server", + Status: "running", + CPU: 0.15, + MaxMem: 2147483648, + 
Mem: 536870912, + } + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasSuffix(r.URL.Path, "/nodes/pve/lxc/100/status/current") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(status)) + })) + + result, err := client.GetCTStatus(context.Background(), 100) + if err != nil { + t.Fatalf("GetCTStatus() error: %v", err) + } + if result.Status != "running" { + t.Errorf("Status = %q, want %q", result.Status, "running") + } + if result.Name != "web-server" { + t.Errorf("Name = %q, want %q", result.Name, "web-server") + } +} + +func TestGetCTConfig(t *testing.T) { + cfg := CTConfig{ + Hostname: "test-ct", + Memory: 2048, + Cores: 4, + Net0: "name=eth0,bridge=vmbr0,ip=dhcp", + } + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasSuffix(r.URL.Path, "/nodes/pve/lxc/100/config") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(cfg)) + })) + + result, err := client.GetCTConfig(context.Background(), 100) + if err != nil { + t.Fatalf("GetCTConfig() error: %v", err) + } + if result.Cores != 4 { + t.Errorf("Cores = %d, want 4", result.Cores) + } + if result.Net0 != cfg.Net0 { + t.Errorf("Net0 = %q, want %q", result.Net0, cfg.Net0) + } +} + +func TestCloneCT(t *testing.T) { + expectedUPID := "UPID:pve:000F1234:00B3C4D5:12345678:vzclone:100:user@pam:" + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Errorf("expected POST, got %s", r.Method) + } + if !strings.HasSuffix(r.URL.Path, "/nodes/pve/lxc/100/clone") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + // Check body params + if err := r.ParseForm(); err != nil { + t.Fatalf("parse form: %v", err) + } + if r.FormValue("newid") != "9001" { + t.Errorf("newid = %q, want 9001", 
r.FormValue("newid")) + } + if r.FormValue("hostname") != "sbx-test" { + t.Errorf("hostname = %q, want sbx-test", r.FormValue("hostname")) + } + if r.FormValue("full") != "1" { + t.Errorf("full = %q, want 1", r.FormValue("full")) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(expectedUPID)) + })) + + upid, err := client.CloneCT(context.Background(), 100, 9001, "sbx-test", true) + if err != nil { + t.Fatalf("CloneCT() error: %v", err) + } + if upid != expectedUPID { + t.Errorf("UPID = %q, want %q", upid, expectedUPID) + } +} + +func TestStartCT(t *testing.T) { + expectedUPID := "UPID:pve:0001:0002:12345678:vzstart:100:user@pam:" + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Errorf("expected POST, got %s", r.Method) + } + if !strings.HasSuffix(r.URL.Path, "/nodes/pve/lxc/100/status/start") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(expectedUPID)) + })) + + upid, err := client.StartCT(context.Background(), 100) + if err != nil { + t.Fatalf("StartCT() error: %v", err) + } + if upid != expectedUPID { + t.Errorf("UPID = %q, want %q", upid, expectedUPID) + } +} + +func TestStopCT(t *testing.T) { + expectedUPID := "UPID:pve:0001:0002:12345678:vzstop:100:user@pam:" + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasSuffix(r.URL.Path, "/nodes/pve/lxc/100/status/stop") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(expectedUPID)) + })) + + upid, err := client.StopCT(context.Background(), 100) + if err != nil { + t.Fatalf("StopCT() error: %v", err) + } + if upid != expectedUPID { + t.Errorf("UPID = %q, want %q", upid, expectedUPID) + } +} + +func TestShutdownCT(t *testing.T) { + expectedUPID := "UPID:pve:0001:0002:12345678:vzshutdown:100:user@pam:" + + client, _ := 
testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasSuffix(r.URL.Path, "/nodes/pve/lxc/100/status/shutdown") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(expectedUPID)) + })) + + upid, err := client.ShutdownCT(context.Background(), 100) + if err != nil { + t.Fatalf("ShutdownCT() error: %v", err) + } + if upid != expectedUPID { + t.Errorf("UPID = %q, want %q", upid, expectedUPID) + } +} + +func TestDeleteCT(t *testing.T) { + expectedUPID := "UPID:pve:0001:0002:12345678:vzdel:100:user@pam:" + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodDelete { + t.Errorf("expected DELETE, got %s", r.Method) + } + if !strings.Contains(r.URL.Path, "/nodes/pve/lxc/100") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + // Check purge and force query params + if r.URL.Query().Get("purge") != "1" { + t.Errorf("expected purge=1 in query") + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(expectedUPID)) + })) + + upid, err := client.DeleteCT(context.Background(), 100) + if err != nil { + t.Fatalf("DeleteCT() error: %v", err) + } + if upid != expectedUPID { + t.Errorf("UPID = %q, want %q", upid, expectedUPID) + } +} + +func TestGetCTInterfaces(t *testing.T) { + ifaces := []CTInterface{ + {Name: "lo", HWAddr: "00:00:00:00:00:00", Inet: "127.0.0.1/8"}, + {Name: "eth0", HWAddr: "AA:BB:CC:DD:EE:FF", Inet: "10.0.0.5/24"}, + } + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasSuffix(r.URL.Path, "/nodes/pve/lxc/100/interfaces") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(ifaces)) + })) + + result, err := client.GetCTInterfaces(context.Background(), 100) + if err != nil { + t.Fatalf("GetCTInterfaces() error: %v", err) + } + if len(result) != 2 { + 
t.Fatalf("expected 2 interfaces, got %d", len(result)) + } + if result[1].Inet != "10.0.0.5/24" { + t.Errorf("result[1].Inet = %q, want %q", result[1].Inet, "10.0.0.5/24") + } +} + +func TestCreateSnapshot(t *testing.T) { + expectedUPID := "UPID:pve:0001:0002:12345678:vzsnapshot:100:user@pam:" + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + t.Errorf("expected POST, got %s", r.Method) + } + if !strings.HasSuffix(r.URL.Path, "/nodes/pve/lxc/100/snapshot") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + if err := r.ParseForm(); err != nil { + t.Fatalf("parse form: %v", err) + } + if r.FormValue("snapname") != "snap-1" { + t.Errorf("snapname = %q, want snap-1", r.FormValue("snapname")) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(expectedUPID)) + })) + + upid, err := client.CreateSnapshot(context.Background(), 100, "snap-1") + if err != nil { + t.Fatalf("CreateSnapshot() error: %v", err) + } + if upid != expectedUPID { + t.Errorf("UPID = %q, want %q", upid, expectedUPID) + } +} + +func TestGetNodeStatus(t *testing.T) { + status := NodeStatus{ + CPU: 0.25, + MaxCPU: 8, + Memory: MemoryStatus{Total: 16 * 1024 * 1024 * 1024, Used: 4 * 1024 * 1024 * 1024, Free: 12 * 1024 * 1024 * 1024}, + RootFS: DiskStatus{Total: 100 * 1024 * 1024 * 1024, Used: 30 * 1024 * 1024 * 1024, Available: 70 * 1024 * 1024 * 1024}, + } + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasSuffix(r.URL.Path, "/nodes/pve/status") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(status)) + })) + + result, err := client.GetNodeStatus(context.Background()) + if err != nil { + t.Fatalf("GetNodeStatus() error: %v", err) + } + if result.MaxCPU != 8 { + t.Errorf("MaxCPU = %d, want 8", result.MaxCPU) + } + if result.Memory.Free != 12*1024*1024*1024 { + t.Errorf("Memory.Free 
= %d, unexpected", result.Memory.Free) + } +} + +func TestGetTaskStatus(t *testing.T) { + taskStatus := TaskStatus{ + Status: "stopped", + ExitStatus: "OK", + Type: "vzstart", + Node: "pve", + } + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.Contains(r.URL.Path, "/nodes/pve/tasks/") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(taskStatus)) + })) + + result, err := client.GetTaskStatus(context.Background(), "UPID:pve:test") + if err != nil { + t.Fatalf("GetTaskStatus() error: %v", err) + } + if result.Status != "stopped" { + t.Errorf("Status = %q, want stopped", result.Status) + } + if result.ExitStatus != "OK" { + t.Errorf("ExitStatus = %q, want OK", result.ExitStatus) + } +} + +func TestNextVMID(t *testing.T) { + cts := []CTListEntry{ + {VMID: 9000, Name: "ct-a"}, + {VMID: 9001, Name: "ct-b"}, + {VMID: 9003, Name: "ct-c"}, + } + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(cts)) + })) + + // Should skip 9000, 9001 and return 9002 + vmid, err := client.NextVMID(context.Background(), 9000, 9999) + if err != nil { + t.Fatalf("NextVMID() error: %v", err) + } + if vmid != 9002 { + t.Errorf("VMID = %d, want 9002", vmid) + } +} + +func TestNextVMID_RangeExhausted(t *testing.T) { + cts := []CTListEntry{ + {VMID: 100, Name: "ct-a"}, + {VMID: 101, Name: "ct-b"}, + {VMID: 102, Name: "ct-c"}, + } + + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(cts)) + })) + + _, err := client.NextVMID(context.Background(), 100, 102) + if err == nil { + t.Fatal("expected error for exhausted range") + } + if !strings.Contains(err.Error(), "no available VMID") { + t.Errorf("error = %q, want containing 'no available VMID'", err.Error()) + } +} + 
+func TestClient_HTTPError4xx(t *testing.T) { + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write([]byte(`{"data":null,"errors":{"vmid":"not found"}}`)) + })) + + _, err := client.GetCTStatus(context.Background(), 999) + if err == nil { + t.Fatal("expected error for 404 response") + } + if !strings.Contains(err.Error(), "404") { + t.Errorf("error = %q, want containing '404'", err.Error()) + } +} + +func TestClient_RetryOn500(t *testing.T) { + attempts := 0 + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + attempts++ + if attempts < 3 { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(`{"data":null}`)) + return + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(CTStatus{VMID: 100, Status: "running"})) + })) + // Override maxRetries to ensure we retry + client.maxRetries = 3 + + result, err := client.GetCTStatus(context.Background(), 100) + if err != nil { + t.Fatalf("expected retry to succeed, got error: %v", err) + } + if result.Status != "running" { + t.Errorf("Status = %q, want running", result.Status) + } + if attempts != 3 { + t.Errorf("attempts = %d, want 3", attempts) + } +} + +func TestClient_RetryExhausted(t *testing.T) { + attempts := 0 + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + attempts++ + w.WriteHeader(http.StatusInternalServerError) + _, _ = fmt.Fprintf(w, `{"data":null,"errors":"server error attempt %d"}`, attempts) + })) + client.maxRetries = 2 + + _, err := client.GetCTStatus(context.Background(), 100) + if err == nil { + t.Fatal("expected error after retries exhausted") + } + if attempts != 2 { + t.Errorf("attempts = %d, want 2", attempts) + } +} + +func TestClient_ContextCancellation(t *testing.T) { + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(5 * time.Second) 
+ w.WriteHeader(http.StatusOK) + })) + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + _, err := client.ListCTs(ctx) + if err == nil { + t.Fatal("expected error from cancelled context") + } +} + +func TestWaitForTask_EmptyUPID(t *testing.T) { + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Fatal("should not make any requests for empty UPID") + })) + + err := client.WaitForTask(context.Background(), "") + if err != nil { + t.Fatalf("WaitForTask('') error: %v", err) + } +} + +func TestWaitForTask_FailedTask(t *testing.T) { + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(TaskStatus{ + Status: "stopped", + ExitStatus: "clone failed: disk error", + })) + })) + + err := client.WaitForTask(context.Background(), "UPID:pve:test") + if err == nil { + t.Fatal("expected error for failed task") + } + if !strings.Contains(err.Error(), "task failed") { + t.Errorf("error = %q, want containing 'task failed'", err.Error()) + } +} + +func TestSetCTConfig(t *testing.T) { + client, _ := testClient(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPut { + t.Errorf("expected PUT, got %s", r.Method) + } + if !strings.HasSuffix(r.URL.Path, "/nodes/pve/lxc/100/config") { + t.Errorf("unexpected path: %s", r.URL.Path) + } + if err := r.ParseForm(); err != nil { + t.Fatalf("parse form: %v", err) + } + if r.FormValue("cores") != "4" { + t.Errorf("cores = %q, want 4", r.FormValue("cores")) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse(nil)) + })) + + params := map[string][]string{ + "cores": {"4"}, + } + err := client.SetCTConfig(context.Background(), 100, params) + if err != nil { + t.Fatalf("SetCTConfig() error: %v", err) + } +} + +func TestClient_AuthorizationHeader(t *testing.T) { + client, _ := testClient(t, 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + auth := r.Header.Get("Authorization") + expected := "PVEAPIToken=test@pam!testtoken=test-secret" + if auth != expected { + t.Errorf("Authorization = %q, want %q", auth, expected) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write(proxmoxResponse([]CTListEntry{})) + })) + + _, _ = client.ListCTs(context.Background()) +} diff --git a/fluid-daemon/internal/provider/lxc/config.go b/fluid-daemon/internal/provider/lxc/config.go new file mode 100644 index 00000000..94b8c82c --- /dev/null +++ b/fluid-daemon/internal/provider/lxc/config.go @@ -0,0 +1,52 @@ +package lxc + +import ( + "fmt" + "time" +) + +// Config holds settings for connecting to Proxmox VE and managing LXC containers. +type Config struct { + Host string `yaml:"host"` // Base URL, e.g. "https://proxmox:8006" + TokenID string `yaml:"token_id"` // API token ID, e.g. "user@pam!fluid" + Secret string `yaml:"secret"` // API token secret + Node string `yaml:"node"` // Target node name, e.g. "pve" + Storage string `yaml:"storage"` // Storage for CT disks, e.g. "local-lvm" + Bridge string `yaml:"bridge"` // Network bridge, e.g. "vmbr0" + VMIDStart int `yaml:"vmid_start"` // Start of VMID range for sandboxes + VMIDEnd int `yaml:"vmid_end"` // End of VMID range for sandboxes + VerifySSL bool `yaml:"verify_ssl"` // Verify TLS certificates + Timeout time.Duration `yaml:"timeout"` // HTTP client timeout +} + +// Validate checks that required config fields are set and applies defaults. 
+func (c *Config) Validate() error { + if c.Host == "" { + return fmt.Errorf("lxc host is required") + } + if c.TokenID == "" { + return fmt.Errorf("lxc token_id is required") + } + if c.Secret == "" { + return fmt.Errorf("lxc secret is required") + } + if c.Node == "" { + return fmt.Errorf("lxc node is required") + } + if c.VMIDStart <= 0 { + c.VMIDStart = 9000 + } + if c.VMIDEnd <= 0 { + c.VMIDEnd = 9999 + } + if c.VMIDEnd <= c.VMIDStart { + return fmt.Errorf("lxc vmid_end (%d) must be greater than vmid_start (%d)", c.VMIDEnd, c.VMIDStart) + } + if c.Timeout == 0 { + c.Timeout = 5 * time.Minute + } + if c.Bridge == "" { + c.Bridge = "vmbr0" + } + return nil +} diff --git a/fluid-daemon/internal/provider/lxc/config_test.go b/fluid-daemon/internal/provider/lxc/config_test.go new file mode 100644 index 00000000..23befd96 --- /dev/null +++ b/fluid-daemon/internal/provider/lxc/config_test.go @@ -0,0 +1,124 @@ +package lxc + +import ( + "testing" + "time" +) + +func TestConfig_Validate_RequiredFields(t *testing.T) { + tests := []struct { + name string + cfg Config + wantErr string + }{ + { + name: "missing host", + cfg: Config{TokenID: "user@pam!tok", Secret: "s", Node: "pve"}, + wantErr: "lxc host is required", + }, + { + name: "missing token_id", + cfg: Config{Host: "https://pve:8006", Secret: "s", Node: "pve"}, + wantErr: "lxc token_id is required", + }, + { + name: "missing secret", + cfg: Config{Host: "https://pve:8006", TokenID: "user@pam!tok", Node: "pve"}, + wantErr: "lxc secret is required", + }, + { + name: "missing node", + cfg: Config{Host: "https://pve:8006", TokenID: "user@pam!tok", Secret: "s"}, + wantErr: "lxc node is required", + }, + { + name: "vmid_end less than vmid_start", + cfg: Config{ + Host: "https://pve:8006", TokenID: "user@pam!tok", Secret: "s", Node: "pve", + VMIDStart: 100, VMIDEnd: 50, + }, + wantErr: "vmid_end", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.cfg.Validate() + if err == nil { + 
				t.Fatal("expected error, got nil")
			}
			if !contains(err.Error(), tt.wantErr) {
				t.Errorf("error = %q, want containing %q", err.Error(), tt.wantErr)
			}
		})
	}
}

// Verifies that Validate fills in every documented default (VMID range
// 9000-9999, 5m timeout, vmbr0 bridge) when the optional fields are left
// zero-valued.
func TestConfig_Validate_Defaults(t *testing.T) {
	cfg := Config{
		Host:    "https://pve:8006",
		TokenID: "user@pam!tok",
		Secret:  "secret123",
		Node:    "pve",
	}

	if err := cfg.Validate(); err != nil {
		t.Fatalf("Validate() unexpected error: %v", err)
	}

	if cfg.VMIDStart != 9000 {
		t.Errorf("VMIDStart = %d, want 9000", cfg.VMIDStart)
	}
	if cfg.VMIDEnd != 9999 {
		t.Errorf("VMIDEnd = %d, want 9999", cfg.VMIDEnd)
	}
	if cfg.Timeout != 5*time.Minute {
		t.Errorf("Timeout = %v, want 5m", cfg.Timeout)
	}
	if cfg.Bridge != "vmbr0" {
		t.Errorf("Bridge = %q, want vmbr0", cfg.Bridge)
	}
}

// Verifies that Validate never clobbers explicitly configured values with
// defaults.
func TestConfig_Validate_PreservesExplicitValues(t *testing.T) {
	cfg := Config{
		Host:      "https://pve:8006",
		TokenID:   "user@pam!tok",
		Secret:    "secret123",
		Node:      "pve",
		VMIDStart: 5000,
		VMIDEnd:   5999,
		Timeout:   30 * time.Second,
		Bridge:    "vmbr1",
	}

	if err := cfg.Validate(); err != nil {
		t.Fatalf("Validate() unexpected error: %v", err)
	}

	if cfg.VMIDStart != 5000 {
		t.Errorf("VMIDStart = %d, want 5000", cfg.VMIDStart)
	}
	if cfg.VMIDEnd != 5999 {
		t.Errorf("VMIDEnd = %d, want 5999", cfg.VMIDEnd)
	}
	if cfg.Timeout != 30*time.Second {
		t.Errorf("Timeout = %v, want 30s", cfg.Timeout)
	}
	if cfg.Bridge != "vmbr1" {
		t.Errorf("Bridge = %q, want vmbr1", cfg.Bridge)
	}
}

// contains reports whether substr occurs in s. Hand-rolled stand-in for
// strings.Contains so this test file needs no imports beyond testing/time.
func contains(s, substr string) bool {
	return len(s) >= len(substr) && searchString(s, substr)
}

// searchString is a naive substring scan; the i+len(substr) <= len(s) loop
// bound also makes the empty substr match any string (same as the stdlib).
func searchString(s, substr string) bool {
	for i := 0; i+len(substr) <= len(s); i++ {
		if s[i:i+len(substr)] == substr {
			return true
		}
	}
	return false
}
diff --git a/fluid-daemon/internal/provider/lxc/lxc_provider.go b/fluid-daemon/internal/provider/lxc/lxc_provider.go
new file mode 100644
index 00000000..49c85f82
--- /dev/null
+++ b/fluid-daemon/internal/provider/lxc/lxc_provider.go
@@
-0,0 +1,605 @@ +// Package lxc implements the SandboxProvider interface for Proxmox LXC containers. +// It uses the Proxmox REST API to clone container templates, manage lifecycle, +// and execute commands via pct exec. +package lxc + +import ( + "bytes" + "context" + "fmt" + "log/slog" + "net" + "net/url" + "os/exec" + "strings" + "sync" + "time" + + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/id" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/provider" +) + +// Provider implements provider.SandboxProvider for Proxmox LXC containers. +type Provider struct { + client *Client + cfg Config + resolver *CTResolver + logger *slog.Logger + + // Protects VMID allocation and sandbox tracking. + mu sync.Mutex + // sandboxID -> vmid mapping for active sandboxes. + sandboxes map[string]int +} + +// New creates a new LXC provider. +func New(cfg Config, logger *slog.Logger) (*Provider, error) { + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("invalid lxc config: %w", err) + } + if logger == nil { + logger = slog.Default() + } + + client := NewClient(cfg, logger) + return &Provider{ + client: client, + cfg: cfg, + resolver: NewCTResolver(client), + logger: logger.With("provider", "lxc"), + sandboxes: make(map[string]int), + }, nil +} + +func (p *Provider) CreateSandbox(ctx context.Context, req provider.CreateRequest) (*provider.SandboxResult, error) { + // Resolve source CT template VMID + sourceVMID, err := p.resolver.ResolveVMID(ctx, req.SourceVM) + if err != nil { + return nil, fmt.Errorf("resolve source CT %q: %w", req.SourceVM, err) + } + + // Allocate next VMID + p.mu.Lock() + newVMID, err := p.client.NextVMID(ctx, p.cfg.VMIDStart, p.cfg.VMIDEnd) + if err != nil { + p.mu.Unlock() + return nil, fmt.Errorf("allocate VMID: %w", err) + } + + hostname := req.Name + if hostname == "" { + hostname = fmt.Sprintf("sbx-%s", req.SandboxID[:8]) + } + + p.logger.Info("cloning CT", + "source_vmid", sourceVMID, + "new_vmid", newVMID, + "hostname", 
hostname, + ) + + // Clone the template + upid, err := p.client.CloneCT(ctx, sourceVMID, newVMID, hostname, true) + p.mu.Unlock() + if err != nil { + return nil, fmt.Errorf("clone CT: %w", err) + } + + if err := p.client.WaitForTask(ctx, upid); err != nil { + return nil, fmt.Errorf("wait for clone: %w", err) + } + + // Configure the clone + params := url.Values{} + if req.VCPUs > 0 { + params.Set("cores", fmt.Sprintf("%d", req.VCPUs)) + } + if req.MemoryMB > 0 { + params.Set("memory", fmt.Sprintf("%d", req.MemoryMB)) + } + + bridge := p.cfg.Bridge + if req.Network != "" { + bridge = req.Network + } + params.Set("net0", fmt.Sprintf("name=eth0,bridge=%s,ip=dhcp", bridge)) + + if len(params) > 0 { + if err := p.client.SetCTConfig(ctx, newVMID, params); err != nil { + // Cleanup on config failure + _ = p.cleanupCT(ctx, newVMID) + return nil, fmt.Errorf("configure clone: %w", err) + } + } + + // Inject SSH public key if provided + if req.SSHPublicKey != "" { + sshParams := url.Values{ + "ssh-public-keys": {url.QueryEscape(strings.TrimSpace(req.SSHPublicKey))}, + } + if err := p.client.SetCTConfig(ctx, newVMID, sshParams); err != nil { + p.logger.Warn("failed to inject SSH key", "error", err) + } + } + + // Start container + startUPID, err := p.client.StartCT(ctx, newVMID) + if err != nil { + _ = p.cleanupCT(ctx, newVMID) + return nil, fmt.Errorf("start CT: %w", err) + } + + if err := p.client.WaitForTask(ctx, startUPID); err != nil { + _ = p.cleanupCT(ctx, newVMID) + return nil, fmt.Errorf("wait for start: %w", err) + } + + // Discover IP by polling interfaces + ip, err := p.discoverIP(ctx, newVMID, 2*time.Minute) + if err != nil { + p.logger.Warn("IP discovery failed", "sandbox_id", req.SandboxID, "error", err) + } + + // Track sandbox + p.mu.Lock() + p.sandboxes[req.SandboxID] = newVMID + p.mu.Unlock() + + // Refresh resolver cache + _ = p.resolver.Refresh(ctx) + + return &provider.SandboxResult{ + SandboxID: req.SandboxID, + Name: hostname, + State: "RUNNING", + 
IPAddress: ip, + Bridge: bridge, + }, nil +} + +func (p *Provider) DestroySandbox(ctx context.Context, sandboxID string) error { + p.mu.Lock() + vmid, ok := p.sandboxes[sandboxID] + if ok { + delete(p.sandboxes, sandboxID) + } + p.mu.Unlock() + + if !ok { + return fmt.Errorf("sandbox %s not tracked", sandboxID) + } + + return p.cleanupCT(ctx, vmid) +} + +func (p *Provider) StartSandbox(ctx context.Context, sandboxID string) (*provider.SandboxResult, error) { + vmid, err := p.getVMID(sandboxID) + if err != nil { + return nil, err + } + + upid, err := p.client.StartCT(ctx, vmid) + if err != nil { + return nil, fmt.Errorf("start CT: %w", err) + } + + if err := p.client.WaitForTask(ctx, upid); err != nil { + return nil, fmt.Errorf("wait for start: %w", err) + } + + ip, _ := p.discoverIP(ctx, vmid, 30*time.Second) + + return &provider.SandboxResult{ + SandboxID: sandboxID, + State: "RUNNING", + IPAddress: ip, + }, nil +} + +func (p *Provider) StopSandbox(ctx context.Context, sandboxID string, force bool) error { + vmid, err := p.getVMID(sandboxID) + if err != nil { + return err + } + + var upid string + if force { + upid, err = p.client.StopCT(ctx, vmid) + } else { + upid, err = p.client.ShutdownCT(ctx, vmid) + } + if err != nil { + return fmt.Errorf("stop CT: %w", err) + } + + return p.client.WaitForTask(ctx, upid) +} + +func (p *Provider) GetSandboxIP(ctx context.Context, sandboxID string) (string, error) { + vmid, err := p.getVMID(sandboxID) + if err != nil { + return "", err + } + + return p.discoverIP(ctx, vmid, 30*time.Second) +} + +func (p *Provider) CreateSnapshot(ctx context.Context, sandboxID, name string) (*provider.SnapshotResult, error) { + vmid, err := p.getVMID(sandboxID) + if err != nil { + return nil, err + } + + upid, err := p.client.CreateSnapshot(ctx, vmid, name) + if err != nil { + return nil, fmt.Errorf("create snapshot: %w", err) + } + + if err := p.client.WaitForTask(ctx, upid); err != nil { + return nil, fmt.Errorf("wait for snapshot: %w", err) 
+ } + + snapshotID, err := id.Generate("SNP-") + if err != nil { + return nil, fmt.Errorf("generate snapshot ID: %w", err) + } + return &provider.SnapshotResult{ + SnapshotID: snapshotID, + SnapshotName: name, + }, nil +} + +func (p *Provider) RunCommand(ctx context.Context, sandboxID, command string, timeout time.Duration) (*provider.CommandResult, error) { + vmid, err := p.getVMID(sandboxID) + if err != nil { + return nil, err + } + + if timeout == 0 { + timeout = 5 * time.Minute + } + + start := time.Now() + stdout, stderr, exitCode, err := p.pctExec(ctx, vmid, command, timeout) + if err != nil { + return nil, fmt.Errorf("pct exec: %w", err) + } + + return &provider.CommandResult{ + Stdout: stdout, + Stderr: stderr, + ExitCode: exitCode, + DurationMS: time.Since(start).Milliseconds(), + }, nil +} + +func (p *Provider) ListTemplates(ctx context.Context) ([]string, error) { + cts, err := p.client.ListCTs(ctx) + if err != nil { + return nil, err + } + + var templates []string + for _, ct := range cts { + if ct.Template == 1 { + templates = append(templates, ct.Name) + } + } + return templates, nil +} + +func (p *Provider) ListSourceVMs(ctx context.Context) ([]provider.SourceVMInfo, error) { + cts, err := p.client.ListCTs(ctx) + if err != nil { + return nil, err + } + + var vms []provider.SourceVMInfo + for _, ct := range cts { + // Skip templates and sandbox containers + if ct.Template == 1 || strings.HasPrefix(ct.Name, "sbx-") { + continue + } + ip := "" + if ct.Status == "running" { + ip, _ = p.discoverIP(ctx, ct.VMID, 5*time.Second) + } + vms = append(vms, provider.SourceVMInfo{ + Name: ct.Name, + State: ct.Status, + IPAddress: ip, + }) + } + return vms, nil +} + +func (p *Provider) ValidateSourceVM(ctx context.Context, vmName string) (*provider.ValidationResult, error) { + result := &provider.ValidationResult{ + VMName: vmName, + } + + vmid, err := p.resolver.ResolveVMID(ctx, vmName) + if err != nil { + result.Errors = append(result.Errors, fmt.Sprintf("CT %q 
not found: %v", vmName, err)) + return result, nil + } + + status, err := p.client.GetCTStatus(ctx, vmid) + if err != nil { + result.Errors = append(result.Errors, fmt.Sprintf("failed to get CT status: %v", err)) + return result, nil + } + result.State = status.Status + + cfg, err := p.client.GetCTConfig(ctx, vmid) + if err != nil { + result.Warnings = append(result.Warnings, fmt.Sprintf("could not read CT config: %v", err)) + } else { + if cfg.Net0 == "" { + result.HasNetwork = false + result.Warnings = append(result.Warnings, "CT has no network interface (net0)") + } else { + result.HasNetwork = true + } + } + + if status.Status == "running" { + ip, err := p.discoverIP(ctx, vmid, 10*time.Second) + if err == nil { + result.IPAddress = ip + } else { + result.Warnings = append(result.Warnings, "Could not determine IP address") + } + } + + if len(result.Errors) == 0 { + result.Valid = true + } + + return result, nil +} + +func (p *Provider) PrepareSourceVM(ctx context.Context, vmName, sshUser, sshKeyPath string) (*provider.PrepareResult, error) { + vmid, err := p.resolver.ResolveVMID(ctx, vmName) + if err != nil { + return nil, fmt.Errorf("resolve CT %q: %w", vmName, err) + } + + ip, _ := p.discoverIP(ctx, vmid, 10*time.Second) + + // Install fluid-readonly user + restricted shell via pct exec + steps := []struct { + name string + cmd string + field func(*provider.PrepareResult) + }{ + { + name: "install restricted shell", + cmd: "cat > /usr/local/bin/fluid-readonly-shell << 'EOF'\n#!/bin/bash\nset -euo pipefail\nif [ -n \"${SSH_ORIGINAL_COMMAND:-}\" ]; then CMD=\"$SSH_ORIGINAL_COMMAND\"; elif [ \"${1:-}\" = \"-c\" ] && [ -n \"${2:-}\" ]; then CMD=\"$2\"; else echo 'ERROR: Interactive login not permitted.' 
>&2; exit 1; fi\nexec /bin/bash -c \"$CMD\"\nEOF\nchmod 755 /usr/local/bin/fluid-readonly-shell", + field: func(r *provider.PrepareResult) { r.ShellInstalled = true }, + }, + { + name: "create fluid-readonly user", + cmd: "mkdir -p /var/empty && id fluid-readonly >/dev/null 2>&1 || useradd -r -s /usr/local/bin/fluid-readonly-shell -d /var/empty -M fluid-readonly", + field: func(r *provider.PrepareResult) { r.UserCreated = true }, + }, + } + + result := &provider.PrepareResult{ + SourceVM: vmName, + IPAddress: ip, + } + + for _, step := range steps { + _, stderr, exitCode, err := p.pctExec(ctx, vmid, step.cmd, 60*time.Second) + if err != nil { + return result, fmt.Errorf("%s: %w", step.name, err) + } + if exitCode != 0 { + return result, fmt.Errorf("%s: exit %d: %s", step.name, exitCode, stderr) + } + step.field(result) + } + + result.Prepared = true + return result, nil +} + +func (p *Provider) RunSourceCommand(ctx context.Context, vmName, command string, timeout time.Duration) (*provider.CommandResult, error) { + vmid, err := p.resolver.ResolveVMID(ctx, vmName) + if err != nil { + return nil, fmt.Errorf("resolve CT %q: %w", vmName, err) + } + + if timeout == 0 { + timeout = 30 * time.Second + } + + start := time.Now() + // Execute as fluid-readonly user via pct exec + wrappedCmd := fmt.Sprintf("su -s /usr/local/bin/fluid-readonly-shell fluid-readonly -c '%s'", + strings.ReplaceAll(command, "'", "'\"'\"'")) + + stdout, stderr, exitCode, err := p.pctExec(ctx, vmid, wrappedCmd, timeout) + if err != nil { + return nil, err + } + + return &provider.CommandResult{ + Stdout: stdout, + Stderr: stderr, + ExitCode: exitCode, + DurationMS: time.Since(start).Milliseconds(), + }, nil +} + +func (p *Provider) ReadSourceFile(ctx context.Context, vmName, path string) (string, error) { + vmid, err := p.resolver.ResolveVMID(ctx, vmName) + if err != nil { + return "", fmt.Errorf("resolve CT %q: %w", vmName, err) + } + + // Read file as fluid-readonly user + cmd := fmt.Sprintf("su - 
fluid-readonly -c 'cat %s'", + strings.ReplaceAll(path, "'", "'\"'\"'")) + + stdout, stderr, exitCode, err := p.pctExec(ctx, vmid, cmd, 30*time.Second) + if err != nil { + return "", err + } + if exitCode != 0 { + return "", fmt.Errorf("read file failed (exit %d): %s", exitCode, stderr) + } + + return stdout, nil +} + +func (p *Provider) Capabilities(ctx context.Context) (*provider.HostCapabilities, error) { + nodeStatus, err := p.client.GetNodeStatus(ctx) + if err != nil { + return nil, fmt.Errorf("get node status: %w", err) + } + + caps := &provider.HostCapabilities{ + TotalCPUs: nodeStatus.MaxCPU, + AvailableCPUs: nodeStatus.MaxCPU, + TotalMemoryMB: int(nodeStatus.Memory.Total / (1024 * 1024)), + AvailableMemMB: int(nodeStatus.Memory.Free / (1024 * 1024)), + TotalDiskMB: int(nodeStatus.RootFS.Total / (1024 * 1024)), + AvailableDiskMB: int(nodeStatus.RootFS.Available / (1024 * 1024)), + } + + // Get template names + templates, _ := p.ListTemplates(ctx) + caps.BaseImages = templates + + return caps, nil +} + +func (p *Provider) ActiveSandboxCount() int { + p.mu.Lock() + defer p.mu.Unlock() + return len(p.sandboxes) +} + +func (p *Provider) RecoverState(ctx context.Context) error { + cts, err := p.client.ListCTs(ctx) + if err != nil { + return fmt.Errorf("list CTs for recovery: %w", err) + } + + p.mu.Lock() + defer p.mu.Unlock() + + for _, ct := range cts { + if strings.HasPrefix(ct.Name, "sbx-") && ct.Template == 0 { + // Use the CT name as sandbox ID for recovery + sandboxID := ct.Name + p.sandboxes[sandboxID] = ct.VMID + p.logger.Info("recovered sandbox CT", "sandbox_id", sandboxID, "vmid", ct.VMID) + } + } + + return nil +} + +// --- Internal helpers --- + +// getVMID returns the Proxmox VMID for a tracked sandbox. 
+func (p *Provider) getVMID(sandboxID string) (int, error) { + p.mu.Lock() + vmid, ok := p.sandboxes[sandboxID] + p.mu.Unlock() + if !ok { + return 0, fmt.Errorf("sandbox %s not tracked", sandboxID) + } + return vmid, nil +} + +// cleanupCT stops and deletes a container. +func (p *Provider) cleanupCT(ctx context.Context, vmid int) error { + // Check status first + status, err := p.client.GetCTStatus(ctx, vmid) + if err != nil { + // CT may already be gone + p.logger.Warn("get CT status for cleanup failed", "vmid", vmid, "error", err) + return nil + } + + if status.Status == "running" { + stopUPID, err := p.client.StopCT(ctx, vmid) + if err != nil { + p.logger.Error("stop CT for cleanup failed", "vmid", vmid, "error", err) + } else { + _ = p.client.WaitForTask(ctx, stopUPID) + } + } + + delUPID, err := p.client.DeleteCT(ctx, vmid) + if err != nil { + return fmt.Errorf("delete CT %d: %w", vmid, err) + } + + if err := p.client.WaitForTask(ctx, delUPID); err != nil { + return fmt.Errorf("wait for delete CT %d: %w", vmid, err) + } + + _ = p.resolver.Refresh(ctx) + return nil +} + +// discoverIP polls the CT interfaces endpoint until an IPv4 address appears. 
+func (p *Provider) discoverIP(ctx context.Context, vmid int, timeout time.Duration) (string, error) { + deadline := time.Now().Add(timeout) + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + + for { + ifaces, err := p.client.GetCTInterfaces(ctx, vmid) + if err == nil { + for _, iface := range ifaces { + if iface.Name == "lo" { + continue + } + if iface.Inet != "" { + // inet format: "10.0.0.5/24" - strip prefix + ipStr := strings.SplitN(iface.Inet, "/", 2)[0] + ip := net.ParseIP(ipStr) + if ip != nil && !ip.IsLoopback() && !ip.IsLinkLocalUnicast() { + return ipStr, nil + } + } + } + } + + if time.Now().After(deadline) { + return "", fmt.Errorf("timeout waiting for IP of CT %d", vmid) + } + + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-ticker.C: + } + } +} + +// pctExec runs a command inside a container via pct exec. +func (p *Provider) pctExec(ctx context.Context, vmid int, command string, timeout time.Duration) (stdout, stderr string, exitCode int, err error) { + cmdCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + cmd := exec.CommandContext(cmdCtx, "pct", "exec", fmt.Sprintf("%d", vmid), "--", "sh", "-c", command) + var stdoutBuf, stderrBuf bytes.Buffer + cmd.Stdout = &stdoutBuf + cmd.Stderr = &stderrBuf + + err = cmd.Run() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return stdoutBuf.String(), stderrBuf.String(), exitErr.ExitCode(), nil + } + return "", "", -1, err + } + + return stdoutBuf.String(), stderrBuf.String(), 0, nil +} diff --git a/fluid-daemon/internal/provider/lxc/lxc_provider_test.go b/fluid-daemon/internal/provider/lxc/lxc_provider_test.go new file mode 100644 index 00000000..c3588bbe --- /dev/null +++ b/fluid-daemon/internal/provider/lxc/lxc_provider_test.go @@ -0,0 +1,718 @@ +package lxc + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + "time" + + 
"github.com/aspectrr/fluid.sh/fluid-daemon/internal/provider" +) + +// mockProxmox provides a configurable mock Proxmox API server for testing the LXC provider. +type mockProxmox struct { + mu sync.Mutex + cts []CTListEntry + statuses map[int]CTStatus + configs map[int]CTConfig + ifaces map[int][]CTInterface + taskQueue map[string]TaskStatus + nodeStatus *NodeStatus + cloneCount int +} + +func newMockProxmox() *mockProxmox { + return &mockProxmox{ + statuses: make(map[int]CTStatus), + configs: make(map[int]CTConfig), + ifaces: make(map[int][]CTInterface), + taskQueue: make(map[string]TaskStatus), + nodeStatus: &NodeStatus{ + MaxCPU: 8, + Memory: MemoryStatus{Total: 16 * 1024 * 1024 * 1024, Free: 12 * 1024 * 1024 * 1024}, + RootFS: DiskStatus{Total: 100 * 1024 * 1024 * 1024, Available: 70 * 1024 * 1024 * 1024}, + }, + } +} + +func (m *mockProxmox) respond(w http.ResponseWriter, data any) { + d, _ := json.Marshal(data) + resp := struct { + Data json.RawMessage `json:"data"` + }{Data: d} + b, _ := json.Marshal(resp) + w.WriteHeader(http.StatusOK) + _, _ = w.Write(b) +} + +func (m *mockProxmox) handler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + m.mu.Lock() + defer m.mu.Unlock() + + path := r.URL.Path + // Strip api2/json prefix + idx := strings.Index(path, "/nodes/") + if idx >= 0 { + path = path[idx:] + } + + switch { + // Task status (must come before node status to avoid path collision) + case r.Method == http.MethodGet && strings.Contains(path, "/tasks/"): + parts := strings.Split(path, "/tasks/") + if len(parts) > 1 { + upidPart := strings.TrimSuffix(parts[1], "/status") + if ts, ok := m.taskQueue[upidPart]; ok { + m.respond(w, ts) + return + } + } + // Default: task is done + m.respond(w, TaskStatus{Status: "stopped", ExitStatus: "OK"}) + + // List CTs + case r.Method == http.MethodGet && strings.HasSuffix(path, "/lxc"): + m.respond(w, m.cts) + + // Node status + case r.Method == http.MethodGet && 
strings.HasSuffix(path, "/status") && !strings.Contains(path, "/lxc/"):
			m.respond(w, m.nodeStatus)

		// CT status
		case r.Method == http.MethodGet && strings.Contains(path, "/status/current"):
			vmid := extractVMID(path)
			if s, ok := m.statuses[vmid]; ok {
				m.respond(w, s)
			} else {
				w.WriteHeader(http.StatusNotFound)
				_, _ = w.Write([]byte(`{"data":null}`))
			}

		// CT config GET
		case r.Method == http.MethodGet && strings.HasSuffix(path, "/config"):
			vmid := extractVMID(path)
			if c, ok := m.configs[vmid]; ok {
				m.respond(w, c)
			} else {
				// Unknown CT: answer with an empty config instead of a 404.
				m.respond(w, CTConfig{})
			}

		// CT config PUT
		case r.Method == http.MethodPut && strings.HasSuffix(path, "/config"):
			m.respond(w, nil)

		// Clone
		case r.Method == http.MethodPost && strings.HasSuffix(path, "/clone"):
			m.cloneCount++
			upid := fmt.Sprintf("UPID:pve:clone:%d", m.cloneCount)
			// Mark task as immediately done
			m.taskQueue[upid] = TaskStatus{Status: "stopped", ExitStatus: "OK"}
			m.respond(w, upid)

		// Start
		case r.Method == http.MethodPost && strings.HasSuffix(path, "/status/start"):
			vmid := extractVMID(path)
			upid := fmt.Sprintf("UPID:pve:start:%d", vmid)
			m.taskQueue[upid] = TaskStatus{Status: "stopped", ExitStatus: "OK"}
			// Update status to running
			if s, ok := m.statuses[vmid]; ok {
				s.Status = "running"
				m.statuses[vmid] = s
			}
			m.respond(w, upid)

		// Stop
		case r.Method == http.MethodPost && strings.HasSuffix(path, "/status/stop"):
			vmid := extractVMID(path)
			upid := fmt.Sprintf("UPID:pve:stop:%d", vmid)
			m.taskQueue[upid] = TaskStatus{Status: "stopped", ExitStatus: "OK"}
			m.respond(w, upid)

		// Shutdown
		case r.Method == http.MethodPost && strings.HasSuffix(path, "/status/shutdown"):
			vmid := extractVMID(path)
			upid := fmt.Sprintf("UPID:pve:shutdown:%d", vmid)
			m.taskQueue[upid] = TaskStatus{Status: "stopped", ExitStatus: "OK"}
			m.respond(w, upid)

		// Delete
		case r.Method == http.MethodDelete && strings.Contains(path, "/lxc/"):
			vmid := extractVMID(path)
			upid := fmt.Sprintf("UPID:pve:delete:%d", vmid)
			m.taskQueue[upid] = TaskStatus{Status: "stopped", ExitStatus: "OK"}
			m.respond(w, upid)

		// Interfaces
		case r.Method == http.MethodGet && strings.HasSuffix(path, "/interfaces"):
			vmid := extractVMID(path)
			if iface, ok := m.ifaces[vmid]; ok {
				m.respond(w, iface)
			} else {
				m.respond(w, []CTInterface{})
			}

		// Snapshot
		case r.Method == http.MethodPost && strings.HasSuffix(path, "/snapshot"):
			upid := "UPID:pve:snapshot:1"
			m.taskQueue[upid] = TaskStatus{Status: "stopped", ExitStatus: "OK"}
			m.respond(w, upid)

		default:
			w.WriteHeader(http.StatusNotFound)
			_, _ = w.Write([]byte(`{"data":null}`))
		}
	})
}

// extractVMID pulls the container VMID out of an API path; returns 0 when
// the path has no "lxc/<vmid>" segment.
func extractVMID(path string) int {
	// Extract VMID from paths like /nodes/pve/lxc/100/...
	parts := strings.Split(path, "/")
	for i, p := range parts {
		if p == "lxc" && i+1 < len(parts) {
			var vmid int
			_, _ = fmt.Sscanf(parts[i+1], "%d", &vmid)
			return vmid
		}
	}
	return 0
}

// testProvider builds a Provider wired to a TLS httptest server backed by
// the given mock Proxmox API. The server is closed via t.Cleanup.
func testProvider(t *testing.T, mock *mockProxmox) (*Provider, *httptest.Server) {
	t.Helper()
	srv := httptest.NewTLSServer(mock.handler())
	t.Cleanup(srv.Close)

	cfg := Config{
		Host:      srv.URL,
		TokenID:   "test@pam!tok",
		Secret:    "secret",
		Node:      "pve",
		Bridge:    "vmbr0",
		VMIDStart: 9000,
		VMIDEnd:   9999,
		VerifySSL: false,
		Timeout:   10 * time.Second,
	}

	prov, err := New(cfg, nil)
	if err != nil {
		t.Fatalf("New() error: %v", err)
	}

	// Override the client's httpClient to use the test server's TLS client
	prov.client.httpClient = srv.Client()
	prov.client.httpClient.Timeout = 10 * time.Second

	return prov, srv
}

// ListTemplates must return only CTs flagged as templates.
func TestProvider_ListTemplates(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{
		{VMID: 100, Name: "ubuntu-template", Template: 1, Status: "stopped"},
		{VMID: 101, Name: "debian-template", Template: 1, Status: "stopped"},
		{VMID: 200, Name: "web-server", Template: 0, Status: "running"},
	}

	prov, _ := testProvider(t, mock)

	templates, err := prov.ListTemplates(context.Background())
	if err != nil {
		t.Fatalf("ListTemplates() error: %v", err)
	}

	if len(templates) != 2 {
		t.Fatalf("expected 2 templates, got %d", len(templates))
	}

	names := make(map[string]bool)
	for _, n := range templates {
		names[n] = true
	}
	if !names["ubuntu-template"] || !names["debian-template"] {
		t.Errorf("unexpected templates: %v", templates)
	}
}

// ListSourceVMs must skip templates and sandbox ("sbx-") containers.
func TestProvider_ListSourceVMs(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{
		{VMID: 100, Name: "ubuntu-template", Template: 1, Status: "stopped"},
		{VMID: 200, Name: "web-server", Template: 0, Status: "running"},
		{VMID: 201, Name: "db-server", Template: 0, Status: "stopped"},
		{VMID: 9001, Name: "sbx-abc12345", Template: 0, Status: "running"},
	}
	mock.ifaces[200] = []CTInterface{
		{Name: "eth0", Inet: "10.0.0.5/24"},
	}

	prov, _ := testProvider(t, mock)

	vms, err := prov.ListSourceVMs(context.Background())
	if err != nil {
		t.Fatalf("ListSourceVMs() error: %v", err)
	}

	// Should exclude templates and sbx- prefixed containers
	if len(vms) != 2 {
		t.Fatalf("expected 2 source VMs, got %d: %+v", len(vms), vms)
	}

	names := make(map[string]bool)
	for _, vm := range vms {
		names[vm.Name] = true
	}
	if !names["web-server"] || !names["db-server"] {
		t.Errorf("unexpected VMs: %+v", vms)
	}
}

// Capabilities should mirror the mock node's fixed resources
// (8 CPUs, 16 GiB total, 12 GiB free — see newMockProxmox).
func TestProvider_Capabilities(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{
		{VMID: 100, Name: "template-1", Template: 1},
	}

	prov, _ := testProvider(t, mock)

	caps, err := prov.Capabilities(context.Background())
	if err != nil {
		t.Fatalf("Capabilities() error: %v", err)
	}

	if caps.TotalCPUs != 8 {
		t.Errorf("TotalCPUs = %d, want 8", caps.TotalCPUs)
	}
	if caps.TotalMemoryMB != 16*1024 {
		t.Errorf("TotalMemoryMB = %d, want %d", caps.TotalMemoryMB, 16*1024)
	}
	if caps.AvailableMemMB != 12*1024 {
		t.Errorf("AvailableMemMB = %d, want %d", caps.AvailableMemMB, 12*1024)
	}
}

// RecoverState re-adopts only sbx- prefixed, non-template containers.
func TestProvider_RecoverState(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{
		{VMID: 100, Name: "web-server", Template: 0, Status: "running"},
		{VMID: 9001, Name: "sbx-sandbox1", Template: 0, Status: "running"},
		{VMID: 9002, Name: "sbx-sandbox2", Template: 0, Status: "stopped"},
		{VMID: 200, Name: "ubuntu-template", Template: 1, Status: "stopped"},
	}

	prov, _ := testProvider(t, mock)

	err := prov.RecoverState(context.Background())
	if err != nil {
		t.Fatalf("RecoverState() error: %v", err)
	}

	// Should recover only sbx- prefixed non-template containers
	if prov.ActiveSandboxCount() != 2 {
		t.Errorf("ActiveSandboxCount = %d, want 2", prov.ActiveSandboxCount())
	}
}

func TestProvider_ActiveSandboxCount(t *testing.T) {
	mock := newMockProxmox()
	prov, _ := testProvider(t, mock)

	if prov.ActiveSandboxCount() != 0 {
		t.Errorf("initial ActiveSandboxCount = %d, want 0", prov.ActiveSandboxCount())
	}

	// Manually track sandboxes
	prov.mu.Lock()
	prov.sandboxes["sbx-1"] = 9001
	prov.sandboxes["sbx-2"] = 9002
	prov.mu.Unlock()

	if prov.ActiveSandboxCount() != 2 {
		t.Errorf("ActiveSandboxCount = %d, want 2", prov.ActiveSandboxCount())
	}
}

func TestProvider_DestroySandbox_NotTracked(t *testing.T) {
	mock := newMockProxmox()
	prov, _ := testProvider(t, mock)

	err := prov.DestroySandbox(context.Background(), "nonexistent")
	if err == nil {
		t.Fatal("expected error for untracked sandbox")
	}
	if !strings.Contains(err.Error(), "not tracked") {
		t.Errorf("error = %q, want containing 'not tracked'", err.Error())
	}
}

func TestProvider_DestroySandbox(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{
		{VMID: 9001, Name: "sbx-test", Status: "running"},
	}
	mock.statuses[9001] = CTStatus{VMID: 9001, Status: "running"}

	prov, _ := testProvider(t, mock)

	// Track the sandbox
	prov.mu.Lock()
	prov.sandboxes["test-sandbox"] = 9001
	prov.mu.Unlock()

	err := prov.DestroySandbox(context.Background(), "test-sandbox")
	if err != nil {
		t.Fatalf("DestroySandbox() error: %v", err)
	}

	// Should be removed from tracking
	if prov.ActiveSandboxCount() != 0 {
		t.Errorf("ActiveSandboxCount = %d, want 0", prov.ActiveSandboxCount())
	}
}

func TestProvider_ValidateSourceVM_Found(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{
		{VMID: 100, Name: "web-server", Status: "running"},
	}
	mock.statuses[100] = CTStatus{VMID: 100, Name: "web-server", Status: "running"}
	mock.configs[100] = CTConfig{Net0: "name=eth0,bridge=vmbr0"}
	mock.ifaces[100] = []CTInterface{
		{Name: "eth0", Inet: "10.0.0.5/24"},
	}

	prov, _ := testProvider(t, mock)

	result, err := prov.ValidateSourceVM(context.Background(), "web-server")
	if err != nil {
		t.Fatalf("ValidateSourceVM() error: %v", err)
	}

	if !result.Valid {
		t.Error("expected Valid=true")
	}
	if result.State != "running" {
		t.Errorf("State = %q, want running", result.State)
	}
	if !result.HasNetwork {
		t.Error("expected HasNetwork=true")
	}
	if result.IPAddress != "10.0.0.5" {
		t.Errorf("IPAddress = %q, want 10.0.0.5", result.IPAddress)
	}
}

func TestProvider_ValidateSourceVM_NotFound(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{}

	prov, _ := testProvider(t, mock)

	result, err := prov.ValidateSourceVM(context.Background(), "nonexistent")
	if err != nil {
		t.Fatalf("ValidateSourceVM() error: %v", err)
	}

	if result.Valid {
		t.Error("expected Valid=false for nonexistent CT")
	}
	if len(result.Errors) == 0 {
		t.Error("expected at least one error")
	}
}

func TestProvider_ValidateSourceVM_NoNetwork(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{
		{VMID: 100, Name: "isolated-ct", Status: "stopped"},
	}
	mock.statuses[100] = CTStatus{VMID: 100, Name: "isolated-ct", Status: "stopped"}
	mock.configs[100] = CTConfig{Net0: ""}

	prov, _ := testProvider(t, mock)

	result, err := prov.ValidateSourceVM(context.Background(), "isolated-ct")
	if err != nil {
		t.Fatalf("ValidateSourceVM() error: %v", err)
	}

	// NOTE(review): the empty happy branch below reads awkwardly; the check
	// would be clearer as `if result.HasNetwork { t.Error(...) }`.
	if !result.HasNetwork {
		// Correctly detected no network
	} else {
		t.Error("expected HasNetwork=false for CT with empty Net0")
	}
}

func TestProvider_GetSandboxIP_NotTracked(t *testing.T) {
	mock := newMockProxmox()
	prov, _ := testProvider(t, mock)

	_, err := prov.GetSandboxIP(context.Background(), "unknown")
	if err == nil {
		t.Fatal("expected error for untracked sandbox")
	}
}

func TestProvider_StopSandbox_NotTracked(t *testing.T) {
	mock := newMockProxmox()
	prov, _ := testProvider(t, mock)

	err := prov.StopSandbox(context.Background(), "unknown", false)
	if err == nil {
		t.Fatal("expected error for untracked sandbox")
	}
}

func TestProvider_CreateSnapshot_NotTracked(t *testing.T) {
	mock := newMockProxmox()
	prov, _ := testProvider(t, mock)

	_, err := prov.CreateSnapshot(context.Background(), "unknown", "snap")
	if err == nil {
		t.Fatal("expected error for untracked sandbox")
	}
}

func TestProvider_StartSandbox(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{
		{VMID: 9001, Name: "sbx-test", Status: "stopped"},
	}
	mock.statuses[9001] = CTStatus{VMID: 9001, Status: "stopped"}
	mock.ifaces[9001] = []CTInterface{
		{Name: "eth0", Inet: "10.0.0.10/24"},
	}

	prov, _ := testProvider(t, mock)
	prov.mu.Lock()
	prov.sandboxes["test-sbx"] = 9001
	prov.mu.Unlock()

	result, err := prov.StartSandbox(context.Background(), "test-sbx")
	if err != nil {
		t.Fatalf("StartSandbox() error: %v", err)
	}

	if result.State != "RUNNING" {
		t.Errorf("State = %q, want RUNNING", result.State)
	}
	if result.IPAddress != "10.0.0.10" {
		t.Errorf("IPAddress = %q, want 10.0.0.10", result.IPAddress)
	}
}

func TestProvider_StopSandbox_Force(t *testing.T) {
	mock := newMockProxmox()
	prov, _ := testProvider(t, mock)
	prov.mu.Lock()
	prov.sandboxes["test-sbx"] = 9001
	prov.mu.Unlock()

	err := prov.StopSandbox(context.Background(), "test-sbx", true)
	if err != nil {
		t.Fatalf("StopSandbox(force) error: %v", err)
	}
}

func TestProvider_StopSandbox_Graceful(t *testing.T) {
	mock := newMockProxmox()
	prov, _ := testProvider(t, mock)
	prov.mu.Lock()
	prov.sandboxes["test-sbx"] = 9001
	prov.mu.Unlock()

	err := prov.StopSandbox(context.Background(), "test-sbx", false)
	if err != nil {
		t.Fatalf("StopSandbox(graceful) error: %v", err)
	}
}

func TestProvider_CreateSnapshot(t *testing.T) {
	mock := newMockProxmox()
	prov, _ := testProvider(t, mock)
	prov.mu.Lock()
	prov.sandboxes["test-sbx"] = 9001
	prov.mu.Unlock()

	result, err := prov.CreateSnapshot(context.Background(), "test-sbx", "my-snapshot")
	if err != nil {
		t.Fatalf("CreateSnapshot() error: %v", err)
	}

	if result.SnapshotName != "my-snapshot" {
		t.Errorf("SnapshotName = %q, want my-snapshot", result.SnapshotName)
	}
	if !strings.HasPrefix(result.SnapshotID, "SNP-") {
		t.Errorf("SnapshotID = %q, want SNP- prefix", result.SnapshotID)
	}
}

// Full create path: clone from template, start, discover IP, track.
func TestProvider_CreateSandbox(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{
		{VMID: 100, Name: "source-template", Template: 1, Status: "stopped"},
	}
	// After clone, the new CT shows up with interfaces
	mock.ifaces[9000] = []CTInterface{
		{Name: "lo", Inet: "127.0.0.1/8"},
		{Name: "eth0", Inet: "10.0.0.50/24"},
	}
	mock.statuses[9000] = CTStatus{VMID: 9000, Status: "stopped"}

	prov, _ := testProvider(t, mock)

	req := provider.CreateRequest{
		SandboxID: "sbx-12345678-abcd",
		Name:      "sbx-test-sandbox",
		SourceVM:  "source-template",
		VCPUs:     2,
		MemoryMB:  1024,
	}

	result, err := prov.CreateSandbox(context.Background(), req)
	if err != nil {
		t.Fatalf("CreateSandbox() error: %v", err)
	}

	if result.State != "RUNNING" {
		t.Errorf("State = %q, want RUNNING", result.State)
	}
	if result.IPAddress != "10.0.0.50" {
		t.Errorf("IPAddress = %q, want 10.0.0.50", result.IPAddress)
	}
	if result.Bridge != "vmbr0" {
		t.Errorf("Bridge = %q, want vmbr0", result.Bridge)
	}

	// Should be tracked
	if prov.ActiveSandboxCount() != 1 {
		t.Errorf("ActiveSandboxCount = %d, want 1", prov.ActiveSandboxCount())
	}
}

// An explicit Network in the request overrides the configured bridge.
func TestProvider_CreateSandbox_CustomBridge(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{
		{VMID: 100, Name: "src", Template: 1, Status: "stopped"},
	}
	mock.ifaces[9000] = []CTInterface{
		{Name: "eth0", Inet: "192.168.1.10/24"},
	}
	mock.statuses[9000] = CTStatus{VMID: 9000, Status: "stopped"}

	prov, _ := testProvider(t, mock)

	req := provider.CreateRequest{
		SandboxID: "sbx-custom-bridge",
		SourceVM:  "src",
		Network:   "vmbr1",
	}

	result, err := prov.CreateSandbox(context.Background(), req)
	if err != nil {
		t.Fatalf("CreateSandbox() error: %v", err)
	}

	if result.Bridge != "vmbr1" {
		t.Errorf("Bridge = %q, want vmbr1", result.Bridge)
	}
}

func TestProvider_DiscoverIP_FiltersLoopback(t *testing.T) {
	mock := newMockProxmox()
	mock.ifaces[100] = []CTInterface{
		{Name: "lo", Inet: "127.0.0.1/8"},
		{Name: "eth0", Inet: "10.0.0.5/24"},
	}

	prov, _ := testProvider(t, mock)

	ip, err := prov.discoverIP(context.Background(), 100, 5*time.Second)
	if err != nil {
		t.Fatalf("discoverIP() error: %v", err)
	}
	if ip != "10.0.0.5" {
		t.Errorf("IP = %q, want 10.0.0.5", ip)
	}
}

func TestProvider_DiscoverIP_FiltersLinkLocal(t *testing.T) {
	mock := newMockProxmox()
	mock.ifaces[100] = []CTInterface{
		{Name: "eth0", Inet: "169.254.1.1/16"},
		{Name: "eth1", Inet: "192.168.0.5/24"},
	}

	prov, _ := testProvider(t, mock)

	ip, err := prov.discoverIP(context.Background(), 100, 5*time.Second)
	if err != nil {
		t.Fatalf("discoverIP() error: %v", err)
	}
	if ip != "192.168.0.5" {
		t.Errorf("IP = %q, want 192.168.0.5", ip)
	}
}

func TestProvider_DiscoverIP_Timeout(t *testing.T) {
	mock := newMockProxmox()
	// No interfaces configured - will never find an IP

	prov, _ := testProvider(t, mock)

	_, err := prov.discoverIP(context.Background(), 100, 100*time.Millisecond)
	if err == nil {
		t.Fatal("expected timeout error")
	}
	if !strings.Contains(err.Error(), "timeout") {
		t.Errorf("error = %q, want containing 'timeout'", err.Error())
	}
}

func TestProvider_New_InvalidConfig(t *testing.T) {
	cfg := Config{} // Missing required fields
	_, err := New(cfg, nil)
	if err == nil {
		t.Fatal("expected error for invalid config")
	}
}

func TestProvider_RunCommand_NotTracked(t *testing.T) {
	mock := newMockProxmox()
	prov, _ := testProvider(t, mock)

	_, err := prov.RunCommand(context.Background(), "unknown", "ls", 10*time.Second)
	if err == nil {
		t.Fatal("expected error for untracked sandbox")
	}
}

func TestProvider_ReadSourceFile_NotFound(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{}

	prov, _ := testProvider(t, mock)

	_, err := prov.ReadSourceFile(context.Background(), "nonexistent", "/etc/hostname")
	if err == nil {
		t.Fatal("expected error for nonexistent source")
	}
}

func TestProvider_RunSourceCommand_NotFound(t *testing.T) {
	mock := newMockProxmox()
	mock.cts = []CTListEntry{}

	prov, _ := testProvider(t, mock)

	_, err := prov.RunSourceCommand(context.Background(), "nonexistent", "ls", 10*time.Second)
	if err == nil {
		t.Fatal("expected error for nonexistent source")
	}
}
diff --git a/fluid-daemon/internal/provider/lxc/naming.go b/fluid-daemon/internal/provider/lxc/naming.go
new file mode 100644
index 00000000..e50b6710
--- /dev/null
+++ b/fluid-daemon/internal/provider/lxc/naming.go
package lxc

import (
	"context"
	"fmt"
	"sync"
)

// CTResolver resolves container names to VMIDs and vice versa.
+type CTResolver struct { + client *Client + mu sync.RWMutex + byName map[string]int + byID map[int]string +} + +// NewCTResolver creates a new CTResolver backed by the given client. +func NewCTResolver(client *Client) *CTResolver { + return &CTResolver{ + client: client, + byName: make(map[string]int), + byID: make(map[int]string), + } +} + +// Refresh reloads the container list from Proxmox and rebuilds the cache. +func (r *CTResolver) Refresh(ctx context.Context) error { + cts, err := r.client.ListCTs(ctx) + if err != nil { + return fmt.Errorf("refresh CT list: %w", err) + } + + r.mu.Lock() + defer r.mu.Unlock() + + r.byName = make(map[string]int, len(cts)) + r.byID = make(map[int]string, len(cts)) + for _, ct := range cts { + r.byName[ct.Name] = ct.VMID + r.byID[ct.VMID] = ct.Name + } + return nil +} + +// ResolveVMID returns the VMID for a given container name. +// If the name is not in the cache, it refreshes first. +func (r *CTResolver) ResolveVMID(ctx context.Context, name string) (int, error) { + r.mu.RLock() + vmid, ok := r.byName[name] + r.mu.RUnlock() + if ok { + return vmid, nil + } + + if err := r.Refresh(ctx); err != nil { + return 0, err + } + + r.mu.RLock() + defer r.mu.RUnlock() + vmid, ok = r.byName[name] + if !ok { + return 0, fmt.Errorf("container %q not found", name) + } + return vmid, nil +} + +// ResolveName returns the name for a given VMID. 
func (r *CTResolver) ResolveName(ctx context.Context, vmid int) (string, error) {
	// Fast path: cached lookup under the read lock.
	r.mu.RLock()
	name, ok := r.byID[vmid]
	r.mu.RUnlock()
	if ok {
		return name, nil
	}

	// Cache miss: reload the container list once, then retry.
	if err := r.Refresh(ctx); err != nil {
		return "", err
	}

	r.mu.RLock()
	defer r.mu.RUnlock()
	name, ok = r.byID[vmid]
	if !ok {
		return "", fmt.Errorf("VMID %d not found", vmid)
	}
	return name, nil
}
diff --git a/fluid-daemon/internal/provider/lxc/naming_test.go b/fluid-daemon/internal/provider/lxc/naming_test.go
new file mode 100644
index 00000000..3f2a61aa
--- /dev/null
+++ b/fluid-daemon/internal/provider/lxc/naming_test.go
package lxc

import (
	"context"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"
)

// testResolverClient builds a CTResolver whose client talks to a TLS test
// server that answers every request with the given CT list wrapped in the
// Proxmox {"data": ...} envelope.
func testResolverClient(t *testing.T, cts []CTListEntry) (*CTResolver, *httptest.Server) {
	t.Helper()
	srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		d, _ := json.Marshal(cts)
		resp := struct {
			Data json.RawMessage `json:"data"`
		}{Data: d}
		b, _ := json.Marshal(resp)
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write(b)
	}))
	t.Cleanup(srv.Close)

	cfg := Config{
		Host:      srv.URL,
		TokenID:   "test@pam!tok",
		Secret:    "secret",
		Node:      "pve",
		VerifySSL: false,
		Timeout:   5 * time.Second,
	}
	client := NewClient(cfg, nil)
	client.httpClient = srv.Client()
	client.httpClient.Timeout = 5 * time.Second

	return NewCTResolver(client), srv
}

func TestCTResolver_ResolveVMID(t *testing.T) {
	cts := []CTListEntry{
		{VMID: 100, Name: "web-server", Status: "running"},
		{VMID: 200, Name: "db-server", Status: "stopped"},
	}

	resolver, _ := testResolverClient(t, cts)

	vmid, err := resolver.ResolveVMID(context.Background(), "web-server")
	if err != nil {
		t.Fatalf("ResolveVMID() error: %v", err)
	}
	if vmid != 100 {
		t.Errorf("VMID = %d, want 100", vmid)
	}
}

func TestCTResolver_ResolveName(t *testing.T) {
	cts := []CTListEntry{
		{VMID: 100, Name: "web-server", Status: "running"},
	}

	resolver, _ := testResolverClient(t, cts)

	name, err := resolver.ResolveName(context.Background(), 100)
	if err != nil {
		t.Fatalf("ResolveName() error: %v", err)
	}
	if name != "web-server" {
		t.Errorf("Name = %q, want web-server", name)
	}
}

func TestCTResolver_NotFound(t *testing.T) {
	cts := []CTListEntry{
		{VMID: 100, Name: "web-server", Status: "running"},
	}

	resolver, _ := testResolverClient(t, cts)

	_, err := resolver.ResolveVMID(context.Background(), "nonexistent")
	if err == nil {
		t.Fatal("expected error for nonexistent CT name")
	}
}

// A second lookup of a cached name must not hit the API again.
func TestCTResolver_CacheHit(t *testing.T) {
	callCount := 0
	srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		callCount++
		cts := []CTListEntry{{VMID: 100, Name: "cached-ct"}}
		d, _ := json.Marshal(cts)
		resp := struct {
			Data json.RawMessage `json:"data"`
		}{Data: d}
		b, _ := json.Marshal(resp)
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write(b)
	}))
	defer srv.Close()

	cfg := Config{
		Host: srv.URL, TokenID: "t@p!t", Secret: "s", Node: "pve",
		VerifySSL: false, Timeout: 5 * time.Second,
	}
	client := NewClient(cfg, nil)
	client.httpClient = srv.Client()
	client.httpClient.Timeout = 5 * time.Second

	resolver := NewCTResolver(client)

	ctx := context.Background()

	// First call triggers refresh
	_, _ = resolver.ResolveVMID(ctx, "cached-ct")
	firstCount := callCount

	// Second call should use cache (no additional HTTP call)
	_, _ = resolver.ResolveVMID(ctx, "cached-ct")
	if callCount != firstCount {
		t.Errorf("expected cache hit, got %d calls (first had %d)", callCount, firstCount)
	}
}

// Refresh populates both directions of the mapping in one call.
func TestCTResolver_Refresh(t *testing.T) {
	cts := []CTListEntry{
		{VMID: 100, Name: "alpha"},
		{VMID: 200, Name: "beta"},
	}

	resolver, _ := testResolverClient(t, cts)

	err := resolver.Refresh(context.Background())
	if err != nil {
		t.Fatalf("Refresh() error: %v", err)
	}

	// Both should be cached now
	vmid, err := resolver.ResolveVMID(context.Background(), "alpha")
	if err != nil {
		t.Fatalf("ResolveVMID(alpha) error: %v", err)
	}
	if vmid != 100 {
		t.Errorf("alpha VMID = %d, want 100", vmid)
	}

	name, err := resolver.ResolveName(context.Background(), 200)
	if err != nil {
		t.Fatalf("ResolveName(200) error: %v", err)
	}
	if name != "beta" {
		t.Errorf("200 name = %q, want beta", name)
	}
}
diff --git a/fluid-daemon/internal/provider/lxc/types.go b/fluid-daemon/internal/provider/lxc/types.go
new file mode 100644
index 00000000..133e2527
--- /dev/null
+++ b/fluid-daemon/internal/provider/lxc/types.go
package lxc

// CTListEntry represents a container in the list returned by GET /nodes/{node}/lxc.
type CTListEntry struct {
	VMID     int     `json:"vmid"`
	Name     string  `json:"name"`
	Status   string  `json:"status"`
	Template int     `json:"template,omitempty"` // 1 if template
	MaxMem   int64   `json:"maxmem"`
	MaxDisk  int64   `json:"maxdisk"`
	CPU      float64 `json:"cpu"`
	Mem      int64   `json:"mem"`
	Uptime   int64   `json:"uptime"`
}

// CTStatus represents the status of an LXC container.
type CTStatus struct {
	VMID   int     `json:"vmid"`
	Name   string  `json:"name"`
	Status string  `json:"status"` // "running", "stopped"
	CPU    float64 `json:"cpu"`
	MaxMem int64   `json:"maxmem"`
	Mem    int64   `json:"mem"`
}

// CTConfig represents an LXC container's configuration.
type CTConfig struct {
	Hostname string `json:"hostname,omitempty"`
	Memory   int    `json:"memory"`
	Cores    int    `json:"cores"`
	Net0     string `json:"net0,omitempty"`
	RootFS   string `json:"rootfs,omitempty"`
}

// CTInterface represents a network interface from the container.
type CTInterface struct {
	Name   string `json:"name"`
	HWAddr string `json:"hwaddr"`
	Inet   string `json:"inet,omitempty"` // e.g. "10.0.0.5/24"
	Inet6  string `json:"inet6,omitempty"`
}

// NodeStatus represents a Proxmox node's resource status.
type NodeStatus struct {
	CPU      float64      `json:"cpu"`
	MaxCPU   int          `json:"maxcpu"`
	Memory   MemoryStatus `json:"memory"`
	RootFS   DiskStatus   `json:"rootfs"`
	Uptime   int64        `json:"uptime"`
	KVersion string       `json:"kversion"`
}

// MemoryStatus is memory info from node status.
type MemoryStatus struct {
	Total int64 `json:"total"`
	Used  int64 `json:"used"`
	Free  int64 `json:"free"`
}

// DiskStatus is disk info from node status.
type DiskStatus struct {
	Total     int64 `json:"total"`
	Used      int64 `json:"used"`
	Available int64 `json:"avail"`
}

// TaskStatus represents the status of an asynchronous Proxmox task.
type TaskStatus struct {
	Status     string `json:"status"`               // "running", "stopped"
	ExitStatus string `json:"exitstatus,omitempty"` // "OK" on success
	Type       string `json:"type"`
	ID         string `json:"id"`
	Node       string `json:"node"`
	PID        int    `json:"pid"`
	StartTime  int64  `json:"starttime"`
	EndTime    int64  `json:"endtime,omitempty"`
}
diff --git a/fluid-daemon/internal/provider/microvm/microvm_provider.go b/fluid-daemon/internal/provider/microvm/microvm_provider.go
new file mode 100644
index 00000000..ae7eba7c
--- /dev/null
+++ b/fluid-daemon/internal/provider/microvm/microvm_provider.go
// Package microvm implements the SandboxProvider interface for QEMU microVMs.
// It wraps the existing microvm, network, image, and sourcevm packages.
package microvm

import (
	"bytes"
	"context"
	"fmt"
	"log/slog"
	"os/exec"
	"runtime"
	"time"

	"github.com/aspectrr/fluid.sh/fluid-daemon/internal/id"
	"github.com/aspectrr/fluid.sh/fluid-daemon/internal/image"
	"github.com/aspectrr/fluid.sh/fluid-daemon/internal/microvm"
	"github.com/aspectrr/fluid.sh/fluid-daemon/internal/network"
	"github.com/aspectrr/fluid.sh/fluid-daemon/internal/provider"
	"github.com/aspectrr/fluid.sh/fluid-daemon/internal/sourcevm"
)

// Provider implements provider.SandboxProvider for QEMU microVMs.
type Provider struct {
	vmMgr    *microvm.Manager
	netMgr   *network.NetworkManager
	imgStore *image.Store
	srcVMMgr *sourcevm.Manager
	logger   *slog.Logger
}

// New creates a new microVM provider.
// A nil logger falls back to slog.Default(); any manager may be nil, in
// which case the corresponding operations return "not available" errors.
func New(
	vmMgr *microvm.Manager,
	netMgr *network.NetworkManager,
	imgStore *image.Store,
	srcVMMgr *sourcevm.Manager,
	logger *slog.Logger,
) *Provider {
	if logger == nil {
		logger = slog.Default()
	}
	return &Provider{
		vmMgr:    vmMgr,
		netMgr:   netMgr,
		imgStore: imgStore,
		srcVMMgr: srcVMMgr,
		logger:   logger.With("provider", "microvm"),
	}
}

// CreateSandbox provisions a new microVM: resolves the bridge, creates a
// QCOW2 overlay on the base image, attaches a TAP device, launches QEMU,
// and best-effort discovers the guest IP via DHCP snooping on the bridge.
// On launch failure the TAP device and overlay are cleaned up.
func (p *Provider) CreateSandbox(ctx context.Context, req provider.CreateRequest) (*provider.SandboxResult, error) {
	if p.vmMgr == nil {
		return nil, fmt.Errorf("microVM manager not available")
	}

	// Resolve bridge
	bridge, err := p.netMgr.ResolveBridge(ctx, req.SourceVM, req.Network)
	if err != nil {
		return nil, fmt.Errorf("resolve bridge: %w", err)
	}

	// Get base image path
	imagePath, err := p.imgStore.GetImagePath(req.BaseImage)
	if err != nil {
		return nil, fmt.Errorf("get base image: %w", err)
	}

	// Get kernel path
	kernelPath, err := p.imgStore.GetKernelPath(req.BaseImage)
	if err != nil {
		return nil, fmt.Errorf("get kernel: %w", err)
	}

	// Create overlay disk
	overlayPath, err := microvm.CreateOverlay(ctx, imagePath, p.vmMgr.WorkDir(), req.SandboxID)
	if err != nil {
		return nil, fmt.Errorf("create overlay: %w", err)
	}

	// Generate MAC address and TAP device
	mac := microvm.GenerateMACAddress()
	tapName := network.TAPName(req.SandboxID)

	// Create TAP device
	if err := network.CreateTAP(ctx, tapName, bridge, p.logger); err != nil {
		_ = microvm.RemoveOverlay(p.vmMgr.WorkDir(), req.SandboxID)
		return nil, fmt.Errorf("create TAP: %w", err)
	}

	// Apply defaults
	vcpus := req.VCPUs
	if vcpus == 0 {
		vcpus = 2
	}
	memMB := req.MemoryMB
	if memMB == 0 {
		memMB = 2048
	}

	// Launch microVM
	info, err := p.vmMgr.Launch(ctx, microvm.LaunchConfig{
		SandboxID:   req.SandboxID,
		Name:        req.Name,
		OverlayPath: overlayPath,
		KernelPath:  kernelPath,
		TAPDevice:   tapName,
		MACAddress:  mac,
		Bridge:      bridge,
		VCPUs:       vcpus,
		MemoryMB:    memMB,
	})
	if err != nil {
		_ = network.DestroyTAP(ctx, tapName)
		_ = microvm.RemoveOverlay(p.vmMgr.WorkDir(), req.SandboxID)
		return nil, fmt.Errorf("launch microVM: %w", err)
	}

	// Discover IP — non-fatal: the sandbox is returned without an IP on failure.
	ip, err := p.netMgr.DiscoverIP(ctx, mac, bridge, 2*time.Minute)
	if err != nil {
		p.logger.Warn("IP discovery failed", "sandbox_id", req.SandboxID, "error", err)
	}

	return &provider.SandboxResult{
		SandboxID:  req.SandboxID,
		Name:       req.Name,
		State:      "RUNNING",
		IPAddress:  ip,
		MACAddress: mac,
		Bridge:     bridge,
		PID:        info.PID,
	}, nil
}

// DestroySandbox tears down the TAP device, the VM, and the overlay.
// NOTE(review): failures are logged and swallowed — the method always
// returns nil. Confirm callers do not rely on an error here.
func (p *Provider) DestroySandbox(ctx context.Context, sandboxID string) error {
	if p.vmMgr != nil {
		info, err := p.vmMgr.Get(sandboxID)
		if err == nil {
			_ = network.DestroyTAP(ctx, info.TAPDevice)
		}
		if err := p.vmMgr.Destroy(ctx, sandboxID); err != nil {
			p.logger.Error("destroy microVM failed", "sandbox_id", sandboxID, "error", err)
		}
		_ = microvm.RemoveOverlay(p.vmMgr.WorkDir(), sandboxID)
	}
	return nil
}

// StartSandbox reports the sandbox as RUNNING and best-effort rediscovers
// its IP. NOTE(review): no actual start command is issued here — presumably
// the VM is already running; confirm against Manager semantics.
func (p *Provider) StartSandbox(ctx context.Context, sandboxID string) (*provider.SandboxResult, error) {
	if p.vmMgr == nil {
		return nil, fmt.Errorf("microVM manager not available")
	}

	info, err := p.vmMgr.Get(sandboxID)
	if err != nil {
		return nil, fmt.Errorf("get sandbox: %w", err)
	}

	ip := ""
	if p.netMgr != nil {
		ip, _ = p.netMgr.DiscoverIP(ctx, info.MACAddress, info.Bridge, 30*time.Second)
	}

	return &provider.SandboxResult{
		SandboxID: sandboxID,
		State:     "RUNNING",
		IPAddress: ip,
	}, nil
}

// StopSandbox stops the VM; force selects the manager's hard-stop path.
func (p *Provider) StopSandbox(ctx context.Context, sandboxID string, force bool) error {
	if p.vmMgr == nil {
		return fmt.Errorf("microVM manager not available")
	}
	return p.vmMgr.Stop(ctx, sandboxID, force)
}

// GetSandboxIP discovers the sandbox IP from its MAC/bridge (30s budget).
func (p *Provider) GetSandboxIP(ctx context.Context, sandboxID string) (string, error) {
	if p.vmMgr == nil {
		return "", fmt.Errorf("microVM manager not available")
	}

	info, err := p.vmMgr.Get(sandboxID)
	if err != nil {
		return "", fmt.Errorf("get sandbox: %w", err)
	}

	if p.netMgr == nil {
		return "", fmt.Errorf("network manager not available")
	}

	return p.netMgr.DiscoverIP(ctx, info.MACAddress, info.Bridge, 30*time.Second)
}

// CreateSnapshot allocates a snapshot ID for the sandbox.
// NOTE(review): no disk snapshot is actually taken here — only an ID is
// generated. Confirm this placeholder behavior is intended.
func (p *Provider) CreateSnapshot(_ context.Context, sandboxID, name string) (*provider.SnapshotResult, error) {
	if p.vmMgr == nil {
		return nil, fmt.Errorf("microVM manager not available")
	}

	snapshotID, err := id.Generate("SNP-")
	if err != nil {
		return nil, fmt.Errorf("generate snapshot ID: %w", err)
	}
	return &provider.SnapshotResult{
		SnapshotID:   snapshotID,
		SnapshotName: name,
	}, nil
}

// RunCommand executes a shell command in the sandbox over SSH, discovering
// the IP first. A zero timeout defaults to 5 minutes.
func (p *Provider) RunCommand(ctx context.Context, sandboxID, command string, timeout time.Duration) (*provider.CommandResult, error) {
	if p.vmMgr == nil {
		return nil, fmt.Errorf("microVM manager not available")
	}

	info, err := p.vmMgr.Get(sandboxID)
	if err != nil {
		return nil, fmt.Errorf("get sandbox: %w", err)
	}

	ip := ""
	if p.netMgr != nil {
		ip, _ = p.netMgr.DiscoverIP(ctx, info.MACAddress, info.Bridge, 30*time.Second)
	}
	if ip == "" {
		return nil, fmt.Errorf("unable to discover sandbox IP for SSH")
	}

	if timeout == 0 {
		timeout = 5 * time.Minute
	}

	start := time.Now()
	stdout, stderr, exitCode, err := runSSHCommand(ctx, ip, command, timeout)
	if err != nil {
		return nil, fmt.Errorf("run command: %w", err)
	}

	return &provider.CommandResult{
		Stdout:     stdout,
		Stderr:     stderr,
		ExitCode:   exitCode,
		DurationMS: time.Since(start).Milliseconds(),
	}, nil
}

// ListTemplates returns the names of available base images; nil store
// yields an empty result without error.
func (p *Provider) ListTemplates(_ context.Context) ([]string, error) {
	if p.imgStore == nil {
		return nil, nil
	}
	return p.imgStore.ListNames()
}

// ListSourceVMs adapts the source VM manager's listing to provider types.
func (p *Provider) ListSourceVMs(ctx context.Context) ([]provider.SourceVMInfo, error) {
	if p.srcVMMgr == nil {
		return nil, fmt.Errorf("source VM manager not available")
	}

	vms, err := p.srcVMMgr.ListVMs(ctx)
	if err != nil {
		return nil, err
	}

	result := make([]provider.SourceVMInfo, len(vms))
	for i, vm := range vms {
		result[i] = provider.SourceVMInfo{
			Name:      vm.Name,
			State:     vm.State,
			IPAddress: vm.IPAddress,
			Prepared:  vm.Prepared,
		}
	}
	return result, nil
}

// ValidateSourceVM delegates validation and converts the result type.
func (p *Provider) ValidateSourceVM(ctx context.Context, vmName string) (*provider.ValidationResult, error) {
	if p.srcVMMgr == nil {
		return nil, fmt.Errorf("source VM manager not available")
	}

	result, err := p.srcVMMgr.ValidateSourceVM(ctx, vmName)
	if err != nil {
		return nil, err
	}

	return &provider.ValidationResult{
		VMName:     result.VMName,
		Valid:      result.Valid,
		State:      result.State,
		MACAddress: result.MACAddress,
		IPAddress:  result.IPAddress,
		HasNetwork: result.HasNetwork,
		Warnings:   result.Warnings,
		Errors:     result.Errors,
	}, nil
}

// PrepareSourceVM delegates preparation and converts the result type.
func (p *Provider) PrepareSourceVM(ctx context.Context, vmName, sshUser, sshKeyPath string) (*provider.PrepareResult, error) {
	if p.srcVMMgr == nil {
		return nil, fmt.Errorf("source VM manager not available")
	}

	result, err := p.srcVMMgr.PrepareSourceVM(ctx, vmName, sshUser, sshKeyPath)
	if err != nil {
		return nil, err
	}

	return &provider.PrepareResult{
		SourceVM:          result.SourceVM,
		IPAddress:         result.IPAddress,
		Prepared:          result.Prepared,
		UserCreated:       result.UserCreated,
		ShellInstalled:    result.ShellInstalled,
		CAKeyInstalled:    result.CAKeyInstalled,
		SSHDConfigured:    result.SSHDConfigured,
		PrincipalsCreated: result.PrincipalsCreated,
		SSHDRestarted:     result.SSHDRestarted,
	}, nil
}

// RunSourceCommand executes a command on a source VM and measures duration.
func (p *Provider) RunSourceCommand(ctx context.Context, vmName, command string, timeout time.Duration) (*provider.CommandResult, error) {
	if p.srcVMMgr == nil {
		return nil, fmt.Errorf("source VM manager not available")
	}

	start := time.Now()
	stdout, stderr, exitCode, err := p.srcVMMgr.RunSourceCommand(ctx, vmName, command, timeout)
	if err != nil {
		return nil, err
	}

	return &provider.CommandResult{
		Stdout:     stdout,
		Stderr:     stderr,
		ExitCode:   exitCode,
		DurationMS: time.Since(start).Milliseconds(),
	}, nil
}

// ReadSourceFile reads a file from a source VM via the source VM manager.
func (p *Provider) ReadSourceFile(ctx context.Context, vmName, path string) (string, error) {
	if p.srcVMMgr == nil {
		return "", fmt.Errorf("source VM manager not available")
	}
	return p.srcVMMgr.ReadSourceFile(ctx, vmName, path)
}

// Capabilities reports host CPU count and the available base images.
// NOTE(review): AvailableCPUs is set equal to TotalCPUs — no accounting
// for CPUs already committed to running sandboxes.
func (p *Provider) Capabilities(_ context.Context) (*provider.HostCapabilities, error) {
	caps := &provider.HostCapabilities{
		TotalCPUs:     runtime.NumCPU(),
		AvailableCPUs: runtime.NumCPU(),
	}

	if p.imgStore != nil {
		names, _ := p.imgStore.ListNames()
		caps.BaseImages = names
	}

	return caps, nil
}

// ActiveSandboxCount returns the number of VMs the manager is tracking.
func (p *Provider) ActiveSandboxCount() int {
	if p.vmMgr == nil {
		return 0
	}
	return len(p.vmMgr.List())
}

// RecoverState re-adopts running VMs after a daemon restart.
func (p *Provider) RecoverState(ctx context.Context) error {
	if p.vmMgr == nil {
		return nil
	}
	return p.vmMgr.RecoverState(ctx)
}

// runSSHCommand executes a command on a sandbox via SSH.
// A non-zero remote exit code is returned as exitCode with err == nil;
// only transport-level failures produce a non-nil err.
func runSSHCommand(ctx context.Context, ip, command string, timeout time.Duration) (stdout, stderr string, exitCode int, err error) {
	cmdCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	sshArgs := []string{
		"-o", "StrictHostKeyChecking=no",
		"-o", "UserKnownHostsFile=/dev/null",
		"-o", "ConnectTimeout=10",
		fmt.Sprintf("sandbox@%s", ip),
		command,
	}

	cmd := exec.CommandContext(cmdCtx, "ssh", sshArgs...)
	var stdoutBuf, stderrBuf bytes.Buffer
	cmd.Stdout = &stdoutBuf
	cmd.Stderr = &stderrBuf

	err = cmd.Run()
	if err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			return stdoutBuf.String(), stderrBuf.String(), exitErr.ExitCode(), nil
		}
		return "", "", -1, err
	}

	return stdoutBuf.String(), stderrBuf.String(), 0, nil
}
diff --git a/fluid-daemon/internal/provider/provider.go b/fluid-daemon/internal/provider/provider.go
new file mode 100644
index 00000000..7614396a
--- /dev/null
+++ b/fluid-daemon/internal/provider/provider.go
// Package provider defines the SandboxProvider interface that abstracts
// sandbox lifecycle management across different backends (microVM, LXC).
package provider

import (
	"context"
	"time"
)

// SandboxProvider abstracts sandbox lifecycle management.
// Implementations handle the details of creating, managing, and executing
// commands in sandboxes - whether they are QEMU microVMs or LXC containers.
type SandboxProvider interface {
	// Sandbox lifecycle
	CreateSandbox(ctx context.Context, req CreateRequest) (*SandboxResult, error)
	DestroySandbox(ctx context.Context, sandboxID string) error
	StartSandbox(ctx context.Context, sandboxID string) (*SandboxResult, error)
	StopSandbox(ctx context.Context, sandboxID string, force bool) error
	GetSandboxIP(ctx context.Context, sandboxID string) (string, error)
	CreateSnapshot(ctx context.Context, sandboxID, name string) (*SnapshotResult, error)

	// Command execution (SSH for microvm, pct exec for lxc)
	RunCommand(ctx context.Context, sandboxID, command string, timeout time.Duration) (*CommandResult, error)

	// Template/image listing for registration
	ListTemplates(ctx context.Context) ([]string, error)

	// Source VM/CT operations
	ListSourceVMs(ctx context.Context) ([]SourceVMInfo, error)
	ValidateSourceVM(ctx context.Context, vmName string) (*ValidationResult, error)
	PrepareSourceVM(ctx context.Context, vmName, sshUser, sshKeyPath string) (*PrepareResult, error)
	RunSourceCommand(ctx context.Context, vmName, command string, timeout time.Duration) (*CommandResult, error)
	ReadSourceFile(ctx context.Context, vmName, path string) (string, error)

	// Registration capabilities
	Capabilities(ctx context.Context) (*HostCapabilities, error)

	// Active sandbox count for heartbeat
	ActiveSandboxCount() int

	// Recovery after restart
	RecoverState(ctx context.Context) error
}

// CreateRequest holds parameters for creating a sandbox.
type CreateRequest struct {
	SandboxID    string
	Name         string
	BaseImage    string // QCOW2 name (microvm) or CT template name (lxc)
	SourceVM     string // for bridge resolution (microvm) or clone source (lxc)
	Network      string // bridge override
	VCPUs        int
	MemoryMB     int
	TTLSeconds   int
	AgentID      string
	SSHPublicKey string
}

// SandboxResult holds the result of a sandbox lifecycle operation.
type SandboxResult struct {
	SandboxID  string
	Name       string
	State      string // RUNNING, STOPPED, etc.
	IPAddress  string
	MACAddress string
	Bridge     string
	PID        int // QEMU PID (microvm) or 0 (lxc)
}

// SnapshotResult holds the result of a snapshot operation.
type SnapshotResult struct {
	SnapshotID   string
	SnapshotName string
}

// CommandResult holds the result of a command execution.
type CommandResult struct {
	Stdout     string
	Stderr     string
	ExitCode   int
	DurationMS int64
}

// PrepareResult holds the outcome of preparing a source VM for read-only access.
type PrepareResult struct {
	SourceVM          string
	IPAddress         string
	Prepared          bool
	UserCreated       bool
	ShellInstalled    bool
	CAKeyInstalled    bool
	SSHDConfigured    bool
	PrincipalsCreated bool
	SSHDRestarted     bool
}

// SourceVMInfo describes a source VM/CT visible to the provider.
type SourceVMInfo struct {
	Name      string
	State     string
	IPAddress string
	Prepared  bool
}

// ValidationResult contains the result of validating a source VM.
+type ValidationResult struct { + VMName string + Valid bool + State string + MACAddress string + IPAddress string + HasNetwork bool + Warnings []string + Errors []string +} + +// HostCapabilities describes the resources and images available on this host. +type HostCapabilities struct { + TotalCPUs int + AvailableCPUs int + TotalMemoryMB int + AvailableMemMB int + TotalDiskMB int + AvailableDiskMB int + BaseImages []string + Bridges []string +} diff --git a/fluid-daemon/internal/readonly/prepare.go b/fluid-daemon/internal/readonly/prepare.go new file mode 100644 index 00000000..b8ff77d7 --- /dev/null +++ b/fluid-daemon/internal/readonly/prepare.go @@ -0,0 +1,194 @@ +package readonly + +import ( + "context" + "encoding/base64" + "fmt" + "log/slog" + "strings" +) + +// SSHRunFunc executes a command on a remote host via SSH. +// Returns stdout, stderr, exit code, and error. +type SSHRunFunc func(ctx context.Context, command string) (stdout, stderr string, exitCode int, err error) + +// PrepareStep identifies a step in the source VM preparation flow. +type PrepareStep int + +const ( + StepInstallShell PrepareStep = iota // Install restricted shell script + StepCreateUser // Create fluid-readonly user + StepInstallCAKey // Copy CA pub key + StepConfigureSSHD // Configure sshd to trust CA key + StepCreatePrincipals // Set up authorized principals + StepRestartSSHD // Restart sshd +) + +// PrepareProgress reports progress during source VM preparation. +type PrepareProgress struct { + Step PrepareStep + StepName string + Total int // always 6 + Done bool // false=starting, true=completed +} + +// ProgressFunc is called before and after each preparation step. +// If nil, no progress is reported. +type ProgressFunc func(PrepareProgress) + +// PrepareResult contains the outcome of preparing a golden VM for read-only access. 
+type PrepareResult struct { + UserCreated bool + ShellInstalled bool + CAKeyInstalled bool + SSHDConfigured bool + PrincipalsCreated bool + SSHDRestarted bool +} + +// Prepare configures a golden VM for read-only access via the fluid-readonly user. +// All steps are idempotent. The sshRun function is used to execute commands on the VM. +// +// Steps: +// 1. Create fluid-readonly user with restricted shell +// 2. Install restricted shell script +// 3. Copy CA pub key for certificate verification +// 4. Configure sshd to trust the CA key +// 5. Set up authorized principals for fluid-readonly +// 6. Restart sshd +func Prepare(ctx context.Context, sshRun SSHRunFunc, caPubKey string, onProgress ProgressFunc, logger *slog.Logger) (*PrepareResult, error) { + if logger == nil { + logger = slog.Default() + } + if strings.TrimSpace(caPubKey) == "" { + return nil, fmt.Errorf("CA public key is required") + } + + result := &PrepareResult{} + + report := func(step PrepareStep, name string, done bool) { + if onProgress != nil { + onProgress(PrepareProgress{Step: step, StepName: name, Total: 6, Done: done}) + } + } + + // Wrap sshRun to elevate all commands with sudo via base64 transport. + // + // Security context: Prepare runs during one-time source VM setup by a + // trusted operator (not by AI agents). The SSH session is authenticated + // with the operator's own credentials, not the fluid-readonly user. + // + // Why base64: preparation commands contain heredocs, single quotes, + // double quotes, and newlines (e.g. writing the restricted shell script). + // Passing these through SSH + sudo introduces nested quoting that is + // fragile and error-prone. Base64 encoding the command on the Go side + // and decoding on the VM side avoids all shell interpolation issues. 
+ // + // The pattern is: echo | base64 -d | sudo bash + // - echo: emits the opaque base64 blob (no special chars to escape) + // - base64 -d: decodes to the original command string + // - sudo bash: executes with root privileges + // + // This wrapper is NOT used at runtime for agent commands. Agent commands + // go through RunWithCert which connects as the fluid-readonly user + // directly - no sudo, no base64, no privilege escalation. + origRun := sshRun + sshRun = func(ctx context.Context, command string) (string, string, int, error) { + encoded := base64.StdEncoding.EncodeToString([]byte(command)) + return origRun(ctx, fmt.Sprintf("echo %s | base64 -d | sudo bash", encoded)) + } + + // 1. Install restricted shell script at /usr/local/bin/fluid-readonly-shell + report(StepInstallShell, "Installing restricted shell", false) + logger.Info("installing restricted shell script") + shellCmd := fmt.Sprintf("cat > /usr/local/bin/fluid-readonly-shell << 'FLUID_SHELL_EOF'\n%sFLUID_SHELL_EOF\nchmod 755 /usr/local/bin/fluid-readonly-shell", RestrictedShellScript) + stdout, stderr, code, err := sshRun(ctx, shellCmd) + if err != nil || code != 0 { + return result, fmt.Errorf("install restricted shell: exit=%d stdout=%q stderr=%q err=%v", code, stdout, stderr, err) + } + result.ShellInstalled = true + logger.Info("restricted shell installed") + report(StepInstallShell, "Installing restricted shell", true) + + // 2. 
Create fluid-readonly user (idempotent - ignore if exists) + report(StepCreateUser, "Creating fluid-readonly user", false) + logger.Info("creating fluid-readonly user") + userCmd := `mkdir -p /var/empty && id fluid-readonly >/dev/null 2>&1 || useradd -r -s /usr/local/bin/fluid-readonly-shell -d /var/empty -M fluid-readonly` + stdout, stderr, code, err = sshRun(ctx, userCmd) + if err != nil || code != 0 { + return result, fmt.Errorf("create fluid-readonly user: exit=%d stdout=%q stderr=%q err=%v", code, stdout, stderr, err) + } + // Ensure the shell and home directory are correct even if user already existed + modOut, modErr, modCode, modRunErr := sshRun(ctx, "usermod -s /usr/local/bin/fluid-readonly-shell -d /var/empty fluid-readonly") + if modRunErr != nil || modCode != 0 { + logger.Warn("usermod fixup failed (non-fatal)", "exit", modCode, "stdout", modOut, "stderr", modErr, "error", modRunErr) + } else { + logger.Info("usermod fixup applied (shell and home directory)") + } + result.UserCreated = true + report(StepCreateUser, "Creating fluid-readonly user", true) + + // 3. Copy CA pub key to /etc/ssh/fluid_ca.pub + report(StepInstallCAKey, "Installing CA key", false) + logger.Info("installing CA public key") + caCmd := fmt.Sprintf("cat > /etc/ssh/fluid_ca.pub << 'FLUID_CA_EOF'\n%s\nFLUID_CA_EOF\nchmod 644 /etc/ssh/fluid_ca.pub", strings.TrimSpace(caPubKey)) + stdout, stderr, code, err = sshRun(ctx, caCmd) + if err != nil || code != 0 { + return result, fmt.Errorf("install CA pub key: exit=%d stdout=%q stderr=%q err=%v", code, stdout, stderr, err) + } + result.CAKeyInstalled = true + logger.Info("CA public key installed") + report(StepInstallCAKey, "Installing CA key", true) + + // 4. 
Configure sshd to trust the CA key (idempotent) + report(StepConfigureSSHD, "Configuring sshd", false) + logger.Info("configuring sshd") + sshdCmds := []string{ + // Add TrustedUserCAKeys if not present + `grep -q 'TrustedUserCAKeys /etc/ssh/fluid_ca.pub' /etc/ssh/sshd_config || echo 'TrustedUserCAKeys /etc/ssh/fluid_ca.pub' >> /etc/ssh/sshd_config`, + // Add AuthorizedPrincipalsFile if not present + `grep -q 'AuthorizedPrincipalsFile /etc/ssh/authorized_principals/%u' /etc/ssh/sshd_config || echo 'AuthorizedPrincipalsFile /etc/ssh/authorized_principals/%u' >> /etc/ssh/sshd_config`, + } + for _, cmd := range sshdCmds { + stdout, stderr, code, err = sshRun(ctx, cmd) + if err != nil || code != 0 { + return result, fmt.Errorf("configure sshd: exit=%d stdout=%q stderr=%q err=%v", code, stdout, stderr, err) + } + } + result.SSHDConfigured = true + logger.Info("sshd configured") + report(StepConfigureSSHD, "Configuring sshd", true) + + // 5. Create authorized_principals directory and file for fluid-readonly + report(StepCreatePrincipals, "Creating authorized principals", false) + logger.Info("creating authorized principals") + principalsCmds := []string{ + "mkdir -p /etc/ssh/authorized_principals", + "echo 'fluid-readonly' > /etc/ssh/authorized_principals/fluid-readonly", + "chmod 644 /etc/ssh/authorized_principals/fluid-readonly", + } + for _, cmd := range principalsCmds { + stdout, stderr, code, err = sshRun(ctx, cmd) + if err != nil || code != 0 { + return result, fmt.Errorf("create principals: exit=%d stdout=%q stderr=%q err=%v", code, stdout, stderr, err) + } + } + result.PrincipalsCreated = true + logger.Info("authorized principals created") + report(StepCreatePrincipals, "Creating authorized principals", true) + + // 6. 
Restart sshd to apply changes + // Try systemctl first, fall back to service command + report(StepRestartSSHD, "Restarting sshd", false) + logger.Info("restarting sshd") + restartCmd := `systemctl restart sshd 2>/dev/null || systemctl restart ssh 2>/dev/null || service sshd restart 2>/dev/null || service ssh restart` + stdout, stderr, code, err = sshRun(ctx, restartCmd) + if err != nil || code != 0 { + return result, fmt.Errorf("restart sshd: exit=%d stdout=%q stderr=%q err=%v", code, stdout, stderr, err) + } + result.SSHDRestarted = true + logger.Info("sshd restarted") + report(StepRestartSSHD, "Restarting sshd", true) + + return result, nil +} diff --git a/fluid-daemon/internal/readonly/shell.go b/fluid-daemon/internal/readonly/shell.go new file mode 100644 index 00000000..cd3f359b --- /dev/null +++ b/fluid-daemon/internal/readonly/shell.go @@ -0,0 +1,227 @@ +package readonly + +// RestrictedShellScript is the server-side restricted shell installed at +// /usr/local/bin/fluid-readonly-shell on golden VMs. It blocks destructive +// commands as a defense-in-depth layer behind the client-side allowlist. +const RestrictedShellScript = `#!/bin/bash +# fluid-readonly-shell - restricted shell for read-only VM access. +# Installed by: fluid source prepare +# This shell is set as the login shell for the fluid-readonly user. +# Commands are accepted via SSH_ORIGINAL_COMMAND (ForceCommand) or -c arg (login shell). + +set -euo pipefail + +# Extract command from SSH_ORIGINAL_COMMAND or login shell -c invocation +if [ -n "${SSH_ORIGINAL_COMMAND:-}" ]; then + CMD="$SSH_ORIGINAL_COMMAND" +elif [ "${1:-}" = "-c" ] && [ -n "${2:-}" ]; then + CMD="$2" +else + echo "ERROR: Interactive login is not permitted. This account is for read-only SSH commands only." 
>&2 + exit 1 +fi + +# Blocked command patterns (destructive operations) +BLOCKED_PATTERNS=( + "^sudo " + "^su " + "^rm " + "^mv " + "^cp " + "^dd " + "^kill " + "^killall " + "^pkill " + "^shutdown " + "^reboot " + "^halt " + "^poweroff " + "^init " + "^telinit " + "^chmod " + "^chown " + "^chgrp " + "^useradd " + "^userdel " + "^usermod " + "^groupadd " + "^groupdel " + "^groupmod " + "^passwd " + "^chpasswd " + "^mkfs" + "^mount " + "^umount " + "^fdisk " + "^parted " + "^lvm " + "^mdadm " + "^wget " + "^curl " + "^scp " + "^rsync " + "^ftp " + "^sftp " + "^python" + "^perl " + "^ruby " + "^node " + "^bash " + "^sh " + "^zsh " + "^dash " + "^csh " + "^vi " + "^vim " + "^nano " + "^emacs " + "^sed -i" + "^tee " + "^install " + "^make " + "^gcc " + "^g++ " + "^cc " + "^iptables " + "^ip6tables " + "^nft " + "^systemctl start" + "^systemctl stop" + "^systemctl restart" + "^systemctl reload" + "^systemctl enable" + "^systemctl disable" + "^systemctl daemon" + "^systemctl mask" + "^systemctl unmask" + "^systemctl edit" + "^systemctl set" + "^apt install" + "^apt remove" + "^apt purge" + "^apt autoremove" + "^apt-get " + "^dpkg -i" + "^dpkg --install" + "^dpkg --remove" + "^dpkg --purge" + "^rpm -i" + "^rpm --install" + "^rpm -e" + "^rpm --erase" + "^yum " + "^dnf " + "^pip install" + "^pip uninstall" + "^pip3 install" + "^pip3 uninstall" +) + +# Block command substitution and subshells +# Check for $(...), backticks, <(...), >(...) +if echo "$CMD" | grep -qE '\$\(|` + "`" + `|<\(|>\('; then + echo "ERROR: Command substitution and subshells are not permitted." >&2 + exit 126 +fi + +# Block output redirection +if echo "$CMD" | grep -qE '[^"'"'"']>[^&]|[^"'"'"']>>'; then + echo "ERROR: Output redirection is not permitted." >&2 + exit 126 +fi + +# Block newlines (commands must be single-line) +if [[ "$CMD" == *$'\n'* ]]; then + echo "ERROR: Multi-line commands are not permitted." 
>&2 + exit 126 +fi + +# Split command on all shell separators: | || ; && (and newlines, already blocked above) +# We need to parse the command to split on these operators outside of quotes. +# For defense-in-depth, we'll use a bash function to split properly. + +# Parse and validate each segment +parse_and_validate_segments() { + local cmd="$1" + local segment="" + local in_single_quote=false + local in_double_quote=false + local prev_char="" + local i + + for (( i=0; i<${#cmd}; i++ )); do + local char="${cmd:$i:1}" + local next_char="${cmd:$((i+1)):1}" + + # Track quote state + if [[ "$char" == "'" && "$in_double_quote" == false && "$prev_char" != "\\" ]]; then + if [[ "$in_single_quote" == true ]]; then + in_single_quote=false + else + in_single_quote=true + fi + segment+="$char" + elif [[ "$char" == '"' && "$in_single_quote" == false && "$prev_char" != "\\" ]]; then + if [[ "$in_double_quote" == true ]]; then + in_double_quote=false + else + in_double_quote=true + fi + segment+="$char" + # Check for separators outside quotes + elif [[ "$in_single_quote" == false && "$in_double_quote" == false ]]; then + if [[ "$char" == "|" ]]; then + # Check for || + if [[ "$next_char" == "|" ]]; then + validate_segment "$segment" + segment="" + ((i++)) # Skip next | + else + validate_segment "$segment" + segment="" + fi + elif [[ "$char" == ";" ]]; then + validate_segment "$segment" + segment="" + elif [[ "$char" == "&" && "$next_char" == "&" ]]; then + validate_segment "$segment" + segment="" + ((i++)) # Skip next & + else + segment+="$char" + fi + else + segment+="$char" + fi + + prev_char="$char" + done + + # Validate the last segment + if [[ -n "$segment" ]]; then + validate_segment "$segment" + fi +} + +validate_segment() { + local segment="$1" + # Trim leading whitespace + segment="${segment#"${segment%%[![:space:]]*}"}" + + # Skip empty segments + [[ -z "$segment" ]] && return + + for pattern in "${BLOCKED_PATTERNS[@]}"; do + if echo "$segment" | grep -qE "$pattern"; 
then + echo "ERROR: Command blocked by restricted shell: $segment" >&2 + exit 126 + fi + done +} + +# Validate all segments +parse_and_validate_segments "$CMD" + +# Execute the command +exec /bin/bash -c "$CMD" +` diff --git a/fluid-daemon/internal/readonly/validate.go b/fluid-daemon/internal/readonly/validate.go new file mode 100644 index 00000000..60d298c6 --- /dev/null +++ b/fluid-daemon/internal/readonly/validate.go @@ -0,0 +1,328 @@ +// Package readonly provides client-side command validation for read-only +// golden VM access. Commands are parsed into pipeline segments and each +// segment's first token is checked against an allowlist. +package readonly + +import ( + "fmt" + "strings" +) + +// allowedCommands is the set of commands permitted in read-only mode. +var allowedCommands = map[string]bool{ + // File inspection + "cat": true, "ls": true, "find": true, "head": true, "tail": true, + "stat": true, "file": true, "wc": true, "du": true, "tree": true, + "strings": true, "md5sum": true, "sha256sum": true, "readlink": true, + "realpath": true, "basename": true, "dirname": true, "base64": true, + + // Process/system + "ps": true, "top": true, "pgrep": true, + "systemctl": true, "journalctl": true, "dmesg": true, + + // Network + "ss": true, "netstat": true, "ip": true, "ifconfig": true, + "dig": true, "nslookup": true, "ping": true, + + // Disk + "df": true, "lsblk": true, "blkid": true, + + // Package query + "dpkg": true, "rpm": true, "apt": true, "pip": true, + + // System info + "uname": true, "hostname": true, "uptime": true, "free": true, + "lscpu": true, "lsmod": true, "lspci": true, "lsusb": true, + "arch": true, "nproc": true, + + // User + "whoami": true, "id": true, "groups": true, "who": true, + "w": true, "last": true, + + // Misc + "env": true, "printenv": true, "date": true, "which": true, + "type": true, "echo": true, "test": true, + + // Pipe targets + "grep": true, "awk": true, "sed": true, "sort": true, "uniq": true, + "cut": true, "tr": 
true, "xargs": true, +} + +// subcommandRestrictions maps commands to the set of allowed first arguments. +// If a command appears here, its first argument must be in the allowed set. +var subcommandRestrictions = map[string]map[string]bool{ + "systemctl": { + "status": true, + "show": true, + "list-units": true, + "is-active": true, + "is-enabled": true, + }, + "dpkg": { + "-l": true, + "--list": true, + }, + "rpm": { + "-qa": true, + "-q": true, + }, + "apt": { + "list": true, + }, + "pip": { + "list": true, + }, +} + +// ValidateCommand checks that every command in a pipeline is allowed for +// read-only mode. Returns nil if all commands are allowed, or an error +// describing the first violation found. +func ValidateCommand(command string) error { + command = strings.TrimSpace(command) + if command == "" { + return fmt.Errorf("empty command") + } + + // Block dangerous shell metacharacters that could be used for command injection. + if err := checkDangerousMetacharacters(command); err != nil { + return err + } + + // Block output redirection (unquoted > or >>). + if containsUnquotedRedirection(command) { + return fmt.Errorf("output redirection is not allowed in read-only mode") + } + + // Split on pipes to get pipeline segments. + segments := splitPipeline(command) + + for _, seg := range segments { + seg = strings.TrimSpace(seg) + if seg == "" { + continue + } + + // Extract the base command (first token), skipping env var assignments. + baseCmd := extractBaseCommand(seg) + if baseCmd == "" { + continue + } + + if !allowedCommands[baseCmd] { + return fmt.Errorf("command %q is not allowed in read-only mode", baseCmd) + } + + // Check subcommand restrictions if applicable. 
+ if restrictions, ok := subcommandRestrictions[baseCmd]; ok { + subCmd := extractSubcommand(seg, baseCmd) + if subCmd != "" && !restrictions[subCmd] { + return fmt.Errorf("%s subcommand %q is not allowed in read-only mode (allowed: %s)", + baseCmd, subCmd, joinKeys(restrictions)) + } + } + } + + return nil +} + +// checkDangerousMetacharacters detects shell expansion primitives that could +// be used to smuggle commands past the allowlist. We block command substitution, +// process substitution, and newlines outside of quotes. +func checkDangerousMetacharacters(s string) error { + inSingle := false + inDouble := false + prev := rune(0) + + runes := []rune(s) + for i := 0; i < len(runes); i++ { + ch := runes[i] + + switch { + case ch == '\'' && !inDouble && prev != '\\': + inSingle = !inSingle + case ch == '"' && !inSingle && prev != '\\': + inDouble = !inDouble + case !inSingle && !inDouble: + // Check for command substitution: $(...) + if ch == '$' && i+1 < len(runes) && runes[i+1] == '(' { + return fmt.Errorf("command substitution $(...) is not allowed in read-only mode") + } + // Check for backticks (alternate command substitution) + if ch == '`' { + return fmt.Errorf("backtick command substitution is not allowed in read-only mode") + } + // Check for process substitution: <(...) or >(...) + if (ch == '<' || ch == '>') && i+1 < len(runes) && runes[i+1] == '(' { + return fmt.Errorf("process substitution is not allowed in read-only mode") + } + // Check for newlines (could be used to inject additional commands) + if ch == '\n' || ch == '\r' { + return fmt.Errorf("newline characters are not allowed in read-only mode") + } + } + prev = ch + } + return nil +} + +// containsUnquotedRedirection detects > or >> outside of quotes. 
+func containsUnquotedRedirection(s string) bool { + inSingle := false + inDouble := false + prev := rune(0) + + for _, ch := range s { + switch { + case ch == '\'' && !inDouble && prev != '\\': + inSingle = !inSingle + case ch == '"' && !inSingle && prev != '\\': + inDouble = !inDouble + case ch == '>' && !inSingle && !inDouble: + // Check it's not inside a process substitution like >(cmd) + // A simple > or >> outside quotes is a redirect. + return true + } + prev = ch + } + return false +} + +// splitPipeline splits a command string on unquoted pipe characters. +// It also splits on ; and && to handle chained commands. +func splitPipeline(s string) []string { + var segments []string + var current strings.Builder + inSingle := false + inDouble := false + prev := rune(0) + + runes := []rune(s) + for i := 0; i < len(runes); i++ { + ch := runes[i] + + switch { + case ch == '\'' && !inDouble && prev != '\\': + inSingle = !inSingle + current.WriteRune(ch) + case ch == '"' && !inSingle && prev != '\\': + inDouble = !inDouble + current.WriteRune(ch) + case ch == '|' && !inSingle && !inDouble: + // Skip || (logical OR) - treat as separator anyway + if i+1 < len(runes) && runes[i+1] == '|' { + segments = append(segments, current.String()) + current.Reset() + i++ // skip second | + } else { + segments = append(segments, current.String()) + current.Reset() + } + case ch == ';' && !inSingle && !inDouble: + segments = append(segments, current.String()) + current.Reset() + case ch == '&' && !inSingle && !inDouble: + if i+1 < len(runes) && runes[i+1] == '&' { + segments = append(segments, current.String()) + current.Reset() + i++ // skip second & + } else { + current.WriteRune(ch) + } + default: + current.WriteRune(ch) + } + prev = ch + } + + if current.Len() > 0 { + segments = append(segments, current.String()) + } + + return segments +} + +// extractBaseCommand returns the first actual command token from a segment, +// skipping leading environment variable assignments 
(VAR=value). +func extractBaseCommand(seg string) string { + tokens := tokenize(seg) + for _, tok := range tokens { + // Skip env var assignments like FOO=bar + if strings.Contains(tok, "=") && !strings.HasPrefix(tok, "-") { + continue + } + // Handle path-qualified commands like /usr/bin/cat + base := tok + if idx := strings.LastIndex(tok, "/"); idx >= 0 { + base = tok[idx+1:] + } + return base + } + return "" +} + +// extractSubcommand returns the first argument after the base command, +// which for restricted commands is the subcommand to check. +func extractSubcommand(seg, baseCmd string) string { + tokens := tokenize(seg) + foundBase := false + for _, tok := range tokens { + if !foundBase { + // Skip env assignments + if strings.Contains(tok, "=") && !strings.HasPrefix(tok, "-") { + continue + } + base := tok + if idx := strings.LastIndex(tok, "/"); idx >= 0 { + base = tok[idx+1:] + } + if base == baseCmd { + foundBase = true + continue + } + } else { + return tok + } + } + return "" +} + +// tokenize splits a command segment into shell-like tokens, respecting quotes. +func tokenize(s string) []string { + var tokens []string + var current strings.Builder + inSingle := false + inDouble := false + prev := rune(0) + + for _, ch := range s { + switch { + case ch == '\'' && !inDouble && prev != '\\': + inSingle = !inSingle + case ch == '"' && !inSingle && prev != '\\': + inDouble = !inDouble + case (ch == ' ' || ch == '\t') && !inSingle && !inDouble: + if current.Len() > 0 { + tokens = append(tokens, current.String()) + current.Reset() + } + default: + current.WriteRune(ch) + } + prev = ch + } + + if current.Len() > 0 { + tokens = append(tokens, current.String()) + } + + return tokens +} + +// joinKeys returns a comma-separated list of map keys. 
+func joinKeys(m map[string]bool) string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + return strings.Join(keys, ", ") +} diff --git a/fluid-daemon/internal/snapshotpull/backend.go b/fluid-daemon/internal/snapshotpull/backend.go new file mode 100644 index 00000000..5ce79a5e --- /dev/null +++ b/fluid-daemon/internal/snapshotpull/backend.go @@ -0,0 +1,13 @@ +// Package snapshotpull provides snapshot-and-pull functionality for creating +// sandboxes from remote production VMs. +package snapshotpull + +import "context" + +// SnapshotBackend abstracts the mechanism for snapshotting a VM disk +// on a remote host and pulling it locally. +type SnapshotBackend interface { + // SnapshotAndPull creates a temporary snapshot of vmName's disk, + // transfers the backing image to destPath, then cleans up the snapshot. + SnapshotAndPull(ctx context.Context, vmName string, destPath string) error +} diff --git a/fluid-daemon/internal/snapshotpull/libvirt_backend.go b/fluid-daemon/internal/snapshotpull/libvirt_backend.go new file mode 100644 index 00000000..4bbbe9ee --- /dev/null +++ b/fluid-daemon/internal/snapshotpull/libvirt_backend.go @@ -0,0 +1,172 @@ +package snapshotpull + +import ( + "bytes" + "context" + "fmt" + "log/slog" + "os/exec" + "strings" +) + +// LibvirtBackend snapshots and pulls a VM disk from a remote libvirt host via SSH. +type LibvirtBackend struct { + sshHost string + sshPort int + sshUser string + sshIdentityFile string + logger *slog.Logger +} + +// NewLibvirtBackend creates a backend that uses SSH + virsh to snapshot and rsync to pull. 
+func NewLibvirtBackend(host string, port int, user, identityFile string, logger *slog.Logger) *LibvirtBackend { + if port == 0 { + port = 22 + } + if user == "" { + user = "root" + } + if logger == nil { + logger = slog.Default() + } + return &LibvirtBackend{ + sshHost: host, + sshPort: port, + sshUser: user, + sshIdentityFile: identityFile, + logger: logger.With("component", "libvirt-backend"), + } +} + +// SnapshotAndPull creates a temporary external snapshot, rsyncs the original +// (now read-only) disk to destPath, then blockcommits back and removes the snapshot. +func (b *LibvirtBackend) SnapshotAndPull(ctx context.Context, vmName string, destPath string) error { + b.logger.Info("starting snapshot-and-pull", "vm", vmName, "dest", destPath) + + // 1. Find the disk path + diskPath, err := b.findDiskPath(ctx, vmName) + if err != nil { + return fmt.Errorf("find disk path: %w", err) + } + b.logger.Info("found disk path", "vm", vmName, "disk", diskPath) + + // 2. Create external snapshot (makes original disk read-only) + snapName := "fluid-tmp-snap" + if err := b.createSnapshot(ctx, vmName, snapName); err != nil { + return fmt.Errorf("create snapshot: %w", err) + } + + // 3. Always clean up: blockcommit + delete snapshot metadata + defer func() { + if err := b.blockcommit(ctx, vmName); err != nil { + b.logger.Error("blockcommit failed", "vm", vmName, "error", err) + } + if err := b.deleteSnapshotMetadata(ctx, vmName, snapName); err != nil { + b.logger.Warn("delete snapshot metadata failed", "vm", vmName, "error", err) + } + }() + + // 4. Rsync the now read-only original disk to local destPath + if err := b.rsyncDisk(ctx, diskPath, destPath); err != nil { + return fmt.Errorf("rsync disk: %w", err) + } + + b.logger.Info("snapshot-and-pull complete", "vm", vmName, "dest", destPath) + return nil +} + +// findDiskPath uses virsh domblklist to find the primary disk path. 
+func (b *LibvirtBackend) findDiskPath(ctx context.Context, vmName string) (string, error) { + out, err := b.sshCommand(ctx, fmt.Sprintf("virsh domblklist %s --details", vmName)) + if err != nil { + return "", err + } + + // Parse output - look for "disk" type entries + for _, line := range strings.Split(out, "\n") { + fields := strings.Fields(line) + if len(fields) >= 4 && fields[0] == "file" && fields[1] == "disk" { + return fields[3], nil + } + } + return "", fmt.Errorf("no disk found for VM %s", vmName) +} + +// createSnapshot creates an external disk-only snapshot. +func (b *LibvirtBackend) createSnapshot(ctx context.Context, vmName, snapName string) error { + _, err := b.sshCommand(ctx, fmt.Sprintf( + "virsh snapshot-create-as %s %s --disk-only --atomic", + vmName, snapName, + )) + return err +} + +// blockcommit merges the snapshot back into the original disk and pivots. +func (b *LibvirtBackend) blockcommit(ctx context.Context, vmName string) error { + _, err := b.sshCommand(ctx, fmt.Sprintf( + "virsh blockcommit %s vda --active --pivot --delete", + vmName, + )) + return err +} + +// deleteSnapshotMetadata removes snapshot metadata from libvirt. +func (b *LibvirtBackend) deleteSnapshotMetadata(ctx context.Context, vmName, snapName string) error { + _, err := b.sshCommand(ctx, fmt.Sprintf( + "virsh snapshot-delete %s %s --metadata", + vmName, snapName, + )) + return err +} + +// rsyncDisk pulls the remote disk to a local path. +func (b *LibvirtBackend) rsyncDisk(ctx context.Context, remotePath, localPath string) error { + sshOpts := b.sshOpts() + src := fmt.Sprintf("%s@%s:%s", b.sshUser, b.sshHost, remotePath) + + args := []string{ + "-avz", "--progress", + "-e", fmt.Sprintf("ssh %s", sshOpts), + src, localPath, + } + + cmd := exec.CommandContext(ctx, "rsync", args...) 
+ var stderr bytes.Buffer + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("rsync: %w: %s", err, stderr.String()) + } + return nil +} + +// sshCommand runs a command on the remote host via SSH. +func (b *LibvirtBackend) sshCommand(ctx context.Context, command string) (string, error) { + args := []string{ + "-o", "StrictHostKeyChecking=no", + "-o", "BatchMode=yes", + "-p", fmt.Sprintf("%d", b.sshPort), + } + if b.sshIdentityFile != "" { + args = append(args, "-i", b.sshIdentityFile) + } + args = append(args, fmt.Sprintf("%s@%s", b.sshUser, b.sshHost), command) + + cmd := exec.CommandContext(ctx, "ssh", args...) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("ssh command %q: %w: %s", command, err, stderr.String()) + } + return stdout.String(), nil +} + +// sshOpts returns the SSH option string for use with rsync -e. +func (b *LibvirtBackend) sshOpts() string { + opts := fmt.Sprintf("-o StrictHostKeyChecking=no -o BatchMode=yes -p %d", b.sshPort) + if b.sshIdentityFile != "" { + opts += fmt.Sprintf(" -i %s", b.sshIdentityFile) + } + return opts +} diff --git a/fluid-daemon/internal/snapshotpull/libvirt_backend_test.go b/fluid-daemon/internal/snapshotpull/libvirt_backend_test.go new file mode 100644 index 00000000..5e704697 --- /dev/null +++ b/fluid-daemon/internal/snapshotpull/libvirt_backend_test.go @@ -0,0 +1,74 @@ +package snapshotpull + +import ( + "testing" +) + +func TestNewLibvirtBackend_Defaults(t *testing.T) { + b := NewLibvirtBackend("host1.example.com", 0, "", "", nil) + + if b.sshPort != 22 { + t.Errorf("expected default port 22, got %d", b.sshPort) + } + if b.sshUser != "root" { + t.Errorf("expected default user root, got %s", b.sshUser) + } + if b.sshHost != "host1.example.com" { + t.Errorf("expected host host1.example.com, got %s", b.sshHost) + } +} + +func TestNewLibvirtBackend_CustomValues(t *testing.T) { + b := 
NewLibvirtBackend("10.0.0.1", 2222, "admin", "/home/admin/.ssh/id_rsa", nil) + + if b.sshPort != 2222 { + t.Errorf("expected port 2222, got %d", b.sshPort) + } + if b.sshUser != "admin" { + t.Errorf("expected user admin, got %s", b.sshUser) + } + if b.sshIdentityFile != "/home/admin/.ssh/id_rsa" { + t.Errorf("expected identity file /home/admin/.ssh/id_rsa, got %s", b.sshIdentityFile) + } +} + +func TestLibvirtBackend_SSHOpts(t *testing.T) { + b := NewLibvirtBackend("host1", 2222, "user", "/path/to/key", nil) + opts := b.sshOpts() + + if opts == "" { + t.Fatal("expected non-empty ssh opts") + } + + // Should contain port + if !contains(opts, "-p 2222") { + t.Errorf("expected opts to contain port, got: %s", opts) + } + + // Should contain identity file + if !contains(opts, "-i /path/to/key") { + t.Errorf("expected opts to contain identity file, got: %s", opts) + } +} + +func TestLibvirtBackend_SSHOpts_NoIdentity(t *testing.T) { + b := NewLibvirtBackend("host1", 22, "root", "", nil) + opts := b.sshOpts() + + if contains(opts, "-i") { + t.Errorf("expected no identity file flag, got: %s", opts) + } +} + +func contains(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || len(s) > 0 && containsHelper(s, substr)) +} + +func containsHelper(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/fluid-daemon/internal/snapshotpull/proxmox_backend.go b/fluid-daemon/internal/snapshotpull/proxmox_backend.go new file mode 100644 index 00000000..d6903218 --- /dev/null +++ b/fluid-daemon/internal/snapshotpull/proxmox_backend.go @@ -0,0 +1,339 @@ +package snapshotpull + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "os" + "os/exec" + "strings" + "time" +) + +// ProxmoxBackend snapshots and pulls a VM disk from a Proxmox VE host via its REST API. 
+type ProxmoxBackend struct { + host string + tokenID string + secret string + node string + httpClient *http.Client + logger *slog.Logger +} + +// NewProxmoxBackend creates a backend that uses the Proxmox API to snapshot and download VM disks. +func NewProxmoxBackend(host, tokenID, secret, node string, verifySSL bool, logger *slog.Logger) *ProxmoxBackend { + if logger == nil { + logger = slog.Default() + } + transport := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: !verifySSL, + }, + } + return &ProxmoxBackend{ + host: strings.TrimRight(host, "/"), + tokenID: tokenID, + secret: secret, + node: node, + httpClient: &http.Client{ + Transport: transport, + Timeout: 10 * time.Minute, + }, + logger: logger.With("component", "proxmox-backend"), + } +} + +// SnapshotAndPull creates a snapshot on Proxmox, exports it via vzdump, +// downloads the dump, converts to qcow2, and cleans up. +func (b *ProxmoxBackend) SnapshotAndPull(ctx context.Context, vmName string, destPath string) error { + b.logger.Info("starting proxmox snapshot-and-pull", "vm", vmName, "dest", destPath) + + // 1. Resolve VMID from name + vmid, err := b.resolveVMID(ctx, vmName) + if err != nil { + return fmt.Errorf("resolve vmid: %w", err) + } + + // 2. Create snapshot + snapName := "fluid-tmp-snap" + if err := b.createSnapshot(ctx, vmid, snapName); err != nil { + return fmt.Errorf("create snapshot: %w", err) + } + defer func() { + if err := b.deleteSnapshot(ctx, vmid, snapName); err != nil { + b.logger.Warn("delete snapshot failed", "vmid", vmid, "error", err) + } + }() + + // 3. Create vzdump backup + dumpFile, err := b.vzdump(ctx, vmid) + if err != nil { + return fmt.Errorf("vzdump: %w", err) + } + defer func() { + // Clean up remote dump file + _ = b.deleteFile(ctx, dumpFile) + }() + + // 4. 
Download the dump + tmpDump := destPath + ".vzdump.tmp" + if err := b.downloadFile(ctx, dumpFile, tmpDump); err != nil { + return fmt.Errorf("download dump: %w", err) + } + defer func() { _ = os.Remove(tmpDump) }() + + // 5. Convert to qcow2 + if err := convertToQcow2(ctx, tmpDump, destPath); err != nil { + return fmt.Errorf("convert to qcow2: %w", err) + } + + b.logger.Info("proxmox snapshot-and-pull complete", "vm", vmName, "dest", destPath) + return nil +} + +// resolveVMID finds the VMID for a given VM name. +func (b *ProxmoxBackend) resolveVMID(ctx context.Context, vmName string) (string, error) { + path := fmt.Sprintf("/nodes/%s/qemu", b.node) + data, err := b.apiGet(ctx, path) + if err != nil { + return "", err + } + + var vms []struct { + VMID json.Number `json:"vmid"` + Name string `json:"name"` + } + if err := json.Unmarshal(data, &vms); err != nil { + return "", fmt.Errorf("parse vm list: %w", err) + } + + for _, vm := range vms { + if vm.Name == vmName { + return vm.VMID.String(), nil + } + } + return "", fmt.Errorf("VM %q not found on node %s", vmName, b.node) +} + +// createSnapshot creates a VM snapshot via the Proxmox API. +func (b *ProxmoxBackend) createSnapshot(ctx context.Context, vmid, snapName string) error { + path := fmt.Sprintf("/nodes/%s/qemu/%s/snapshot", b.node, vmid) + _, err := b.apiPost(ctx, path, url.Values{"snapname": {snapName}}) + return err +} + +// deleteSnapshot removes a VM snapshot via the Proxmox API. +func (b *ProxmoxBackend) deleteSnapshot(ctx context.Context, vmid, snapName string) error { + path := fmt.Sprintf("/nodes/%s/qemu/%s/snapshot/%s", b.node, vmid, snapName) + _, err := b.apiDelete(ctx, path) + return err +} + +// vzdump creates a backup of the VM and returns the dump file path. 
+func (b *ProxmoxBackend) vzdump(ctx context.Context, vmid string) (string, error) { + params := url.Values{ + "vmid": {vmid}, + "mode": {"snapshot"}, + "compress": {"zstd"}, + "storage": {"local"}, + } + data, err := b.apiPost(ctx, "/nodes/"+b.node+"/vzdump", params) + if err != nil { + return "", err + } + + // The response contains the UPID of the task. We need to wait for it. + var upid string + if err := json.Unmarshal(data, &upid); err != nil { + return "", fmt.Errorf("parse vzdump response: %w", err) + } + + // Wait for task completion + if err := b.waitForTask(ctx, upid); err != nil { + return "", err + } + + // Find the dump file in local storage + return b.findLatestDump(ctx, vmid) +} + +// waitForTask polls a Proxmox task until completion. +func (b *ProxmoxBackend) waitForTask(ctx context.Context, upid string) error { + path := fmt.Sprintf("/nodes/%s/tasks/%s/status", b.node, url.PathEscape(upid)) + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(2 * time.Second): + } + + data, err := b.apiGet(ctx, path) + if err != nil { + return err + } + + var status struct { + Status string `json:"status"` + Exitstatus string `json:"exitstatus"` + } + if err := json.Unmarshal(data, &status); err != nil { + return fmt.Errorf("parse task status: %w", err) + } + + if status.Status == "stopped" { + if status.Exitstatus != "OK" { + return fmt.Errorf("task failed: %s", status.Exitstatus) + } + return nil + } + } +} + +// findLatestDump finds the most recent vzdump file for a VMID. 
+func (b *ProxmoxBackend) findLatestDump(ctx context.Context, vmid string) (string, error) { + path := fmt.Sprintf("/nodes/%s/storage/local/content", b.node) + data, err := b.apiGet(ctx, path) + if err != nil { + return "", err + } + + var files []struct { + Volid string `json:"volid"` + CTime int64 `json:"ctime"` + } + if err := json.Unmarshal(data, &files); err != nil { + return "", fmt.Errorf("parse storage content: %w", err) + } + + var latest string + var latestTime int64 + prefix := fmt.Sprintf("vzdump-qemu-%s-", vmid) + for _, f := range files { + if strings.Contains(f.Volid, prefix) && f.CTime > latestTime { + latest = f.Volid + latestTime = f.CTime + } + } + + if latest == "" { + return "", fmt.Errorf("no vzdump found for vmid %s", vmid) + } + return latest, nil +} + +// downloadFile downloads a file from Proxmox storage to a local path. +func (b *ProxmoxBackend) downloadFile(ctx context.Context, volid, localPath string) error { + apiURL := fmt.Sprintf("%s/api2/json/nodes/%s/storage/local/file-restore/download", + b.host, b.node) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, apiURL, nil) + if err != nil { + return err + } + q := req.URL.Query() + q.Set("volume", volid) + q.Set("filepath", "/") + req.URL.RawQuery = q.Encode() + req.Header.Set("Authorization", fmt.Sprintf("PVEAPIToken=%s=%s", b.tokenID, b.secret)) + + resp, err := b.httpClient.Do(req) + if err != nil { + return err + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("download failed: %d: %s", resp.StatusCode, string(body)) + } + + out, err := os.Create(localPath) + if err != nil { + return err + } + defer func() { _ = out.Close() }() + + _, err = io.Copy(out, resp.Body) + return err +} + +// deleteFile removes a file from Proxmox storage. 
+func (b *ProxmoxBackend) deleteFile(ctx context.Context, volid string) error { + path := fmt.Sprintf("/nodes/%s/storage/local/content/%s", b.node, url.PathEscape(volid)) + _, err := b.apiDelete(ctx, path) + return err +} + +// apiGet performs a GET request against the Proxmox API. +func (b *ProxmoxBackend) apiGet(ctx context.Context, path string) (json.RawMessage, error) { + return b.apiRequest(ctx, http.MethodGet, path, nil) +} + +// apiPost performs a POST request against the Proxmox API. +func (b *ProxmoxBackend) apiPost(ctx context.Context, path string, params url.Values) (json.RawMessage, error) { + return b.apiRequest(ctx, http.MethodPost, path, params) +} + +// apiDelete performs a DELETE request against the Proxmox API. +func (b *ProxmoxBackend) apiDelete(ctx context.Context, path string) (json.RawMessage, error) { + return b.apiRequest(ctx, http.MethodDelete, path, nil) +} + +// apiRequest performs an authenticated HTTP request against the Proxmox API. +func (b *ProxmoxBackend) apiRequest(ctx context.Context, method, path string, params url.Values) (json.RawMessage, error) { + apiURL := fmt.Sprintf("%s/api2/json%s", b.host, path) + + var body io.Reader + if params != nil { + body = strings.NewReader(params.Encode()) + } + + req, err := http.NewRequestWithContext(ctx, method, apiURL, body) + if err != nil { + return nil, fmt.Errorf("create request: %w", err) + } + + req.Header.Set("Authorization", fmt.Sprintf("PVEAPIToken=%s=%s", b.tokenID, b.secret)) + if params != nil { + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + } + + resp, err := b.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("http request: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read response: %w", err) + } + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return nil, fmt.Errorf("api error %d: %s", resp.StatusCode, string(respBody)) + } + + var 
envelope struct { + Data json.RawMessage `json:"data"` + } + if err := json.Unmarshal(respBody, &envelope); err != nil { + return nil, fmt.Errorf("parse response: %w", err) + } + + return envelope.Data, nil +} + +// convertToQcow2 converts a vzdump archive to a QCOW2 image. +func convertToQcow2(ctx context.Context, src, dest string) error { + cmd := exec.CommandContext(ctx, "qemu-img", "convert", "-f", "raw", "-O", "qcow2", src, dest) + if output, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("qemu-img convert: %w: %s", err, string(output)) + } + return nil +} diff --git a/fluid-daemon/internal/snapshotpull/proxmox_backend_test.go b/fluid-daemon/internal/snapshotpull/proxmox_backend_test.go new file mode 100644 index 00000000..3a5b2464 --- /dev/null +++ b/fluid-daemon/internal/snapshotpull/proxmox_backend_test.go @@ -0,0 +1,30 @@ +package snapshotpull + +import ( + "testing" +) + +func TestNewProxmoxBackend(t *testing.T) { + b := NewProxmoxBackend("https://pve.example.com:8006", "user@pam!token", "secret", "node1", false, nil) + + if b.host != "https://pve.example.com:8006" { + t.Errorf("expected host https://pve.example.com:8006, got %s", b.host) + } + if b.tokenID != "user@pam!token" { + t.Errorf("expected tokenID user@pam!token, got %s", b.tokenID) + } + if b.secret != "secret" { + t.Errorf("expected secret 'secret', got %s", b.secret) + } + if b.node != "node1" { + t.Errorf("expected node node1, got %s", b.node) + } +} + +func TestNewProxmoxBackend_TrimsTrailingSlash(t *testing.T) { + b := NewProxmoxBackend("https://pve.example.com:8006/", "tok", "sec", "n1", false, nil) + + if b.host != "https://pve.example.com:8006" { + t.Errorf("expected trailing slash trimmed, got %s", b.host) + } +} diff --git a/fluid-daemon/internal/snapshotpull/puller.go b/fluid-daemon/internal/snapshotpull/puller.go new file mode 100644 index 00000000..a88ed11c --- /dev/null +++ b/fluid-daemon/internal/snapshotpull/puller.go @@ -0,0 +1,191 @@ +package snapshotpull + 
+import ( + "context" + "fmt" + "log/slog" + "os" + "regexp" + "strings" + "sync" + "time" + + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/image" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/state" + + "gorm.io/gorm" +) + +// PullRequest describes what to pull. +type PullRequest struct { + SourceHost string // identifier for the source host (e.g. hostname) + VMName string // VM name on the source host + SnapshotMode string // "cached" or "fresh" +} + +// PullResult describes the outcome of a pull. +type PullResult struct { + ImageName string + Cached bool + PulledAt time.Time +} + +// inflightEntry tracks an in-progress pull and its outcome. +type inflightEntry struct { + done chan struct{} + result *PullResult + err error +} + +// Puller orchestrates snapshot pulls with caching and deduplication. +type Puller struct { + imgStore *image.Store + db *gorm.DB + logger *slog.Logger + + mu sync.Mutex + inflight map[string]*inflightEntry +} + +// NewPuller creates a new Puller. +func NewPuller(imgStore *image.Store, db *gorm.DB, logger *slog.Logger) *Puller { + if logger == nil { + logger = slog.Default() + } + return &Puller{ + imgStore: imgStore, + db: db, + logger: logger.With("component", "puller"), + inflight: make(map[string]*inflightEntry), + } +} + +// Pull pulls a VM snapshot image, using the cache when appropriate. +// Concurrent pulls for the same image are deduplicated. 
+func (p *Puller) Pull(ctx context.Context, req PullRequest, backend SnapshotBackend) (*PullResult, error) { + imageName := cacheKey(req.SourceHost, req.VMName) + + // Check cache if mode is "cached" (default) + if req.SnapshotMode != "fresh" { + if result, ok := p.checkCache(ctx, imageName); ok { + p.logger.Info("cache hit", "image", imageName) + return result, nil + } + } + + // Deduplicate concurrent pulls for the same image + p.mu.Lock() + if entry, ok := p.inflight[imageName]; ok { + p.mu.Unlock() + // Wait for the in-flight pull to finish + select { + case <-entry.done: + return entry.result, entry.err + case <-ctx.Done(): + return nil, ctx.Err() + } + } + + // We're the first - register ourselves + entry := &inflightEntry{done: make(chan struct{})} + p.inflight[imageName] = entry + p.mu.Unlock() + + result, err := p.doPull(ctx, imageName, req, backend) + + // Store result on the entry so all waiters can read it + entry.result = result + entry.err = err + close(entry.done) + + // Clean up inflight map + p.mu.Lock() + delete(p.inflight, imageName) + p.mu.Unlock() + + return result, err +} + +// doPull performs the actual snapshot pull. 
+func (p *Puller) doPull(ctx context.Context, imageName string, req PullRequest, backend SnapshotBackend) (*PullResult, error) { + p.logger.Info("pulling snapshot", "image", imageName, "vm", req.VMName, "host", req.SourceHost) + + destPath := p.imgStore.BaseDir() + "/" + imageName + ".qcow2" + + // Remove existing file if doing a fresh pull + if req.SnapshotMode == "fresh" { + _ = os.Remove(destPath) + } + + // Snapshot and pull + if err := backend.SnapshotAndPull(ctx, req.VMName, destPath); err != nil { + return nil, fmt.Errorf("snapshot and pull: %w", err) + } + + // Extract kernel from the pulled image + _, err := image.ExtractKernel(ctx, destPath) + if err != nil { + p.logger.Warn("kernel extraction failed (sandbox may still work)", "image", imageName, "error", err) + } + + // Get file size + var sizeMB int64 + if info, err := os.Stat(destPath); err == nil { + sizeMB = info.Size() / (1024 * 1024) + } + + // Save to cache DB + now := time.Now().UTC() + cached := state.CachedImage{ + ID: imageName, + ImageName: imageName, + SourceHost: req.SourceHost, + VMName: req.VMName, + SizeMB: sizeMB, + PulledAt: now, + } + + // Upsert + if err := p.db.Where("image_name = ?", imageName). + Assign(cached). + FirstOrCreate(&cached).Error; err != nil { + p.logger.Warn("failed to save cache metadata", "image", imageName, "error", err) + } + + p.logger.Info("pull complete", "image", imageName, "size_mb", sizeMB) + + return &PullResult{ + ImageName: imageName, + Cached: false, + PulledAt: now, + }, nil +} + +// checkCache checks if an image is already cached and the file exists. 
+func (p *Puller) checkCache(ctx context.Context, imageName string) (*PullResult, bool) { + var cached state.CachedImage + if err := p.db.WithContext(ctx).Where("image_name = ?", imageName).First(&cached).Error; err != nil { + return nil, false + } + + // Verify the file still exists on disk + if !p.imgStore.HasImage(imageName) { + // DB says cached but file is gone - clean up + _ = p.db.Delete(&cached).Error + return nil, false + } + + return &PullResult{ + ImageName: cached.ImageName, + Cached: true, + PulledAt: cached.PulledAt, + }, true +} + +// cacheKey generates a sanitized cache key from host + vm name. +func cacheKey(host, vmName string) string { + safe := regexp.MustCompile(`[^a-zA-Z0-9_-]`) + h := safe.ReplaceAllString(host, "-") + v := safe.ReplaceAllString(vmName, "-") + return fmt.Sprintf("snap-%s-%s", strings.ToLower(h), strings.ToLower(v)) +} diff --git a/fluid-daemon/internal/snapshotpull/puller_test.go b/fluid-daemon/internal/snapshotpull/puller_test.go new file mode 100644 index 00000000..c48b8d98 --- /dev/null +++ b/fluid-daemon/internal/snapshotpull/puller_test.go @@ -0,0 +1,277 @@ +package snapshotpull + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/image" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/state" + + "github.com/glebarez/sqlite" + "gorm.io/gorm" + "gorm.io/gorm/logger" +) + +// mockBackend records calls and writes a dummy file. 
+type mockBackend struct { + callCount atomic.Int32 + delay time.Duration + failErr error +} + +func (m *mockBackend) SnapshotAndPull(_ context.Context, vmName string, destPath string) error { + m.callCount.Add(1) + if m.delay > 0 { + time.Sleep(m.delay) + } + if m.failErr != nil { + return m.failErr + } + // Write a dummy file + return os.WriteFile(destPath, []byte("fake-qcow2-data"), 0o644) +} + +func setupTestDB(t *testing.T) *gorm.DB { + t.Helper() + db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{ + Logger: logger.Default.LogMode(logger.Silent), + }) + if err != nil { + t.Fatal(err) + } + if err := db.AutoMigrate(&state.CachedImage{}); err != nil { + t.Fatal(err) + } + return db +} + +func setupTestImageStore(t *testing.T) *image.Store { + t.Helper() + dir := t.TempDir() + store, err := image.NewStore(dir, nil) + if err != nil { + t.Fatal(err) + } + return store +} + +func TestCacheKey(t *testing.T) { + tests := []struct { + host, vm string + want string + }{ + {"host1.example.com", "my-vm", "snap-host1-example-com-my-vm"}, + {"10.0.0.1", "test_vm", "snap-10-0-0-1-test_vm"}, + {"HOST", "VM", "snap-host-vm"}, + } + for _, tt := range tests { + got := cacheKey(tt.host, tt.vm) + if got != tt.want { + t.Errorf("cacheKey(%q, %q) = %q, want %q", tt.host, tt.vm, got, tt.want) + } + } +} + +func TestPuller_FreshPull(t *testing.T) { + db := setupTestDB(t) + imgStore := setupTestImageStore(t) + puller := NewPuller(imgStore, db, nil) + backend := &mockBackend{} + + req := PullRequest{ + SourceHost: "host1", + VMName: "vm1", + SnapshotMode: "fresh", + } + + result, err := puller.Pull(context.Background(), req, backend) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if result.Cached { + t.Error("expected Cached=false for fresh pull") + } + if result.ImageName == "" { + t.Error("expected non-empty ImageName") + } + if backend.callCount.Load() != 1 { + t.Errorf("expected 1 backend call, got %d", backend.callCount.Load()) + } + + // Verify file 
exists + path := filepath.Join(imgStore.BaseDir(), result.ImageName+".qcow2") + if _, err := os.Stat(path); os.IsNotExist(err) { + t.Error("expected image file to exist") + } + + // Verify DB record + var cached state.CachedImage + if err := db.Where("image_name = ?", result.ImageName).First(&cached).Error; err != nil { + t.Errorf("expected cache record in DB: %v", err) + } +} + +func TestPuller_CachedHit(t *testing.T) { + db := setupTestDB(t) + imgStore := setupTestImageStore(t) + puller := NewPuller(imgStore, db, nil) + backend := &mockBackend{} + + req := PullRequest{ + SourceHost: "host1", + VMName: "vm1", + SnapshotMode: "cached", + } + + // First pull - miss + result1, err := puller.Pull(context.Background(), req, backend) + if err != nil { + t.Fatalf("first pull error: %v", err) + } + if result1.Cached { + t.Error("first pull should not be cached") + } + + // Second pull - should be a cache hit + result2, err := puller.Pull(context.Background(), req, backend) + if err != nil { + t.Fatalf("second pull error: %v", err) + } + if !result2.Cached { + t.Error("second pull should be cached") + } + + // Backend should only be called once + if backend.callCount.Load() != 1 { + t.Errorf("expected 1 backend call, got %d", backend.callCount.Load()) + } +} + +func TestPuller_FreshBypassesCache(t *testing.T) { + db := setupTestDB(t) + imgStore := setupTestImageStore(t) + puller := NewPuller(imgStore, db, nil) + backend := &mockBackend{} + + // First pull to populate cache + req := PullRequest{ + SourceHost: "host1", + VMName: "vm1", + SnapshotMode: "cached", + } + _, err := puller.Pull(context.Background(), req, backend) + if err != nil { + t.Fatalf("first pull error: %v", err) + } + + // Fresh pull should bypass cache + req.SnapshotMode = "fresh" + result, err := puller.Pull(context.Background(), req, backend) + if err != nil { + t.Fatalf("fresh pull error: %v", err) + } + if result.Cached { + t.Error("fresh pull should not report cached") + } + if 
backend.callCount.Load() != 2 { + t.Errorf("expected 2 backend calls, got %d", backend.callCount.Load()) + } +} + +func TestPuller_DeduplicatesConcurrent(t *testing.T) { + db := setupTestDB(t) + imgStore := setupTestImageStore(t) + puller := NewPuller(imgStore, db, nil) + backend := &mockBackend{delay: 100 * time.Millisecond} + + req := PullRequest{ + SourceHost: "host1", + VMName: "vm1", + SnapshotMode: "fresh", + } + + var wg sync.WaitGroup + results := make([]*PullResult, 5) + errs := make([]error, 5) + + for i := 0; i < 5; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + results[idx], errs[idx] = puller.Pull(context.Background(), req, backend) + }(i) + } + + wg.Wait() + + for i, err := range errs { + if err != nil { + t.Errorf("goroutine %d error: %v", i, err) + } + } + + // Backend should only be called once despite 5 concurrent requests + if backend.callCount.Load() != 1 { + t.Errorf("expected 1 backend call (deduped), got %d", backend.callCount.Load()) + } +} + +func TestPuller_BackendError(t *testing.T) { + db := setupTestDB(t) + imgStore := setupTestImageStore(t) + puller := NewPuller(imgStore, db, nil) + backend := &mockBackend{failErr: fmt.Errorf("connection refused")} + + req := PullRequest{ + SourceHost: "host1", + VMName: "vm1", + SnapshotMode: "fresh", + } + + _, err := puller.Pull(context.Background(), req, backend) + if err == nil { + t.Fatal("expected error from backend failure") + } +} + +func TestPuller_CacheMissWhenFileDeleted(t *testing.T) { + db := setupTestDB(t) + imgStore := setupTestImageStore(t) + puller := NewPuller(imgStore, db, nil) + backend := &mockBackend{} + + req := PullRequest{ + SourceHost: "host1", + VMName: "vm1", + SnapshotMode: "cached", + } + + // First pull + result1, err := puller.Pull(context.Background(), req, backend) + if err != nil { + t.Fatalf("first pull error: %v", err) + } + + // Delete the file manually + path := filepath.Join(imgStore.BaseDir(), result1.ImageName+".qcow2") + _ = os.Remove(path) + + // 
Second pull should be a cache miss + result2, err := puller.Pull(context.Background(), req, backend) + if err != nil { + t.Fatalf("second pull error: %v", err) + } + if result2.Cached { + t.Error("expected cache miss when file deleted") + } + if backend.callCount.Load() != 2 { + t.Errorf("expected 2 backend calls, got %d", backend.callCount.Load()) + } +} diff --git a/fluid-daemon/internal/sourcevm/manager.go b/fluid-daemon/internal/sourcevm/manager.go new file mode 100644 index 00000000..22c8962e --- /dev/null +++ b/fluid-daemon/internal/sourcevm/manager.go @@ -0,0 +1,422 @@ +// Package sourcevm manages source VM operations: listing, validation, +// preparation for read-only access, and running read-only commands. +package sourcevm + +import ( + "bytes" + "context" + "fmt" + "log/slog" + "os/exec" + "strings" + "time" + + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/readonly" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/sshkeys" +) + +// VMInfo describes a source VM visible via libvirt. +type VMInfo struct { + Name string `json:"name"` + State string `json:"state"` + IPAddress string `json:"ip_address,omitempty"` + Prepared bool `json:"prepared"` +} + +// ValidationResult contains the result of validating a source VM. +type ValidationResult struct { + VMName string `json:"vm_name"` + Valid bool `json:"valid"` + State string `json:"state"` + MACAddress string `json:"mac_address,omitempty"` + IPAddress string `json:"ip_address,omitempty"` + HasNetwork bool `json:"has_network"` + Warnings []string `json:"warnings,omitempty"` + Errors []string `json:"errors,omitempty"` +} + +// PrepareResult contains the outcome of preparing a source VM. 
+type PrepareResult struct { + SourceVM string `json:"source_vm"` + IPAddress string `json:"ip_address,omitempty"` + Prepared bool `json:"prepared"` + UserCreated bool `json:"user_created"` + ShellInstalled bool `json:"shell_installed"` + CAKeyInstalled bool `json:"ca_key_installed"` + SSHDConfigured bool `json:"sshd_configured"` + PrincipalsCreated bool `json:"principals_created"` + SSHDRestarted bool `json:"sshd_restarted"` +} + +// Manager handles source VM operations. +type Manager struct { + libvirtURI string + network string + keyMgr sshkeys.KeyProvider + sshUser string + proxyJump string + logger *slog.Logger +} + +// NewManager creates a source VM manager. +func NewManager(libvirtURI, network string, keyMgr sshkeys.KeyProvider, sshUser, proxyJump string, logger *slog.Logger) *Manager { + if logger == nil { + logger = slog.Default() + } + if sshUser == "" { + sshUser = "sandbox" + } + return &Manager{ + libvirtURI: libvirtURI, + network: network, + keyMgr: keyMgr, + sshUser: sshUser, + proxyJump: proxyJump, + logger: logger.With("component", "sourcevm"), + } +} + +// ListVMs returns available source VMs (non-sandbox VMs visible to libvirt). +func (m *Manager) ListVMs(ctx context.Context) ([]VMInfo, error) { + // Use virsh to list all VMs + output, err := m.virsh(ctx, "list", "--all", "--name") + if err != nil { + return nil, fmt.Errorf("virsh list: %w", err) + } + + var vms []VMInfo + for _, line := range strings.Split(strings.TrimSpace(output), "\n") { + name := strings.TrimSpace(line) + if name == "" || strings.HasPrefix(name, "sbx-") { + continue // Skip sandbox VMs + } + + state, _ := m.getVMState(ctx, name) + ip := "" + if state == "running" { + ip, _ = m.getVMIP(ctx, name) + } + + vms = append(vms, VMInfo{ + Name: name, + State: state, + IPAddress: ip, + }) + } + + return vms, nil +} + +// ValidateSourceVM checks if a source VM is ready for read-only access. 
+func (m *Manager) ValidateSourceVM(ctx context.Context, vmName string) (*ValidationResult, error) { + result := &ValidationResult{ + VMName: vmName, + } + + // Check VM exists and get state + state, err := m.getVMState(ctx, vmName) + if err != nil { + result.Errors = append(result.Errors, fmt.Sprintf("VM not found: %v", err)) + return result, nil + } + result.State = state + + if state != "running" { + result.Errors = append(result.Errors, "VM is not running") + return result, nil + } + + // Check network/MAC + mac, err := m.getVMMAC(ctx, vmName) + if err == nil && mac != "" { + result.MACAddress = mac + result.HasNetwork = true + } else { + result.Warnings = append(result.Warnings, "Could not determine MAC address") + } + + // Check IP + ip, err := m.getVMIP(ctx, vmName) + if err == nil && ip != "" { + result.IPAddress = ip + } else { + result.Warnings = append(result.Warnings, "Could not determine IP address") + } + + // Check if fluid-readonly user exists by trying SSH + if ip != "" && m.keyMgr != nil { + creds, err := m.keyMgr.GetSourceVMCredentials(ctx, vmName) + if err == nil { + _, _, exitCode, err := m.sshCmd(ctx, ip, "fluid-readonly", creds, "whoami", 10*time.Second) + if err == nil && exitCode == 0 { + result.Valid = true + } else { + result.Warnings = append(result.Warnings, "SSH as fluid-readonly failed - VM may not be prepared") + } + } else { + result.Warnings = append(result.Warnings, "Could not get SSH credentials") + } + } + + if len(result.Errors) == 0 && result.HasNetwork && result.IPAddress != "" { + result.Valid = true + } + + return result, nil +} + +// PrepareSourceVM installs readonly shell, fluid-readonly user, SSH CA on a source VM. 
+func (m *Manager) PrepareSourceVM(ctx context.Context, vmName, sshUser, sshKeyPath string) (*PrepareResult, error) { + if sshUser == "" { + sshUser = m.sshUser + } + + ip, err := m.getVMIP(ctx, vmName) + if err != nil { + return nil, fmt.Errorf("get VM IP: %w", err) + } + + // Build SSH run function for the prepare flow + sshRun := func(ctx context.Context, command string) (string, string, int, error) { + return m.sshCmdWithKey(ctx, ip, sshUser, sshKeyPath, command, 60*time.Second) + } + + // Get CA public key + var caPubKey string + if m.keyMgr != nil { + // The key manager's CA should have the public key + // For now, we'll read it from the sshca package via the key path config + // This will be wired properly through the CA instance + caPubKey = "" // Will be set by caller + } + + if caPubKey == "" { + return nil, fmt.Errorf("CA public key is required for source VM preparation") + } + + result, err := readonly.Prepare(ctx, sshRun, caPubKey, nil, m.logger) + if err != nil { + return nil, err + } + + return &PrepareResult{ + SourceVM: vmName, + IPAddress: ip, + Prepared: true, + UserCreated: result.UserCreated, + ShellInstalled: result.ShellInstalled, + CAKeyInstalled: result.CAKeyInstalled, + SSHDConfigured: result.SSHDConfigured, + PrincipalsCreated: result.PrincipalsCreated, + SSHDRestarted: result.SSHDRestarted, + }, nil +} + +// PrepareSourceVMWithCA prepares a source VM with an explicit CA public key. 
+func (m *Manager) PrepareSourceVMWithCA(ctx context.Context, vmName, sshUser, sshKeyPath, caPubKey string) (*PrepareResult, error) { + if sshUser == "" { + sshUser = m.sshUser + } + + ip, err := m.getVMIP(ctx, vmName) + if err != nil { + return nil, fmt.Errorf("get VM IP: %w", err) + } + + sshRun := func(ctx context.Context, command string) (string, string, int, error) { + return m.sshCmdWithKey(ctx, ip, sshUser, sshKeyPath, command, 60*time.Second) + } + + result, err := readonly.Prepare(ctx, sshRun, caPubKey, nil, m.logger) + if err != nil { + return nil, err + } + + return &PrepareResult{ + SourceVM: vmName, + IPAddress: ip, + Prepared: true, + UserCreated: result.UserCreated, + ShellInstalled: result.ShellInstalled, + CAKeyInstalled: result.CAKeyInstalled, + SSHDConfigured: result.SSHDConfigured, + PrincipalsCreated: result.PrincipalsCreated, + SSHDRestarted: result.SSHDRestarted, + }, nil +} + +// RunSourceCommand executes a read-only command on a source VM. +// Two-layer validation: client-side allowlist + server-side restricted shell. +func (m *Manager) RunSourceCommand(ctx context.Context, vmName, command string, timeout time.Duration) (stdout, stderr string, exitCode int, err error) { + // Client-side validation + if err := readonly.ValidateCommand(command); err != nil { + return "", "", 126, fmt.Errorf("command validation: %w", err) + } + + ip, err := m.getVMIP(ctx, vmName) + if err != nil { + return "", "", -1, fmt.Errorf("get VM IP: %w", err) + } + + if m.keyMgr == nil { + return "", "", -1, fmt.Errorf("SSH key manager required for source VM commands") + } + + creds, err := m.keyMgr.GetSourceVMCredentials(ctx, vmName) + if err != nil { + return "", "", -1, fmt.Errorf("get credentials: %w", err) + } + + if timeout == 0 { + timeout = 30 * time.Second + } + + return m.sshCmd(ctx, ip, "fluid-readonly", creds, command, timeout) +} + +// ReadSourceFile reads a file from a source VM via base64-encoded transfer. 
+func (m *Manager) ReadSourceFile(ctx context.Context, vmName, path string) (string, error) { + // Use base64 encoding for safe binary transfer + command := fmt.Sprintf("base64 %s", path) + + stdout, stderr, exitCode, err := m.RunSourceCommand(ctx, vmName, command, 30*time.Second) + if err != nil { + return "", err + } + if exitCode != 0 { + return "", fmt.Errorf("read file failed (exit %d): %s", exitCode, stderr) + } + + return stdout, nil +} + +// --- Internal helpers --- + +func (m *Manager) virsh(ctx context.Context, args ...string) (string, error) { + allArgs := append([]string{"-c", m.libvirtURI}, args...) + cmd := exec.CommandContext(ctx, "virsh", allArgs...) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("%s: %w", stderr.String(), err) + } + return stdout.String(), nil +} + +func (m *Manager) getVMState(ctx context.Context, vmName string) (string, error) { + output, err := m.virsh(ctx, "domstate", vmName) + if err != nil { + return "", err + } + return strings.TrimSpace(output), nil +} + +func (m *Manager) getVMMAC(ctx context.Context, vmName string) (string, error) { + output, err := m.virsh(ctx, "domiflist", vmName) + if err != nil { + return "", err + } + + for _, line := range strings.Split(output, "\n") { + fields := strings.Fields(line) + if len(fields) >= 5 && fields[0] != "Interface" && !strings.HasPrefix(line, "---") { + return fields[4], nil + } + } + return "", fmt.Errorf("no MAC address found") +} + +func (m *Manager) getVMIP(ctx context.Context, vmName string) (string, error) { + output, err := m.virsh(ctx, "domifaddr", vmName, "--source", "lease") + if err != nil { + // Try agent source + output, err = m.virsh(ctx, "domifaddr", vmName, "--source", "agent") + if err != nil { + return "", err + } + } + + for _, line := range strings.Split(output, "\n") { + fields := strings.Fields(line) + if len(fields) >= 4 { + ipCIDR := fields[3] + ip := 
strings.SplitN(ipCIDR, "/", 2)[0] + if ip != "" && !strings.HasPrefix(ip, "127.") { + return ip, nil + } + } + } + return "", fmt.Errorf("no IP address found") +} + +func (m *Manager) sshCmd(ctx context.Context, ip, user string, creds *sshkeys.Credentials, command string, timeout time.Duration) (string, string, int, error) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + args := []string{ + "-i", creds.PrivateKeyPath, + "-o", "CertificateFile=" + creds.CertificatePath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-o", fmt.Sprintf("ConnectTimeout=%d", int(timeout.Seconds())), + } + + if m.proxyJump != "" { + args = append(args, "-J", m.proxyJump) + } + + args = append(args, fmt.Sprintf("%s@%s", user, ip), command) + + cmd := exec.CommandContext(ctx, "ssh", args...) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + exitCode := 0 + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + exitCode = exitErr.ExitCode() + } else { + return stdout.String(), stderr.String(), -1, err + } + } + + return stdout.String(), stderr.String(), exitCode, nil +} + +func (m *Manager) sshCmdWithKey(ctx context.Context, ip, user, keyPath, command string, timeout time.Duration) (string, string, int, error) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + args := []string{ + "-i", keyPath, + "-o", "StrictHostKeyChecking=no", + "-o", "UserKnownHostsFile=/dev/null", + "-o", fmt.Sprintf("ConnectTimeout=%d", int(timeout.Seconds())), + } + + if m.proxyJump != "" { + args = append(args, "-J", m.proxyJump) + } + + args = append(args, fmt.Sprintf("%s@%s", user, ip), command) + + cmd := exec.CommandContext(ctx, "ssh", args...) 
+ var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + exitCode := 0 + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + exitCode = exitErr.ExitCode() + } else { + return stdout.String(), stderr.String(), -1, err + } + } + + return stdout.String(), stderr.String(), exitCode, nil +} diff --git a/fluid/internal/sshca/ca.go b/fluid-daemon/internal/sshca/ca.go similarity index 96% rename from fluid/internal/sshca/ca.go rename to fluid-daemon/internal/sshca/ca.go index dc2326e0..49be47c9 100755 --- a/fluid/internal/sshca/ca.go +++ b/fluid-daemon/internal/sshca/ca.go @@ -5,9 +5,9 @@ // - Short-lived SSH certificate issuance // - Certificate validation and metadata // -// Certificates are designed to be ephemeral (1-10 minutes TTL) and are -// used to provide secure, auditable access to sandbox VMs without requiring -// any persistent credentials on the VM side. +// Certificates are designed to be ephemeral (default 30min, max 60min TTL) +// and are used to provide secure, auditable access to sandbox VMs without +// requiring any persistent credentials on the VM side. package sshca import ( @@ -75,8 +75,8 @@ type Config struct { // DefaultConfig returns a configuration with sensible defaults. 
func DefaultConfig() Config { return Config{ - CAKeyPath: "/etc/virsh-sandbox/ssh_ca", - CAPubKeyPath: "/etc/virsh-sandbox/ssh_ca.pub", + CAKeyPath: "/etc/fluid/ssh_ca", + CAPubKeyPath: "/etc/fluid/ssh_ca.pub", WorkDir: "/tmp/sshca", DefaultTTL: 30 * time.Minute, MaxTTL: 60 * time.Minute, @@ -296,7 +296,10 @@ func (ca *CA) IssueCertificate(ctx context.Context, req *CertificateRequest) (*C } // Generate unique certificate ID - certID := ca.generateCertID() + certID, err := ca.generateCertID() + if err != nil { + return nil, err + } // Build certificate identity identity := fmt.Sprintf("user:%s-vm:%s-sbx:%s-cert:%s", @@ -312,7 +315,7 @@ func (ca *CA) IssueCertificate(ctx context.Context, req *CertificateRequest) (*C validBefore := now.Add(ttl) // Format validity for ssh-keygen - validityStr := fmt.Sprintf("+%dm", int(ttl.Minutes())) + validityStr := fmt.Sprintf("+%ds", int(ttl.Seconds())) // Create temporary directory for this certificate tempDir, err := os.MkdirTemp(ca.cfg.WorkDir, "cert-") @@ -557,13 +560,12 @@ func (ca *CA) validateRequest(req *CertificateRequest) error { } // generateCertID generates a unique certificate identifier. -func (ca *CA) generateCertID() string { +func (ca *CA) generateCertID() (string, error) { var b [16]byte if _, err := rand.Read(b[:]); err != nil { - // Fallback to time-based ID if random fails (extremely unlikely) - return fmt.Sprintf("%x", time.Now().UnixNano()) + return "", fmt.Errorf("generate cert ID: %w", err) } - return fmt.Sprintf("%x", b[:8]) + return fmt.Sprintf("%x", b[:]), nil } // CertInfo extracts information from a certificate for display/audit. 
@@ -604,6 +606,6 @@ func (c *Certificate) SSHConnectCommand(privateKeyPath, certPath, vmIP string, p if len(c.Principals) > 0 { principal = c.Principals[0] } - return fmt.Sprintf("ssh -i %s -o CertificateFile=%s -o StrictHostKeyChecking=no -p %d %s@%s", + return fmt.Sprintf("ssh -i %s -o CertificateFile=%s -o StrictHostKeyChecking=accept-new -p %d %s@%s", privateKeyPath, certPath, port, principal, vmIP) } diff --git a/fluid-remote/internal/sshca/memstore.go b/fluid-daemon/internal/sshca/memstore.go similarity index 100% rename from fluid-remote/internal/sshca/memstore.go rename to fluid-daemon/internal/sshca/memstore.go diff --git a/fluid/internal/sshca/store.go b/fluid-daemon/internal/sshca/store.go similarity index 100% rename from fluid/internal/sshca/store.go rename to fluid-daemon/internal/sshca/store.go diff --git a/fluid-daemon/internal/sshconfig/parser.go b/fluid-daemon/internal/sshconfig/parser.go new file mode 100644 index 00000000..d4a9d5a8 --- /dev/null +++ b/fluid-daemon/internal/sshconfig/parser.go @@ -0,0 +1,144 @@ +// Package sshconfig parses SSH config files and probes remote hosts +// to detect hypervisor capabilities. +package sshconfig + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// SSHHost represents a parsed host entry from an SSH config file. +type SSHHost struct { + Name string // Host alias + HostName string // actual hostname/IP + User string + Port int + IdentityFile string +} + +// Parse reads SSH config content from a reader and returns structured host entries. +// Wildcard entries (Host *) are skipped. 
+func Parse(r io.Reader) ([]SSHHost, error) { + scanner := bufio.NewScanner(r) + var hosts []SSHHost + var current *SSHHost + + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + // Skip comments and empty lines + if line == "" || strings.HasPrefix(line, "#") { + continue + } + + // Split on first whitespace or = + key, value := splitDirective(line) + if key == "" { + continue + } + + switch strings.ToLower(key) { + case "host": + // Save previous host if any + if current != nil { + hosts = append(hosts, *current) + } + + // Skip wildcard entries + if strings.Contains(value, "*") || strings.Contains(value, "?") { + current = nil + continue + } + + current = &SSHHost{ + Name: value, + Port: 22, + } + + case "hostname": + if current != nil { + current.HostName = value + } + + case "user": + if current != nil { + current.User = value + } + + case "port": + if current != nil { + if p, err := strconv.Atoi(value); err == nil { + current.Port = p + } + } + + case "identityfile": + if current != nil { + current.IdentityFile = expandTilde(value) + } + } + } + + // Save last host + if current != nil { + hosts = append(hosts, *current) + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("scan ssh config: %w", err) + } + + // Fill in defaults: if HostName is empty, use Name + for i := range hosts { + if hosts[i].HostName == "" { + hosts[i].HostName = hosts[i].Name + } + } + + return hosts, nil +} + +// ParseFile reads an SSH config file from the given path. +func ParseFile(path string) ([]SSHHost, error) { + f, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("open ssh config %s: %w", path, err) + } + defer func() { _ = f.Close() }() + return Parse(f) +} + +// splitDirective splits an SSH config line into key and value. +// Handles both "Key Value" and "Key=Value" formats. 
+func splitDirective(line string) (string, string) { + // Try = separator first + if idx := strings.IndexByte(line, '='); idx >= 0 { + return strings.TrimSpace(line[:idx]), strings.TrimSpace(line[idx+1:]) + } + + // Split on whitespace + parts := strings.SplitN(line, " ", 2) + if len(parts) < 2 { + parts = strings.SplitN(line, "\t", 2) + } + if len(parts) < 2 { + return parts[0], "" + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) +} + +// expandTilde replaces a leading ~ with the user's home directory. +func expandTilde(path string) string { + if strings.HasPrefix(path, "~/") { + home, err := os.UserHomeDir() + if err != nil { + return path + } + return home + path[1:] + } + return path +} diff --git a/fluid-daemon/internal/sshconfig/parser_test.go b/fluid-daemon/internal/sshconfig/parser_test.go new file mode 100644 index 00000000..6c491a2d --- /dev/null +++ b/fluid-daemon/internal/sshconfig/parser_test.go @@ -0,0 +1,220 @@ +package sshconfig + +import ( + "strings" + "testing" +) + +func TestParse_BasicConfig(t *testing.T) { + config := ` +Host webserver + HostName 10.0.0.1 + User admin + Port 2222 + IdentityFile /home/user/.ssh/id_rsa + +Host dbserver + HostName db.example.com + User postgres +` + hosts, err := Parse(strings.NewReader(config)) + if err != nil { + t.Fatal(err) + } + + if len(hosts) != 2 { + t.Fatalf("expected 2 hosts, got %d", len(hosts)) + } + + web := hosts[0] + if web.Name != "webserver" { + t.Errorf("expected name webserver, got %s", web.Name) + } + if web.HostName != "10.0.0.1" { + t.Errorf("expected hostname 10.0.0.1, got %s", web.HostName) + } + if web.User != "admin" { + t.Errorf("expected user admin, got %s", web.User) + } + if web.Port != 2222 { + t.Errorf("expected port 2222, got %d", web.Port) + } + if web.IdentityFile != "/home/user/.ssh/id_rsa" { + t.Errorf("expected identity file, got %s", web.IdentityFile) + } + + db := hosts[1] + if db.Name != "dbserver" { + t.Errorf("expected name dbserver, got %s", 
db.Name) + } + if db.Port != 22 { + t.Errorf("expected default port 22, got %d", db.Port) + } +} + +func TestParse_SkipsWildcard(t *testing.T) { + config := ` +Host * + ServerAliveInterval 60 + +Host myhost + HostName 192.168.1.1 +` + hosts, err := Parse(strings.NewReader(config)) + if err != nil { + t.Fatal(err) + } + + if len(hosts) != 1 { + t.Fatalf("expected 1 host (wildcard skipped), got %d", len(hosts)) + } + if hosts[0].Name != "myhost" { + t.Errorf("expected myhost, got %s", hosts[0].Name) + } +} + +func TestParse_HostNameDefaultsToName(t *testing.T) { + config := ` +Host myserver + User root + Port 22 +` + hosts, err := Parse(strings.NewReader(config)) + if err != nil { + t.Fatal(err) + } + + if len(hosts) != 1 { + t.Fatalf("expected 1 host, got %d", len(hosts)) + } + if hosts[0].HostName != "myserver" { + t.Errorf("expected HostName to default to Name, got %s", hosts[0].HostName) + } +} + +func TestParse_CommentsAndEmptyLines(t *testing.T) { + config := ` +# This is a comment +Host server1 + # Another comment + HostName 10.0.0.1 + + User admin +` + hosts, err := Parse(strings.NewReader(config)) + if err != nil { + t.Fatal(err) + } + + if len(hosts) != 1 { + t.Fatalf("expected 1 host, got %d", len(hosts)) + } + if hosts[0].HostName != "10.0.0.1" { + t.Errorf("expected 10.0.0.1, got %s", hosts[0].HostName) + } + if hosts[0].User != "admin" { + t.Errorf("expected admin, got %s", hosts[0].User) + } +} + +func TestParse_EqualsFormat(t *testing.T) { + config := ` +Host myhost + HostName=10.0.0.1 + User=root + Port=2222 +` + hosts, err := Parse(strings.NewReader(config)) + if err != nil { + t.Fatal(err) + } + + if len(hosts) != 1 { + t.Fatalf("expected 1 host, got %d", len(hosts)) + } + if hosts[0].HostName != "10.0.0.1" { + t.Errorf("expected 10.0.0.1, got %s", hosts[0].HostName) + } + if hosts[0].User != "root" { + t.Errorf("expected root, got %s", hosts[0].User) + } + if hosts[0].Port != 2222 { + t.Errorf("expected 2222, got %d", hosts[0].Port) + } +} + +func 
TestParse_EmptyInput(t *testing.T) { + hosts, err := Parse(strings.NewReader("")) + if err != nil { + t.Fatal(err) + } + if len(hosts) != 0 { + t.Errorf("expected 0 hosts, got %d", len(hosts)) + } +} + +func TestParse_MultipleHosts(t *testing.T) { + config := ` +Host prod1 + HostName 10.0.1.1 + User deploy + +Host prod2 + HostName 10.0.1.2 + User deploy + +Host staging + HostName 10.0.2.1 + User staging + IdentityFile ~/.ssh/staging_key +` + hosts, err := Parse(strings.NewReader(config)) + if err != nil { + t.Fatal(err) + } + + if len(hosts) != 3 { + t.Fatalf("expected 3 hosts, got %d", len(hosts)) + } +} + +func TestParse_SkipsQuestionMarkWildcard(t *testing.T) { + config := ` +Host web? + HostName 10.0.0.1 + +Host realhost + HostName 10.0.0.2 +` + hosts, err := Parse(strings.NewReader(config)) + if err != nil { + t.Fatal(err) + } + + if len(hosts) != 1 { + t.Fatalf("expected 1 host, got %d", len(hosts)) + } + if hosts[0].Name != "realhost" { + t.Errorf("expected realhost, got %s", hosts[0].Name) + } +} + +func TestSplitDirective(t *testing.T) { + tests := []struct { + line string + key string + val string + }{ + {"Host myhost", "Host", "myhost"}, + {"HostName=10.0.0.1", "HostName", "10.0.0.1"}, + {" Port 22", "Port", "22"}, + {"User\troot", "User", "root"}, + } + + for _, tt := range tests { + key, val := splitDirective(strings.TrimSpace(tt.line)) + if key != tt.key || val != tt.val { + t.Errorf("splitDirective(%q) = (%q, %q), want (%q, %q)", tt.line, key, val, tt.key, tt.val) + } + } +} diff --git a/fluid-daemon/internal/sshconfig/prober.go b/fluid-daemon/internal/sshconfig/prober.go new file mode 100644 index 00000000..cbdda9d7 --- /dev/null +++ b/fluid-daemon/internal/sshconfig/prober.go @@ -0,0 +1,113 @@ +package sshconfig + +import ( + "bytes" + "context" + "fmt" + "os/exec" + "strings" + "sync" + "time" +) + +// ProbeResult describes the outcome of probing a single host. 
+type ProbeResult struct { + Host SSHHost + Reachable bool + HasLibvirt bool + HasProxmox bool + VMs []string + Error string +} + +// ProbeAll probes multiple hosts concurrently and returns results. +func ProbeAll(ctx context.Context, hosts []SSHHost) []ProbeResult { + results := make([]ProbeResult, len(hosts)) + var wg sync.WaitGroup + + for i, host := range hosts { + wg.Add(1) + go func(idx int, h SSHHost) { + defer wg.Done() + results[idx] = Probe(ctx, h) + }(i, host) + } + + wg.Wait() + return results +} + +// Probe connects to a single host via SSH and detects hypervisor capabilities. +// Uses a 10-second timeout per host. +func Probe(ctx context.Context, host SSHHost) ProbeResult { + result := ProbeResult{Host: host} + + probeCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + // Test connectivity by running a simple command + out, err := sshRun(probeCtx, host, "echo ok") + if err != nil { + result.Error = fmt.Sprintf("unreachable: %v", err) + return result + } + if !strings.Contains(out, "ok") { + result.Error = "unexpected ssh response" + return result + } + result.Reachable = true + + // Detect libvirt + _, err = sshRun(probeCtx, host, "which virsh") + if err == nil { + result.HasLibvirt = true + + // List VMs + vmOut, err := sshRun(probeCtx, host, "virsh list --all --name") + if err == nil { + for _, line := range strings.Split(vmOut, "\n") { + name := strings.TrimSpace(line) + if name != "" { + result.VMs = append(result.VMs, name) + } + } + } + } + + // Detect Proxmox + _, err = sshRun(probeCtx, host, "which pveversion 2>/dev/null || test -x /usr/bin/pvesh") + if err == nil { + result.HasProxmox = true + } + + return result +} + +// sshRun executes a command on a remote host via SSH. 
+func sshRun(ctx context.Context, host SSHHost, command string) (string, error) { + args := []string{ + "-o", "StrictHostKeyChecking=no", + "-o", "BatchMode=yes", + "-o", "ConnectTimeout=5", + "-p", fmt.Sprintf("%d", host.Port), + } + if host.IdentityFile != "" { + args = append(args, "-i", host.IdentityFile) + } + + target := host.HostName + if host.User != "" { + target = host.User + "@" + host.HostName + } + args = append(args, target, command) + + cmd := exec.CommandContext(ctx, "ssh", args...) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("%w: %s", err, stderr.String()) + } + return stdout.String(), nil +} diff --git a/fluid-daemon/internal/sshconfig/prober_test.go b/fluid-daemon/internal/sshconfig/prober_test.go new file mode 100644 index 00000000..589e8f5a --- /dev/null +++ b/fluid-daemon/internal/sshconfig/prober_test.go @@ -0,0 +1,27 @@ +package sshconfig + +import ( + "testing" +) + +func TestProbeResult_DefaultValues(t *testing.T) { + result := ProbeResult{ + Host: SSHHost{Name: "test", HostName: "10.0.0.1", Port: 22}, + } + + if result.Reachable { + t.Error("expected Reachable=false by default") + } + if result.HasLibvirt { + t.Error("expected HasLibvirt=false by default") + } + if result.HasProxmox { + t.Error("expected HasProxmox=false by default") + } + if len(result.VMs) != 0 { + t.Error("expected no VMs by default") + } +} + +// Note: integration tests for Probe() and ProbeAll() require actual SSH access +// and are not included here. These would be tested via manual or end-to-end tests. 
diff --git a/fluid/internal/sshkeys/manager.go b/fluid-daemon/internal/sshkeys/manager.go similarity index 99% rename from fluid/internal/sshkeys/manager.go rename to fluid-daemon/internal/sshkeys/manager.go index db346fec..38b61783 100755 --- a/fluid/internal/sshkeys/manager.go +++ b/fluid-daemon/internal/sshkeys/manager.go @@ -15,7 +15,7 @@ import ( "sync" "time" - "github.com/aspectrr/fluid.sh/fluid/internal/sshca" + "github.com/aspectrr/fluid.sh/fluid-daemon/internal/sshca" ) // vmNameSanitizer is a compiled regex for sanitizing VM names used in filesystem paths. diff --git a/fluid-daemon/internal/state/sqlite.go b/fluid-daemon/internal/state/sqlite.go new file mode 100644 index 00000000..8244ced0 --- /dev/null +++ b/fluid-daemon/internal/state/sqlite.go @@ -0,0 +1,169 @@ +// Package state provides local SQLite state storage for the sandbox host. +package state + +import ( + "context" + "fmt" + "time" + + "github.com/glebarez/sqlite" + "gorm.io/gorm" + "gorm.io/gorm/logger" +) + +// Sandbox represents a sandbox in local state. +type Sandbox struct { + ID string `gorm:"primaryKey"` + Name string `gorm:"index"` + AgentID string `gorm:"index"` + BaseImage string + Bridge string + TAPDevice string + MACAddress string + IPAddress string + State string `gorm:"index"` + PID int + VCPUs int + MemoryMB int + TTLSeconds int + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time `gorm:"index"` +} + +// CachedImage tracks a pulled snapshot image in the local cache. +type CachedImage struct { + ID string `gorm:"primaryKey"` + ImageName string `gorm:"uniqueIndex"` + SourceHost string `gorm:"index"` + VMName string `gorm:"index"` + SizeMB int64 + PulledAt time.Time +} + +// Command represents a command execution record. 
+type Command struct { + ID string `gorm:"primaryKey"` + SandboxID string `gorm:"index"` + Command string + Stdout string + Stderr string + ExitCode int + DurationMS int64 + StartedAt time.Time + EndedAt time.Time +} + +// Store provides local state persistence via SQLite. +type Store struct { + db *gorm.DB +} + +// NewStore creates a new SQLite state store. +func NewStore(dbPath string) (*Store, error) { + db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{ + NowFunc: func() time.Time { return time.Now().UTC() }, + Logger: logger.Default.LogMode(logger.Silent), + }) + if err != nil { + return nil, fmt.Errorf("open sqlite: %w", err) + } + + // Auto-migrate tables + if err := db.AutoMigrate(&Sandbox{}, &Command{}, &CachedImage{}); err != nil { + return nil, fmt.Errorf("auto-migrate: %w", err) + } + + return &Store{db: db}, nil +} + +// DB returns the underlying GORM database handle. +func (s *Store) DB() *gorm.DB { return s.db } + +// Close closes the database connection. +func (s *Store) Close() error { + sqlDB, err := s.db.DB() + if err != nil { + return err + } + return sqlDB.Close() +} + +// CreateSandbox creates a new sandbox record. +func (s *Store) CreateSandbox(ctx context.Context, sb *Sandbox) error { + return s.db.WithContext(ctx).Create(sb).Error +} + +// GetSandbox retrieves a sandbox by ID. +func (s *Store) GetSandbox(ctx context.Context, id string) (*Sandbox, error) { + var sb Sandbox + if err := s.db.WithContext(ctx).Where("id = ? AND deleted_at IS NULL", id).First(&sb).Error; err != nil { + return nil, err + } + return &sb, nil +} + +// ListSandboxes returns all non-deleted sandboxes. +func (s *Store) ListSandboxes(ctx context.Context) ([]*Sandbox, error) { + var sandboxes []*Sandbox + if err := s.db.WithContext(ctx).Where("deleted_at IS NULL").Find(&sandboxes).Error; err != nil { + return nil, err + } + return sandboxes, nil +} + +// UpdateSandbox updates a sandbox record. 
+func (s *Store) UpdateSandbox(ctx context.Context, sb *Sandbox) error { + return s.db.WithContext(ctx).Save(sb).Error +} + +// DeleteSandbox soft-deletes a sandbox. +func (s *Store) DeleteSandbox(ctx context.Context, id string) error { + now := time.Now().UTC() + return s.db.WithContext(ctx).Model(&Sandbox{}).Where("id = ?", id). + Updates(map[string]any{ + "deleted_at": &now, + "state": "DESTROYED", + }).Error +} + +// ListExpiredSandboxes returns sandboxes past their TTL. +func (s *Store) ListExpiredSandboxes(ctx context.Context, defaultTTL time.Duration) ([]*Sandbox, error) { + var sandboxes []*Sandbox + now := time.Now().UTC() + + // Find sandboxes where TTL has expired + err := s.db.WithContext(ctx). + Where("deleted_at IS NULL AND state NOT IN (?, ?)", "DESTROYED", "ERROR"). + Find(&sandboxes).Error + if err != nil { + return nil, err + } + + var expired []*Sandbox + for _, sb := range sandboxes { + ttl := defaultTTL + if sb.TTLSeconds > 0 { + ttl = time.Duration(sb.TTLSeconds) * time.Second + } + if ttl > 0 && now.After(sb.CreatedAt.Add(ttl)) { + expired = append(expired, sb) + } + } + + return expired, nil +} + +// CreateCommand creates a command execution record. +func (s *Store) CreateCommand(ctx context.Context, cmd *Command) error { + return s.db.WithContext(ctx).Create(cmd).Error +} + +// ListSandboxCommands returns commands for a sandbox. 
+func (s *Store) ListSandboxCommands(ctx context.Context, sandboxID string) ([]*Command, error) { + var commands []*Command + if err := s.db.WithContext(ctx).Where("sandbox_id = ?", sandboxID).Order("started_at DESC").Find(&commands).Error; err != nil { + return nil, err + } + return commands, nil +} diff --git a/fluid-daemon/internal/state/sqlite_test.go b/fluid-daemon/internal/state/sqlite_test.go new file mode 100644 index 00000000..94f80c68 --- /dev/null +++ b/fluid-daemon/internal/state/sqlite_test.go @@ -0,0 +1,443 @@ +package state + +import ( + "context" + "testing" + "time" +) + +func newTestStore(t *testing.T) *Store { + t.Helper() + store, err := NewStore(":memory:") + if err != nil { + t.Fatalf("NewStore(:memory:) failed: %v", err) + } + t.Cleanup(func() { _ = store.Close() }) + return store +} + +func TestNewStore(t *testing.T) { + store, err := NewStore(":memory:") + if err != nil { + t.Fatalf("NewStore failed: %v", err) + } + defer func() { _ = store.Close() }() + + if store.db == nil { + t.Fatal("expected db to be non-nil") + } + + // Verify tables were created by checking we can query them. 
+ var count int64 + if err := store.db.Model(&Sandbox{}).Count(&count).Error; err != nil { + t.Fatalf("sandbox table query failed: %v", err) + } + if err := store.db.Model(&Command{}).Count(&count).Error; err != nil { + t.Fatalf("command table query failed: %v", err) + } +} + +func TestCreateSandbox_GetSandbox(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + sb := &Sandbox{ + ID: "SBX-test1", + Name: "test-sandbox", + AgentID: "agent-1", + BaseImage: "/images/ubuntu.qcow2", + Bridge: "br0", + TAPDevice: "tap0", + MACAddress: "52:54:00:00:00:01", + IPAddress: "192.168.1.10", + State: "RUNNING", + PID: 1234, + VCPUs: 2, + MemoryMB: 2048, + TTLSeconds: 3600, + } + + if err := store.CreateSandbox(ctx, sb); err != nil { + t.Fatalf("CreateSandbox failed: %v", err) + } + + got, err := store.GetSandbox(ctx, "SBX-test1") + if err != nil { + t.Fatalf("GetSandbox failed: %v", err) + } + + if got.ID != sb.ID { + t.Errorf("ID = %q, want %q", got.ID, sb.ID) + } + if got.Name != sb.Name { + t.Errorf("Name = %q, want %q", got.Name, sb.Name) + } + if got.AgentID != sb.AgentID { + t.Errorf("AgentID = %q, want %q", got.AgentID, sb.AgentID) + } + if got.BaseImage != sb.BaseImage { + t.Errorf("BaseImage = %q, want %q", got.BaseImage, sb.BaseImage) + } + if got.Bridge != sb.Bridge { + t.Errorf("Bridge = %q, want %q", got.Bridge, sb.Bridge) + } + if got.TAPDevice != sb.TAPDevice { + t.Errorf("TAPDevice = %q, want %q", got.TAPDevice, sb.TAPDevice) + } + if got.MACAddress != sb.MACAddress { + t.Errorf("MACAddress = %q, want %q", got.MACAddress, sb.MACAddress) + } + if got.IPAddress != sb.IPAddress { + t.Errorf("IPAddress = %q, want %q", got.IPAddress, sb.IPAddress) + } + if got.State != sb.State { + t.Errorf("State = %q, want %q", got.State, sb.State) + } + if got.PID != sb.PID { + t.Errorf("PID = %d, want %d", got.PID, sb.PID) + } + if got.VCPUs != sb.VCPUs { + t.Errorf("VCPUs = %d, want %d", got.VCPUs, sb.VCPUs) + } + if got.MemoryMB != sb.MemoryMB { + 
t.Errorf("MemoryMB = %d, want %d", got.MemoryMB, sb.MemoryMB) + } + if got.TTLSeconds != sb.TTLSeconds { + t.Errorf("TTLSeconds = %d, want %d", got.TTLSeconds, sb.TTLSeconds) + } + if got.CreatedAt.IsZero() { + t.Error("CreatedAt should not be zero") + } +} + +func TestCreateSandbox_GetSandbox_NotFound(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + _, err := store.GetSandbox(ctx, "SBX-nonexistent") + if err == nil { + t.Fatal("expected error for nonexistent sandbox, got nil") + } +} + +func TestListSandboxes(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + sb1 := &Sandbox{ID: "SBX-list1", Name: "sb1", State: "RUNNING"} + sb2 := &Sandbox{ID: "SBX-list2", Name: "sb2", State: "RUNNING"} + sb3 := &Sandbox{ID: "SBX-list3", Name: "sb3", State: "RUNNING"} + + for _, sb := range []*Sandbox{sb1, sb2, sb3} { + if err := store.CreateSandbox(ctx, sb); err != nil { + t.Fatalf("CreateSandbox(%s) failed: %v", sb.ID, err) + } + } + + // Soft-delete one sandbox. 
+ if err := store.DeleteSandbox(ctx, "SBX-list2"); err != nil { + t.Fatalf("DeleteSandbox failed: %v", err) + } + + list, err := store.ListSandboxes(ctx) + if err != nil { + t.Fatalf("ListSandboxes failed: %v", err) + } + + if len(list) != 2 { + t.Fatalf("ListSandboxes returned %d sandboxes, want 2", len(list)) + } + + ids := map[string]bool{} + for _, sb := range list { + ids[sb.ID] = true + } + if !ids["SBX-list1"] { + t.Error("expected SBX-list1 in list") + } + if !ids["SBX-list3"] { + t.Error("expected SBX-list3 in list") + } + if ids["SBX-list2"] { + t.Error("SBX-list2 should not be in list (soft-deleted)") + } +} + +func TestUpdateSandbox(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + sb := &Sandbox{ + ID: "SBX-update1", + Name: "before-update", + State: "RUNNING", + IPAddress: "10.0.0.1", + VCPUs: 1, + MemoryMB: 512, + } + if err := store.CreateSandbox(ctx, sb); err != nil { + t.Fatalf("CreateSandbox failed: %v", err) + } + + // Modify fields and update. 
+ sb.Name = "after-update" + sb.State = "STOPPED" + sb.IPAddress = "10.0.0.2" + sb.VCPUs = 4 + sb.MemoryMB = 4096 + + if err := store.UpdateSandbox(ctx, sb); err != nil { + t.Fatalf("UpdateSandbox failed: %v", err) + } + + got, err := store.GetSandbox(ctx, "SBX-update1") + if err != nil { + t.Fatalf("GetSandbox failed: %v", err) + } + + if got.Name != "after-update" { + t.Errorf("Name = %q, want %q", got.Name, "after-update") + } + if got.State != "STOPPED" { + t.Errorf("State = %q, want %q", got.State, "STOPPED") + } + if got.IPAddress != "10.0.0.2" { + t.Errorf("IPAddress = %q, want %q", got.IPAddress, "10.0.0.2") + } + if got.VCPUs != 4 { + t.Errorf("VCPUs = %d, want 4", got.VCPUs) + } + if got.MemoryMB != 4096 { + t.Errorf("MemoryMB = %d, want 4096", got.MemoryMB) + } +} + +func TestDeleteSandbox(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + sb := &Sandbox{ID: "SBX-del1", Name: "to-delete", State: "RUNNING"} + if err := store.CreateSandbox(ctx, sb); err != nil { + t.Fatalf("CreateSandbox failed: %v", err) + } + + if err := store.DeleteSandbox(ctx, "SBX-del1"); err != nil { + t.Fatalf("DeleteSandbox failed: %v", err) + } + + // GetSandbox should not find it (deleted_at IS NULL filter). + _, err := store.GetSandbox(ctx, "SBX-del1") + if err == nil { + t.Fatal("expected error after soft delete, got nil") + } + + // Verify the record still exists with DESTROYED state and non-nil deleted_at + // by querying without the deleted_at filter. 
+ var raw Sandbox + if err := store.db.Where("id = ?", "SBX-del1").First(&raw).Error; err != nil { + t.Fatalf("raw query failed: %v", err) + } + if raw.State != "DESTROYED" { + t.Errorf("State = %q, want %q", raw.State, "DESTROYED") + } + if raw.DeletedAt == nil { + t.Error("DeletedAt should be non-nil after soft delete") + } +} + +func TestListExpiredSandboxes(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + now := time.Now().UTC() + + // Sandbox with custom TTL of 60s, created 2 minutes ago - expired. + sb1 := &Sandbox{ + ID: "SBX-exp1", + Name: "expired-custom-ttl", + State: "RUNNING", + TTLSeconds: 60, + CreatedAt: now.Add(-2 * time.Minute), + } + + // Sandbox with no custom TTL, created 2 minutes ago. + // Will expire with a 1-minute default TTL. + sb2 := &Sandbox{ + ID: "SBX-exp2", + Name: "expired-default-ttl", + State: "RUNNING", + CreatedAt: now.Add(-2 * time.Minute), + } + + // Sandbox created just now - not expired. + sb3 := &Sandbox{ + ID: "SBX-fresh", + Name: "fresh", + State: "RUNNING", + TTLSeconds: 3600, + CreatedAt: now, + } + + // Sandbox that is already DESTROYED - should not appear. 
+ sb4 := &Sandbox{ + ID: "SBX-destroyed", + Name: "destroyed", + State: "DESTROYED", + CreatedAt: now.Add(-10 * time.Minute), + } + + for _, sb := range []*Sandbox{sb1, sb2, sb3, sb4} { + if err := store.CreateSandbox(ctx, sb); err != nil { + t.Fatalf("CreateSandbox(%s) failed: %v", sb.ID, err) + } + } + + defaultTTL := 1 * time.Minute + expired, err := store.ListExpiredSandboxes(ctx, defaultTTL) + if err != nil { + t.Fatalf("ListExpiredSandboxes failed: %v", err) + } + + ids := map[string]bool{} + for _, sb := range expired { + ids[sb.ID] = true + } + + if !ids["SBX-exp1"] { + t.Error("SBX-exp1 should be expired (custom TTL 60s, created 2m ago)") + } + if !ids["SBX-exp2"] { + t.Error("SBX-exp2 should be expired (default TTL 1m, created 2m ago)") + } + if ids["SBX-fresh"] { + t.Error("SBX-fresh should not be expired") + } + if ids["SBX-destroyed"] { + t.Error("SBX-destroyed should not appear (state=DESTROYED)") + } + + if len(expired) != 2 { + t.Errorf("expected 2 expired sandboxes, got %d", len(expired)) + } +} + +func TestListExpiredSandboxes_NoExpired(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + now := time.Now().UTC() + + sb := &Sandbox{ + ID: "SBX-noexp", + Name: "not-expired", + State: "RUNNING", + TTLSeconds: 3600, + CreatedAt: now, + } + if err := store.CreateSandbox(ctx, sb); err != nil { + t.Fatalf("CreateSandbox failed: %v", err) + } + + expired, err := store.ListExpiredSandboxes(ctx, 1*time.Hour) + if err != nil { + t.Fatalf("ListExpiredSandboxes failed: %v", err) + } + + if len(expired) != 0 { + t.Errorf("expected 0 expired sandboxes, got %d", len(expired)) + } +} + +func TestCreateCommand_ListSandboxCommands(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + // Create a sandbox first (foreign key context). 
+ sb := &Sandbox{ID: "SBX-cmd1", Name: "cmd-sandbox", State: "RUNNING"} + if err := store.CreateSandbox(ctx, sb); err != nil { + t.Fatalf("CreateSandbox failed: %v", err) + } + + now := time.Now().UTC() + + cmd1 := &Command{ + ID: "CMD-1", + SandboxID: "SBX-cmd1", + Command: "whoami", + Stdout: "root\n", + Stderr: "", + ExitCode: 0, + DurationMS: 50, + StartedAt: now.Add(-2 * time.Second), + EndedAt: now.Add(-2*time.Second + 50*time.Millisecond), + } + cmd2 := &Command{ + ID: "CMD-2", + SandboxID: "SBX-cmd1", + Command: "ls /tmp", + Stdout: "file1\nfile2\n", + Stderr: "", + ExitCode: 0, + DurationMS: 30, + StartedAt: now.Add(-1 * time.Second), + EndedAt: now.Add(-1*time.Second + 30*time.Millisecond), + } + // Command for a different sandbox - should not appear in results. + cmd3 := &Command{ + ID: "CMD-3", + SandboxID: "SBX-other", + Command: "echo hi", + Stdout: "hi\n", + ExitCode: 0, + DurationMS: 10, + StartedAt: now, + EndedAt: now.Add(10 * time.Millisecond), + } + + for _, cmd := range []*Command{cmd1, cmd2, cmd3} { + if err := store.CreateCommand(ctx, cmd); err != nil { + t.Fatalf("CreateCommand(%s) failed: %v", cmd.ID, err) + } + } + + commands, err := store.ListSandboxCommands(ctx, "SBX-cmd1") + if err != nil { + t.Fatalf("ListSandboxCommands failed: %v", err) + } + + if len(commands) != 2 { + t.Fatalf("expected 2 commands, got %d", len(commands)) + } + + // Results are ordered by started_at DESC, so cmd2 should be first. + if commands[0].ID != "CMD-2" { + t.Errorf("first command ID = %q, want CMD-2 (most recent)", commands[0].ID) + } + if commands[1].ID != "CMD-1" { + t.Errorf("second command ID = %q, want CMD-1", commands[1].ID) + } + + // Verify fields on first command. 
+ c := commands[0] + if c.Command != "ls /tmp" { + t.Errorf("Command = %q, want %q", c.Command, "ls /tmp") + } + if c.Stdout != "file1\nfile2\n" { + t.Errorf("Stdout = %q, want %q", c.Stdout, "file1\nfile2\n") + } + if c.ExitCode != 0 { + t.Errorf("ExitCode = %d, want 0", c.ExitCode) + } + if c.DurationMS != 30 { + t.Errorf("DurationMS = %d, want 30", c.DurationMS) + } + + // List commands for a sandbox with none. + empty, err := store.ListSandboxCommands(ctx, "SBX-nonexistent") + if err != nil { + t.Fatalf("ListSandboxCommands for empty sandbox failed: %v", err) + } + if len(empty) != 0 { + t.Errorf("expected 0 commands for nonexistent sandbox, got %d", len(empty)) + } +} diff --git a/fluid-remote/.gitignore b/fluid-remote/.gitignore deleted file mode 100755 index 38f99c1e..00000000 --- a/fluid-remote/.gitignore +++ /dev/null @@ -1,39 +0,0 @@ -# If you prefer the allow list template instead of the deny list, see community template: -# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore -# -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Code coverage profiles and other test artifacts -*.out -coverage.* -*.coverprofile -profile.cov - -# Dependency directories (remove the comment below to include it) -# vendor/ - -# Go workspace file -go.work -go.work.sum - -# env file -.env -.env.lima -.ssh-ca/ -bin/ -.ansible/ -config.yaml -config.yml -hosts.txt - -# Editor/IDE -# .idea/ -# .vscode/ diff --git a/fluid-remote/AGENTS.md b/fluid-remote/AGENTS.md deleted file mode 100644 index 5e7cbf55..00000000 --- a/fluid-remote/AGENTS.md +++ /dev/null @@ -1,199 +0,0 @@ -# fluid-remote API - Development Guide - -This is the main fluid-remote Go API service that orchestrates KVM/libvirt virtual machines. 
- -## Important Development Notes - -### Mandatory Testing - -After every code change, tests MUST be created or updated to verify the new behavior: -- Add unit tests in `internal//*_test.go` files -- Run `make test` to verify all tests pass before considering work complete - -### Strict JSON Decoding - -The API uses strict JSON decoding (`DisallowUnknownFields()`). This means: -- Adding new fields to request structs requires rebuilding the API -- The running API will reject requests with fields it doesn't recognize -- Always rebuild and restart after modifying request/response DTOs - -### Rebuilding After Changes - -When modifying the API, you must rebuild for changes to take effect: - -```bash -# From repo root - rebuild and restart via docker-compose -docker-compose down && docker-compose up --build -d -``` - -```bash -# Use limactl to access the test VM -limactl shell fluid-remote-dev -``` - -### ARM Mac (Apple Silicon) Limitations - -On ARM Macs using Lima for libvirt: -- VMs may fail to start with "CPU mode 'host-passthrough' not supported" errors -- This is a hypervisor limitation, not a code issue -- The VM will be created but remain in "shut off" state - -## Prerequisites - -- Go 1.21+ -- libvirt/KVM installed and running -- PostgreSQL database -- Development tools (gofumpt, golangci-lint, swag) - -## Quick Start - -```bash -# Install development tools -make install-tools - -# Download dependencies -make deps - -# Run all checks and build -make all - -# Run the API server -make run -``` - -## Build Scripts - -```bash -# Build the API binary -make build -# Output: bin/fluid-remote-api - -# Clean build artifacts -make clean - -# Build Docker image -make docker-build -``` - -## Test Scripts - -```bash -# Run all tests -make test - -# Run tests with coverage report -make test-coverage -# Generates: coverage.out, coverage.html -``` - -## Code Quality Scripts - -```bash -# Run all checks (format, vet, lint) -make check - -# Format code with gofumpt -make fmt -# 
Or: ./scripts/fmt.sh - -# Run go vet -make vet -# Or: ./scripts/vet.sh - -# Run golangci-lint -make lint -# Or: ./scripts/lint.sh -``` - -## Dependency Management - -```bash -# Download dependencies -make deps - -# Tidy and verify go.mod -make tidy -``` - -## Code Generation - -```bash -# Generate OpenAPI/Swagger documentation -make generate-openapi -# Or: ./scripts/generate-openapi.sh - -# Run all code generation -make generate -``` - -## Development Setup - -```bash -# Install development tools (gofumpt, golangci-lint, swag) -make install-tools - -# Setup Lima libvirt environment (macOS) -make setup-lima -# Or: ./scripts/setup-lima-libvirt.sh - -# Create a test VM for development -make create-test-vm -# Or: ./scripts/create-test-vm.sh -``` - -## Running Locally - -### Environment Variables - -```bash -export LOG_FORMAT=text -export LOG_LEVEL=debug -export API_HTTP_ADDR=:8080 -export LIBVIRT_URI=qemu:///system -export LIBVIRT_NETWORK=default -export BASE_IMAGE_DIR=/var/lib/libvirt/images/base -export SANDBOX_WORKDIR=/var/lib/libvirt/images/jobs -export DATABASE_URL=postgresql://virsh_sandbox:virsh_sandbox@localhost:5432/virsh_sandbox -export DEFAULT_VCPUS=2 -export DEFAULT_MEMORY_MB=2048 -export COMMAND_TIMEOUT_SEC=600 -export IP_DISCOVERY_TIMEOUT_SEC=120 -``` - -### Run with make - -```bash -make run -``` - -### Run with Docker Compose (from repo root) - -```bash -docker-compose up fluid-remote --build -``` - -## All Makefile Targets - -Run `make help` to see all available targets: - -| Target | Description | -|--------|-------------| -| `all` | Run checks and build (default) | -| `build` | Build the API binary | -| `run` | Run the API server | -| `clean` | Clean build artifacts | -| `fmt` | Format code with gofumpt | -| `lint` | Run golangci-lint | -| `vet` | Run go vet | -| `test` | Run tests | -| `test-coverage` | Run tests with coverage | -| `check` | Run all code quality checks | -| `deps` | Download dependencies | -| `tidy` | Tidy and verify dependencies | 
-| `generate-openapi` | Generate OpenAPI documentation | -| `generate` | Run all code generation | -| `install-tools` | Install development tools | -| `setup-lima` | Setup Lima libvirt environment | -| `create-test-vm` | Create a test VM | -| `docker-build` | Build Docker image | -| `help` | Show help message | diff --git a/fluid-remote/Dockerfile b/fluid-remote/Dockerfile deleted file mode 100644 index bfb0c668..00000000 --- a/fluid-remote/Dockerfile +++ /dev/null @@ -1,108 +0,0 @@ -# syntax=docker/dockerfile:1.7 - -# ------------------------------------------------------------------------------ -# Builder stage: compile Go API binary with CGO (required for libvirt bindings) -# ------------------------------------------------------------------------------ -FROM golang:1.24-bookworm AS builder - -ARG TARGETOS=linux -ARG TARGETARCH=arm64 - -# CGO must be enabled for libvirt Go bindings (they wrap the C library) -ENV CGO_ENABLED=1 \ - GOOS=${TARGETOS} \ - GOARCH=${TARGETARCH} \ - GOFLAGS="-trimpath" - -WORKDIR /src - -# Install libvirt development headers required for CGO compilation -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - libvirt-dev \ - pkg-config \ - gcc \ - libc6-dev && \ - rm -rf /var/lib/apt/lists/* - -# Leverage Docker layer caching for dependencies -COPY go.mod go.sum ./ -RUN --mount=type=cache,target=/go/pkg/mod \ - go mod download - -# Copy the rest of the source -COPY . . 
- -# Build API (with CGO for libvirt bindings) -RUN --mount=type=cache,target=/go/pkg/mod \ - --mount=type=cache,target=/root/.cache/go-build \ - go build --tags libvirt -ldflags="-s -w" -o /out/api ./cmd/api - -# ------------------------------------------------------------------------------ -# Runtime stage: include libvirt/qemu tools and run the API -# ------------------------------------------------------------------------------ -FROM debian:bookworm-slim AS runtime - -# Install runtime dependencies: -# - libvirt0 (libvirt runtime library - required by the Go binary) -# - libvirt-clients (virsh) -# - qemu-utils (qemu-img, qemu-nbd) -# - qemu-system-x86 (various qemu system tools; often needed by libvirt) -# - cloud-image-utils (cloud-localds) -# - genisoimage (seed ISO fallback) -# - openssh-client (used by API to SSH into VMs) -# - curl (healthcheck) -# - ca-certificates (TLS) -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - libvirt0 \ - libvirt-clients \ - qemu-utils \ - qemu-system-x86 \ - cloud-image-utils \ - genisoimage \ - openssh-client \ - curl \ - ca-certificates \ - dnsmasq-utils && \ - rm -rf /var/lib/apt/lists/* - -# Create common directories (can be mounted from host in docker-compose) -RUN mkdir -p /var/lib/libvirt/images/base /var/lib/libvirt/images/jobs /var/lib/fluid-remote /root/.ssh - -# Copy compiled binary -COPY --from=builder /out/api /usr/local/bin/api - -# Copy API documentation -COPY ./docs /usr/local/bin/docs - -# Set working directory to where your binary and docs are -WORKDIR /usr/local/bin - -# Environment defaults (override in compose or runtime as needed) -ENV API_HTTP_ADDR=:8080 \ - LOG_FORMAT=text \ - LOG_LEVEL=info \ - LIBVIRT_URI=qemu:///system \ - LIBVIRT_NETWORK=default \ - BASE_IMAGE_DIR=/var/lib/libvirt/images/base \ - SANDBOX_WORKDIR=/var/lib/libvirt/images/jobs \ - DATABASE_URL=file:/var/lib/fluid-remote.db?_busy_timeout=10000&_fk=1 \ - DEFAULT_VCPUS=2 \ - 
DEFAULT_MEMORY_MB=2048 \ - COMMAND_TIMEOUT_SEC=600 \ - IP_DISCOVERY_TIMEOUT_SEC=120 - -# Expose API port -EXPOSE 8080 - -# Notes for runtime (documented here for convenience): -# - Mount libvirt socket (read/write) if talking to local libvirtd: -# -v /var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock -# - Alternatively, use a TCP/TLS libvirt endpoint by setting LIBVIRT_URI accordingly. -# - Bind host image directories: -# -v /var/lib/libvirt/images/base:/var/lib/libvirt/images/base:ro -# -v /var/lib/libvirt/images/jobs:/var/lib/libvirt/images/jobs:rw -# - If using SSH to VMs, ensure network connectivity from container to the VMs. - -ENTRYPOINT ["/usr/local/bin/api"] diff --git a/fluid-remote/Makefile b/fluid-remote/Makefile deleted file mode 100644 index 9bcd5eee..00000000 --- a/fluid-remote/Makefile +++ /dev/null @@ -1,101 +0,0 @@ -# fluid-remote API Makefile - -.PHONY: all build build-dev run clean fmt lint vet test check generate-openapi help install-tools - -# Go parameters -GOCMD=go -GOBUILD=$(GOCMD) build -GORUN=$(GOCMD) run -GOTEST=$(GOCMD) test -GOVET=$(GOCMD) vet -GOMOD=$(GOCMD) mod -BINARY_NAME=fluid-remote -CMD_PATH=./cmd/api -TAGS=libvirt - -# PostHog key (empty by default for dev builds) -POSTHOG_KEY ?= - -# Build flags -LDFLAGS=-s -w -ifneq ($(POSTHOG_KEY),) - LDFLAGS += -X github.com/aspectrr/fluid.sh/fluid-remote/internal/telemetry.posthogAPIKey=$(POSTHOG_KEY) -endif -BUILD_FLAGS=-ldflags "$(LDFLAGS)" -tags "$(TAGS)" - -# Default target -all: check build - -## Build targets - -build: ## Build the API binary - $(GOBUILD) $(BUILD_FLAGS) -o bin/$(BINARY_NAME) $(CMD_PATH) - -build-dev: POSTHOG_KEY=phc_QR3I1IKrEOqx5jIfJkBMfyznynIxRYd8kzmZM9o9fRZ -build-dev: build ## Build with PostHog key - -run: ## Run the API server - $(GORUN) $(CMD_PATH) - -clean: ## Clean build artifacts - rm -rf bin/ - $(GOCMD) clean - -## Code quality targets - -fmt: ## Format code with gofumpt - @echo "Formatting code..." - gofumpt -w . 
- -lint: ## Run golangci-lint - @echo "Running golangci-lint..." - golangci-lint run --build-tags=$(TAGS) ./... - -vet: ## Run go vet - @echo "Running go vet..." - $(GOVET) -tags=$(TAGS) ./... - -test: ## Run tests - $(GOTEST) -v -race -tags=$(TAGS) ./... - -test-coverage: ## Run tests with coverage - $(GOTEST) -v -race -tags=$(TAGS) -coverprofile=coverage.out ./... - $(GOCMD) tool cover -html=coverage.out -o coverage.html - -check: fmt vet lint ## Run all code quality checks (fmt, vet, lint) - -## Dependency management - -deps: ## Download dependencies - $(GOMOD) download - -tidy: ## Tidy and verify dependencies - $(GOMOD) tidy - $(GOMOD) verify - -## Code generation - -generate-openapi: ## Generate OpenAPI/Swagger documentation - ./scripts/generate-openapi.sh - -generate: generate-openapi ## Run all code generation - -## Development setup - -install-tools: ## Install development tools (gofumpt, golangci-lint, swag) - go install mvdan.cc/gofumpt@latest - go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest - go install github.com/swaggo/swag/cmd/swag@latest - -## Docker targets - -docker-build: ## Build Docker image - docker build -f Dockerfile -t fluid-remote-api . - -## Help - -help: ## Show this help message - @echo "Usage: make [target]" - @echo "" - @echo "Targets:" - @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf " %-20s %s\n", $$1, $$2}' $(MAKEFILE_LIST) diff --git a/fluid-remote/README.md b/fluid-remote/README.md deleted file mode 100644 index ecd876eb..00000000 --- a/fluid-remote/README.md +++ /dev/null @@ -1,5 +0,0 @@ -## Fluid Remote API - -## Development - -Use `make build-dev` when building locally, this uses the dev posthog API key vs production. 
diff --git a/fluid-remote/cmd/api/main.go b/fluid-remote/cmd/api/main.go deleted file mode 100755 index 064f1495..00000000 --- a/fluid-remote/cmd/api/main.go +++ /dev/null @@ -1,291 +0,0 @@ -package main - -import ( - "context" - "flag" - "log/slog" - "net/http" - "os" - "os/signal" - "strings" - "syscall" - "time" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/ansible" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/config" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/janitor" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/libvirt" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/rest" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/sshca" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/sshkeys" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/store" - postgresStore "github.com/aspectrr/fluid.sh/fluid-remote/internal/store/postgres" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/telemetry" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/vm" -) - -// @title fluid-remote API -// @version 0.1.0 -// @description API for managing AI Agent VM sandboxes using libvirt -// @BasePath / - -// @tag.name Sandbox -// @tag.description Sandbox lifecycle management - create, start, run commands, snapshot, and destroy sandboxes - -// @tag.name VMs -// @tag.description Virtual machine listing and information - -// @tag.name Ansible -// @tag.description Ansible playbook job management - -// @tag.name Health -// @tag.description Health check endpoints -func main() { - // Parse command line flags - configPath := flag.String("config", "config.yaml", "path to config file") - flag.Parse() - - // Context with OS signal cancellation - ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) - defer stop() - - // Load config from YAML (with env var overrides for backward compatibility) - cfg, err := config.LoadWithEnvOverride(*configPath) - if err != nil { - slog.Error("failed to load config", 
"path", *configPath, "error", err) - os.Exit(1) - } - - // Logging setup - logger := setupLogger(cfg.Logging.Level, cfg.Logging.Format) - slog.SetDefault(logger) - - logger.Info("starting fluid-remote API", - "config", *configPath, - "addr", cfg.API.Addr, - "db", cfg.Database.URL, - "network", cfg.Libvirt.Network, - "default_vcpus", cfg.VM.DefaultVCPUs, - "default_memory_mb", cfg.VM.DefaultMemoryMB, - "command_timeout", cfg.VM.CommandTimeout.String(), - "ip_discovery_timeout", cfg.VM.IPDiscoveryTimeout.String(), - "ansible_playbooks_dir", cfg.Ansible.PlaybooksDir, - ) - - st, err := postgresStore.New(ctx, store.Config{ - DatabaseURL: cfg.Database.URL, - MaxOpenConns: cfg.Database.MaxOpenConns, - MaxIdleConns: cfg.Database.MaxIdleConns, - ConnMaxLifetime: cfg.Database.ConnMaxLifetime, - AutoMigrate: cfg.Database.AutoMigrate, - ReadOnly: false, - }) - if err != nil { - logger.Error("failed to initialize store", "error", err) - os.Exit(1) - } - defer func() { - if cerr := st.Close(); cerr != nil { - logger.Error("failed to close store", "error", cerr) - } - }() - - // Initialize libvirt manager with config - lvCfg := libvirt.Config{ - LibvirtURI: cfg.Libvirt.URI, - BaseImageDir: cfg.Libvirt.BaseImageDir, - WorkDir: cfg.Libvirt.WorkDir, - DefaultNetwork: cfg.Libvirt.Network, - SSHKeyInjectMethod: cfg.Libvirt.SSHKeyInjectMethod, - SSHProxyJump: cfg.SSH.ProxyJump, - SocketVMNetWrapper: cfg.Libvirt.SocketVMNetWrapper, - DefaultVCPUs: cfg.VM.DefaultVCPUs, - DefaultMemoryMB: cfg.VM.DefaultMemoryMB, - } - // Read SSH CA public key if it exists - if pubKeyData, err := os.ReadFile(cfg.SSH.CAPubPath); err == nil { - lvCfg.SSHCAPubKey = string(pubKeyData) - } - lvMgr := libvirt.NewVirshManager(lvCfg, logger) - - // Initialize domain manager for direct libvirt queries - domainMgr := libvirt.NewDomainManager(cfg.Libvirt.URI) - - // Initialize telemetry service. - // Design decision: telemetry failures should not crash the application. 
- // If telemetry initialization fails, we log the error and use a noop service - // that silently discards all events. This ensures the core API functionality - // remains available even when analytics infrastructure is unavailable. - telemetrySvc, err := telemetry.NewService(cfg.Telemetry) - if err != nil { - logger.Warn("telemetry initialization failed, using noop service", "error", err) - telemetrySvc = telemetry.NewNoopService() - } - defer telemetrySvc.Close() - - // Initialize SSH CA and key manager (optional - for managed credentials) - var keyMgr sshkeys.KeyProvider - if _, err := os.Stat(cfg.SSH.CAKeyPath); err == nil { - // SSH CA key exists, initialize managed key support - caCfg := sshca.Config{ - CAKeyPath: cfg.SSH.CAKeyPath, - CAPubKeyPath: cfg.SSH.CAPubPath, - WorkDir: cfg.SSH.WorkDir, - DefaultTTL: cfg.SSH.CertTTL, - MaxTTL: cfg.SSH.MaxTTL, - DefaultPrincipals: []string{cfg.SSH.DefaultUser}, - EnforceKeyPermissions: true, - } - ca, err := sshca.NewCA(caCfg) - if err != nil { - logger.Error("failed to create SSH CA", "error", err) - os.Exit(1) - } - if err := ca.Initialize(ctx); err != nil { - logger.Error("failed to initialize SSH CA", "error", err) - os.Exit(1) - } - - keyMgrCfg := sshkeys.Config{ - KeyDir: cfg.SSH.KeyDir, - CertificateTTL: cfg.SSH.CertTTL, - RefreshMargin: 30 * time.Second, - DefaultUsername: cfg.SSH.DefaultUser, - } - keyMgr, err = sshkeys.NewKeyManager(ca, keyMgrCfg, logger) - if err != nil { - logger.Error("failed to create SSH key manager", "error", err) - os.Exit(1) - } - defer func() { - if err := keyMgr.Close(); err != nil { - logger.Error("failed to close SSH key manager", "error", err) - } - }() - logger.Info("SSH key management enabled", - "key_dir", cfg.SSH.KeyDir, - "cert_ttl", cfg.SSH.CertTTL, - ) - } else { - logger.Info("SSH CA not found, managed credentials disabled", - "ca_key_path", cfg.SSH.CAKeyPath, - ) - } - - // Initialize VM service with logger and optional key manager - vmOpts := []vm.Option{ - 
vm.WithLogger(logger), - vm.WithVirshConfig(lvCfg), // Pass virsh config for remote manager creation - vm.WithTelemetry(telemetrySvc), - } - if keyMgr != nil { - vmOpts = append(vmOpts, vm.WithKeyManager(keyMgr)) - } - vmSvc := vm.NewService(lvMgr, st, vm.Config{ - Network: cfg.Libvirt.Network, - DefaultVCPUs: cfg.VM.DefaultVCPUs, - DefaultMemoryMB: cfg.VM.DefaultMemoryMB, - CommandTimeout: cfg.VM.CommandTimeout, - IPDiscoveryTimeout: cfg.VM.IPDiscoveryTimeout, - SSHProxyJump: cfg.SSH.ProxyJump, - }, vmOpts...) - - // Initialize Ansible runner - ansibleRunner := ansible.NewRunner(cfg.Ansible.InventoryPath, cfg.Ansible.Image, cfg.Ansible.AllowedPlaybooks) - - // Initialize Ansible playbook service - playbookSvc := ansible.NewPlaybookService(st, cfg.Ansible.PlaybooksDir) - - // Initialize multi-host manager if hosts are configured - var multiHostMgr *libvirt.MultiHostDomainManager - if len(cfg.Hosts) > 0 { - multiHostMgr = libvirt.NewMultiHostDomainManager(cfg.Hosts, logger) - logger.Info("multi-host VM listing enabled", - "host_count", len(cfg.Hosts), - ) - } - - // REST server setup with multi-host support - - restSrv := rest.NewServerWithMultiHost(vmSvc, domainMgr, multiHostMgr, ansibleRunner, playbookSvc, telemetrySvc) - - // Start janitor for background cleanup of expired sandboxes - - if cfg.Janitor.Enabled { - - janitorSvc := janitor.New(st, vmSvc, cfg.Janitor.DefaultTTL, logger) - go janitorSvc.Start(ctx, cfg.Janitor.Interval) - logger.Info("sandbox janitor enabled", - "interval", cfg.Janitor.Interval, - "default_ttl", cfg.Janitor.DefaultTTL, - ) - } else { - logger.Info("sandbox janitor disabled") - } - - // Build http.Server so we can gracefully shutdown - // WriteTimeout must be > IPDiscoveryTimeout to allow wait_for_ip to complete - writeTimeout := cfg.VM.IPDiscoveryTimeout + 30*time.Second - if writeTimeout < cfg.API.WriteTimeout { - writeTimeout = cfg.API.WriteTimeout - } - httpSrv := &http.Server{ - Addr: cfg.API.Addr, - Handler: restSrv.Router, // use 
the chi router directly for graceful shutdowns - ReadHeaderTimeout: 15 * time.Second, - ReadTimeout: cfg.API.ReadTimeout, - WriteTimeout: writeTimeout, - IdleTimeout: cfg.API.IdleTimeout, - } - - // Start HTTP server - serverErrCh := make(chan error, 1) - go func() { - logger.Info("http server listening", "addr", cfg.API.Addr) - if err := httpSrv.ListenAndServe(); err != nil && err != http.ErrServerClosed { - serverErrCh <- err - } - }() - - // Wait for signal or server error - select { - case <-ctx.Done(): - logger.Info("shutdown signal received") - case err := <-serverErrCh: - logger.Error("server error", "error", err) - } - - // Attempt graceful shutdown - shutdownCtx, cancel := context.WithTimeout(context.Background(), cfg.API.ShutdownTimeout) - defer cancel() - if err := httpSrv.Shutdown(shutdownCtx); err != nil { - logger.Error("http server graceful shutdown failed", "error", err) - _ = httpSrv.Close() - } else { - logger.Info("http server shut down gracefully") - } -} - -// setupLogger configures slog with level and format. -func setupLogger(levelStr, format string) *slog.Logger { - var level slog.Level - switch strings.ToLower(levelStr) { - case "debug": - level = slog.LevelDebug - case "warn", "warning": - level = slog.LevelWarn - case "error": - level = slog.LevelError - default: - level = slog.LevelInfo - } - jsonFmt := strings.ToLower(format) == "json" - - var handler slog.Handler - if jsonFmt { - handler = slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: level}) - } else { - handler = slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: level}) - } - return slog.New(handler) -} diff --git a/fluid-remote/config.example.yaml b/fluid-remote/config.example.yaml deleted file mode 100644 index 36511df7..00000000 --- a/fluid-remote/config.example.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# fluid-remote API configuration -# Copy this file to config.yaml and modify as needed. 
-# Environment variables can override any setting for backward compatibility. - -api: - addr: ":8080" - read_timeout: 60s - write_timeout: 120s - idle_timeout: 120s - -hosts: - - name: kvm-01 - address: 10.0.0.11 - - name: kvm-02 - address: 10.0.0.12 - -database: - url: "postgresql://virsh_sandbox:virsh_sandbox@localhost:5432/virsh_sandbox" - max_open_conns: 16 - max_idle_conns: 8 - conn_max_lifetime: 1h - auto_migrate: true - -telemetry: - enable_anonymous_usage: true - api_key: "" - endpoint: "https://app.posthog.com" - -libvirt: - uri: "qemu:///system" - network: "default" - base_image_dir: "/var/lib/libvirt/images/base" - work_dir: "/var/lib/libvirt/images/jobs" - ssh_key_inject_method: "virt-customize" # or "cloud-init" - # socket_vmnet_wrapper: "/path/to/qemu-socket-vmnet-wrapper.sh" # macOS only - -vm: - default_vcpus: 2 - default_memory_mb: 2048 - command_timeout: 10m - ip_discovery_timeout: 2m - -ssh: - # proxy_jump: "user@jumphost:22" # optional: for VMs on isolated networks - ca_key_path: "/etc/fluid-remote/ssh_ca" - ca_pub_path: "/etc/fluid-remote/ssh_ca.pub" - key_dir: "/tmp/sandbox-keys" - cert_ttl: 5m - max_ttl: 10m - work_dir: "/tmp/sshca" - default_user: "sandbox" - -ansible: - inventory_path: "./.ansible/inventory" - playbooks_dir: "./.ansible/playbooks" - image: "ansible-sandbox" - allowed_playbooks: - - "ping.yml" - -logging: - level: "info" # debug, info, warn, error - format: "text" # text or json diff --git a/fluid-remote/docs/openapi.yaml b/fluid-remote/docs/openapi.yaml deleted file mode 100644 index 4a4ee9f2..00000000 --- a/fluid-remote/docs/openapi.yaml +++ /dev/null @@ -1,3170 +0,0 @@ -openapi: 3.0.1 -info: - contact: {} - description: API for managing AI Agent VM sandboxes using libvirt - title: fluid-remote API - version: 0.1.0 -servers: - - url: / -tags: - - description: - "Sandbox lifecycle management - create, start, run commands, snapshot,\ - \ and destroy sandboxes" - name: Sandbox - - description: Virtual machine listing and 
information - name: VMs - - description: Ansible playbook job management - name: Ansible - - description: Health check endpoints - name: Health -paths: - /v1/access/ca-pubkey: - get: - description: Returns the CA public key that should be trusted by VMs - operationId: getCAPublicKey - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.caPublicKeyResponse" - description: OK - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Internal Server Error - summary: Get the SSH CA public key - tags: - - Access - /v1/access/certificate/{certID}: - delete: - description: "Immediately revokes a certificate, terminating any active sessions" - operationId: revokeCertificate - parameters: - - description: Certificate ID - in: path - name: certID - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.revokeCertificateRequest" - description: Revocation reason - required: false - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.revokeCertificateResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Bad Request - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Not Found - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Internal Server Error - summary: Revoke a certificate - tags: - - Access - x-codegen-request-body-name: request - get: - description: Returns details about an issued certificate - operationId: getCertificate - parameters: - - description: Certificate ID - in: path - name: certID - required: true - schema: - type: string - responses: - "200": 
- content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.certificateResponse" - description: OK - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Not Found - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Internal Server Error - summary: Get certificate details - tags: - - Access - /v1/access/certificates: - get: - description: Lists issued certificates with optional filtering - operationId: listCertificates - parameters: - - description: Filter by sandbox ID - in: query - name: sandbox_id - schema: - type: string - - description: Filter by user ID - in: query - name: user_id - schema: - type: string - - description: "Filter by status (ACTIVE, EXPIRED, REVOKED)" - in: query - name: status - schema: - type: string - - description: "Only show active, non-expired certificates" - in: query - name: active_only - schema: - type: boolean - - description: Maximum results to return - in: query - name: limit - schema: - type: integer - - description: Offset for pagination - in: query - name: offset - schema: - type: integer - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.listCertificatesResponse" - description: OK - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Internal Server Error - summary: List certificates - tags: - - Access - /v1/access/request: - post: - description: - Issues a short-lived SSH certificate for accessing a sandbox via - tmux - operationId: requestAccess - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.requestAccessRequest" - description: Access request - required: true - responses: - "200": - content: - application/json: - schema: - $ref: 
"#/components/schemas/internal_rest.requestAccessResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Bad Request - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Not Found - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Internal Server Error - summary: Request SSH access to a sandbox - tags: - - Access - x-codegen-request-body-name: request - /v1/access/session/end: - post: - description: Records the end of an SSH session - operationId: recordSessionEnd - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.sessionEndRequest" - description: Session end request - required: true - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.sessionEndResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Bad Request - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Internal Server Error - summary: Record session end - tags: - - Access - x-codegen-request-body-name: request - /v1/access/session/start: - post: - description: Records the start of an SSH session (called by VM or auth service) - operationId: recordSessionStart - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.sessionStartRequest" - description: Session start request - required: true - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.sessionStartResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: 
"#/components/schemas/internal_rest.accessErrorResponse" - description: Bad Request - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Internal Server Error - summary: Record session start - tags: - - Access - x-codegen-request-body-name: request - /v1/access/sessions: - get: - description: Lists access sessions with optional filtering - operationId: listSessions - parameters: - - description: Filter by sandbox ID - in: query - name: sandbox_id - schema: - type: string - - description: Filter by certificate ID - in: query - name: certificate_id - schema: - type: string - - description: Filter by user ID - in: query - name: user_id - schema: - type: string - - description: Only show active sessions - in: query - name: active_only - schema: - type: boolean - - description: Maximum results to return - in: query - name: limit - schema: - type: integer - - description: Offset for pagination - in: query - name: offset - schema: - type: integer - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.listSessionsResponse" - description: OK - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/internal_rest.accessErrorResponse" - description: Internal Server Error - summary: List sessions - tags: - - Access - /v1/ansible/jobs: - post: - description: Creates a new Ansible playbook execution job - operationId: createAnsibleJob - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.JobRequest" - description: Job creation parameters - required: true - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.JobResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: 
"#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Bad Request - summary: Create Ansible job - tags: - - Ansible - x-codegen-request-body-name: request - /v1/ansible/jobs/{job_id}: - get: - description: Gets the status of an Ansible job - operationId: getAnsibleJob - parameters: - - description: Job ID - in: path - name: job_id - required: true - schema: - type: string - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.Job" - description: OK - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Not Found - summary: Get Ansible job - tags: - - Ansible - /v1/ansible/jobs/{job_id}/stream: - get: - description: Connects via WebSocket to run an Ansible job and stream output - operationId: streamAnsibleJobOutput - parameters: - - description: Job ID - in: path - name: job_id - required: true - schema: - type: string - responses: - "101": - content: - "*/*": - schema: - type: string - description: Switching Protocols - WebSocket connection established - "404": - content: - "*/*": - schema: - type: string - description: Invalid job ID - "409": - content: - "*/*": - schema: - type: string - description: Job already started or finished - summary: Stream Ansible job output - tags: - - Ansible - /v1/ansible/playbooks: - get: - description: Lists all Ansible playbooks - operationId: listPlaybooks - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.listPlaybooksResponse" - description: OK - summary: List playbooks - tags: - - Ansible Playbooks - post: - description: Creates a new Ansible playbook - operationId: createPlaybook - requestBody: - content: - application/json: - schema: - $ref: 
"#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.createPlaybookRequest" - description: Playbook creation parameters - required: true - responses: - "201": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.createPlaybookResponse" - description: Created - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Bad Request - "409": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Conflict - summary: Create playbook - tags: - - Ansible Playbooks - x-codegen-request-body-name: request - /v1/ansible/playbooks/{playbook_name}: - delete: - description: Deletes a playbook and all its tasks - operationId: deletePlaybook - parameters: - - description: Playbook name - in: path - name: playbook_name - required: true - schema: - type: string - responses: - "204": - content: {} - description: No Content - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Not Found - summary: Delete playbook - tags: - - Ansible Playbooks - get: - description: Gets a playbook and its tasks by name - operationId: getPlaybook - parameters: - - description: Playbook name - in: path - name: playbook_name - required: true - schema: - type: string - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.getPlaybookResponse" - description: OK - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Not Found - summary: Get playbook - tags: - - Ansible Playbooks - 
/v1/ansible/playbooks/{playbook_name}/export: - get: - description: Exports a playbook as raw YAML - operationId: exportPlaybook - parameters: - - description: Playbook name - in: path - name: playbook_name - required: true - schema: - type: string - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.exportPlaybookResponse" - description: OK - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Not Found - summary: Export playbook - tags: - - Ansible Playbooks - /v1/ansible/playbooks/{playbook_name}/tasks: - post: - description: Adds a new task to an existing playbook - operationId: addPlaybookTask - parameters: - - description: Playbook name - in: path - name: playbook_name - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.addTaskRequest" - description: Task parameters - required: true - responses: - "201": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.addTaskResponse" - description: Created - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Bad Request - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Not Found - summary: Add task to playbook - tags: - - Ansible Playbooks - x-codegen-request-body-name: request - /v1/ansible/playbooks/{playbook_name}/tasks/{task_id}: - delete: - description: Removes a task from a playbook - operationId: deletePlaybookTask - parameters: - - description: Playbook name - in: path 
- name: playbook_name - required: true - schema: - type: string - - description: Task ID - in: path - name: task_id - required: true - schema: - type: string - responses: - "204": - content: {} - description: No Content - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Not Found - summary: Delete task - tags: - - Ansible Playbooks - put: - description: Updates an existing task in a playbook - operationId: updatePlaybookTask - parameters: - - description: Playbook name - in: path - name: playbook_name - required: true - schema: - type: string - - description: Task ID - in: path - name: task_id - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.updateTaskRequest" - description: Task update parameters - required: true - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.updateTaskResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Bad Request - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Not Found - summary: Update task - tags: - - Ansible Playbooks - x-codegen-request-body-name: request - /v1/ansible/playbooks/{playbook_name}/tasks/reorder: - patch: - description: Reorders tasks in a playbook - operationId: reorderPlaybookTasks - parameters: - - description: Playbook name - in: path - name: playbook_name - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: 
"#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.reorderTasksRequest" - description: New task order - required: true - responses: - "204": - content: {} - description: No Content - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Bad Request - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse" - description: Not Found - summary: Reorder tasks - tags: - - Ansible Playbooks - x-codegen-request-body-name: request - /v1/health: - get: - description: Returns service health status - operationId: getHealth - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.healthResponse" - description: OK - summary: Health check - tags: - - Health - /v1/sandboxes: - get: - description: - "Lists all sandboxes with optional filtering by agent_id, job_id,\ - \ base_image, state, or vm_name" - operationId: listSandboxes - parameters: - - description: Filter by agent ID - in: query - name: agent_id - schema: - type: string - - description: Filter by job ID - in: query - name: job_id - schema: - type: string - - description: Filter by base image - in: query - name: base_image - schema: - type: string - - description: - "Filter by state (CREATED, STARTING, RUNNING, STOPPED, DESTROYED,\ - \ ERROR)" - in: query - name: state - schema: - type: string - - description: Filter by VM name - in: query - name: vm_name - schema: - type: string - - description: Max results to return - in: query - name: limit - schema: - type: integer - - description: Number of results to skip - in: query - name: offset - schema: - type: integer - responses: - "200": - content: - application/json: - schema: - $ref: 
"#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.listSandboxesResponse" - description: OK - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: List sandboxes - tags: - - Sandbox - post: - description: - "Creates a new virtual machine sandbox by cloning from an existing\ - \ VM. When multi-host is configured, automatically routes to the host containing\ - \ the source VM." - operationId: createSandbox - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.createSandboxRequest" - description: Sandbox creation parameters - required: true - responses: - "201": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.createSandboxResponse" - description: Created - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: Create a new sandbox - tags: - - Sandbox - x-codegen-request-body-name: request - /v1/sandboxes/{id}: - delete: - description: Destroys the sandbox and cleans up resources - operationId: destroySandbox - parameters: - - description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.destroySandboxResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: 
"#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Not Found - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: Destroy sandbox - tags: - - Sandbox - get: - description: - Returns detailed information about a specific sandbox including - recent commands - operationId: getSandbox - parameters: - - description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - - description: Include command history - in: query - name: include_commands - schema: - type: boolean - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.getSandboxResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Not Found - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: Get sandbox details - tags: - - Sandbox - /v1/sandboxes/{id}/commands: - get: - description: Returns all commands executed in the sandbox - operationId: listSandboxCommands - parameters: - - description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - - description: Max results to return - in: query - name: limit - schema: - type: integer - - description: 
Number of results to skip - in: query - name: offset - schema: - type: integer - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.listSandboxCommandsResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Not Found - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: List sandbox commands - tags: - - Sandbox - /v1/sandboxes/{id}/diff: - post: - description: Computes differences between two snapshots - operationId: diffSnapshots - parameters: - - description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.diffRequest" - description: Diff parameters - required: true - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.diffResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: Diff snapshots - tags: - - Sandbox - x-codegen-request-body-name: request - /v1/sandboxes/{id}/generate/{tool}: - post: - description: 
Generates Ansible or Puppet configuration from sandbox changes - operationId: generateConfiguration - parameters: - - description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - - description: Tool type (ansible or puppet) - in: path - name: tool - required: true - schema: - type: string - responses: - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "501": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.generateResponse" - description: Not Implemented - summary: Generate configuration - tags: - - Sandbox - /v1/sandboxes/{id}/ip: - get: - description: - Discovers and returns the IP address for a running sandbox. Use - this for async workflows where wait_for_ip was false during start. - operationId: discoverSandboxIP - parameters: - - description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.discoverIPResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "404": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Not Found - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: Discover sandbox IP - tags: - - Sandbox - /v1/sandboxes/{id}/publish: - post: - description: Publishes sandbox changes to GitOps repository - operationId: publishChanges - parameters: - - 
description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.publishRequest" - description: Publish parameters - required: true - responses: - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "501": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.publishResponse" - description: Not Implemented - summary: Publish changes - tags: - - Sandbox - x-codegen-request-body-name: request - /v1/sandboxes/{id}/run: - post: - description: - "Executes a command inside the sandbox via SSH. If private_key_path\ - \ is omitted and SSH CA is configured, managed credentials will be used automatically." - operationId: runSandboxCommand - parameters: - - description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.runCommandRequest" - description: Command execution parameters - required: true - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.runCommandResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: Run command in sandbox - tags: - - Sandbox - x-codegen-request-body-name: request - 
/v1/sandboxes/{id}/snapshot: - post: - description: Creates a snapshot of the sandbox - operationId: createSnapshot - parameters: - - description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.snapshotRequest" - description: Snapshot parameters - required: true - responses: - "201": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.snapshotResponse" - description: Created - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: Create snapshot - tags: - - Sandbox - x-codegen-request-body-name: request - /v1/sandboxes/{id}/sshkey: - post: - description: Injects a public SSH key for a user in the sandbox - operationId: injectSshKey - parameters: - - description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.injectSSHKeyRequest" - description: SSH key injection parameters - required: true - responses: - "204": - content: {} - description: No Content - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: Inject SSH key into 
sandbox - tags: - - Sandbox - x-codegen-request-body-name: request - /v1/sandboxes/{id}/start: - post: - description: Starts the virtual machine sandbox - operationId: startSandbox - parameters: - - description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.startSandboxRequest" - description: Start parameters - required: false - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.startSandboxResponse" - description: OK - "400": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Bad Request - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: Start sandbox - tags: - - Sandbox - x-codegen-request-body-name: request - /v1/sandboxes/{id}/stream: - get: - description: - "Connects via WebSocket to stream realtime sandbox activity (commands,\ - \ file changes)" - operationId: streamSandboxActivity - parameters: - - description: Sandbox ID - in: path - name: id - required: true - schema: - type: string - responses: - "101": - content: - "*/*": - schema: - type: string - description: Switching Protocols - WebSocket connection established - "400": - content: - "*/*": - schema: - type: string - description: Invalid sandbox ID - "404": - content: - "*/*": - schema: - type: string - description: Sandbox not found - summary: Stream sandbox activity - tags: - - Sandbox - /v1/vms: - get: - description: - "Returns a list of host virtual machines from libvirt (excludes\ - \ sandboxes). When multi-host is configured, aggregates VMs from all hosts." 
- operationId: listVirtualMachines - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.listVMsResponse" - description: OK - "500": - content: - application/json: - schema: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse" - description: Internal Server Error - summary: List all host VMs - tags: - - VMs -components: - schemas: - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.Job: - example: - check: true - id: id - vm_name: vm_name - playbook: playbook - status: pending - properties: - check: - type: boolean - id: - type: string - playbook: - type: string - status: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.JobStatus" - vm_name: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.JobRequest: - properties: - check: - type: boolean - playbook: - type: string - vm_name: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.JobResponse: - example: - job_id: job_id - ws_url: ws_url - properties: - job_id: - type: string - ws_url: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.JobStatus: - enum: - - pending - - running - - finished - - failed - type: string - x-enum-varnames: - - JobStatusPending - - JobStatusRunning - - JobStatusFinished - - JobStatusFailed - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.addTaskRequest: - properties: - module: - type: string - name: - type: string - params: - properties: {} - type: object - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.addTaskResponse: - example: - task: - module: module - name: name - created_at: created_at - id: id - position: 0 - params: "{}" - playbook_id: playbook_id - properties: - task: - $ref: 
"#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.PlaybookTask" - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.createPlaybookRequest: - properties: - become: - type: boolean - hosts: - type: string - name: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.createPlaybookResponse: - example: - playbook: - file_path: file_path - updated_at: updated_at - hosts: hosts - name: name - created_at: created_at - id: id - become: true - properties: - playbook: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Playbook" - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.exportPlaybookResponse: - example: - yaml: yaml - properties: - yaml: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.getPlaybookResponse: - example: - tasks: - - module: module - name: name - created_at: created_at - id: id - position: 0 - params: "{}" - playbook_id: playbook_id - - module: module - name: name - created_at: created_at - id: id - position: 0 - params: "{}" - playbook_id: playbook_id - playbook: - file_path: file_path - updated_at: updated_at - hosts: hosts - name: name - created_at: created_at - id: id - become: true - properties: - playbook: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Playbook" - tasks: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.PlaybookTask" - type: array - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.listPlaybooksResponse: - example: - total: 0 - playbooks: - - file_path: file_path - updated_at: updated_at - hosts: hosts - name: name - created_at: created_at - id: id - become: true - - file_path: file_path - updated_at: updated_at - hosts: hosts - name: name - created_at: created_at - id: id - become: true - properties: - playbooks: - items: - $ref: 
"#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Playbook" - type: array - total: - type: integer - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.reorderTasksRequest: - properties: - task_ids: - items: - type: string - type: array - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.updateTaskRequest: - properties: - module: - type: string - name: - type: string - params: - properties: {} - type: object - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_ansible.updateTaskResponse: - example: - task: - module: module - name: name - created_at: created_at - id: id - position: 0 - params: "{}" - playbook_id: playbook_id - properties: - task: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.PlaybookTask" - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_error.ErrorResponse: - example: - code: 0 - details: details - error: error - properties: - code: - type: integer - details: - type: string - error: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.ErrorResponse: - example: - code: 1 - details: details - error: error - properties: - code: - type: integer - details: - type: string - error: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.accessErrorResponse: - properties: - code: - type: integer - details: - type: string - error: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.caPublicKeyResponse: - properties: - public_key: - description: PublicKey is the CA public key in OpenSSH format. - type: string - usage: - description: Usage explains how to use this key. 
- type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.certificateResponse: - properties: - id: - type: string - identity: - type: string - is_expired: - type: boolean - issued_at: - type: string - principals: - items: - type: string - type: array - sandbox_id: - type: string - serial_number: - type: integer - status: - type: string - ttl_seconds: - type: integer - user_id: - type: string - valid_after: - type: string - valid_before: - type: string - vm_id: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.createSandboxRequest: - properties: - agent_id: - description: required - type: string - auto_start: - description: "optional; if true, start the VM immediately after creation" - type: boolean - cpu: - description: optional; default from service config if <=0 - type: integer - memory_mb: - description: optional; default from service config if <=0 - type: integer - source_vm_name: - description: required; name of existing VM in libvirt to clone from - type: string - ttl_seconds: - description: optional; TTL for auto garbage collection - type: integer - wait_for_ip: - description: "optional; if true and auto_start, wait for IP discovery" - type: boolean - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.createSandboxResponse: - example: - sandbox: - host_address: host_address - agent_id: agent_id - ttl_seconds: 0 - created_at: created_at - ip_address: ip_address - deleted_at: deleted_at - network: network - base_image: base_image - updated_at: updated_at - job_id: job_id - sandbox_name: sandbox_name - id: id - state: CREATED - host_name: host_name - ip_address: ip_address - properties: - ip_address: - description: populated when auto_start and wait_for_ip are true - type: string - sandbox: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Sandbox" - type: object - 
github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.destroySandboxResponse: - example: - base_image: base_image - ttl_seconds: 0 - sandbox_name: sandbox_name - state: CREATED - properties: - base_image: - type: string - sandbox_name: - type: string - state: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.SandboxState" - ttl_seconds: - type: integer - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.diffRequest: - properties: - from_snapshot: - description: required - type: string - to_snapshot: - description: required - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.diffResponse: - example: - diff: - to_snapshot: to_snapshot - from_snapshot: from_snapshot - sandbox_id: sandbox_id - created_at: created_at - id: id - diff_json: "{}" - properties: - diff: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Diff" - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.discoverIPResponse: - example: - ip_address: ip_address - properties: - ip_address: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.generateResponse: - example: - note: note - message: message - properties: - message: - type: string - note: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.getSandboxResponse: - example: - sandbox: - host_address: host_address - agent_id: agent_id - ttl_seconds: 0 - created_at: created_at - ip_address: ip_address - deleted_at: deleted_at - network: network - base_image: base_image - updated_at: updated_at - job_id: job_id - sandbox_name: sandbox_name - id: id - state: CREATED - host_name: host_name - commands: - - metadata: - redacted: - key: redacted - work_dir: work_dir - user: user - timeout: 6 - stdout: stdout - env_json: env_json - exit_code: 0 - sandbox_id: sandbox_id - started_at: started_at - id: id - stderr: stderr - command: command 
- ended_at: ended_at - - metadata: - redacted: - key: redacted - work_dir: work_dir - user: user - timeout: 6 - stdout: stdout - env_json: env_json - exit_code: 0 - sandbox_id: sandbox_id - started_at: started_at - id: id - stderr: stderr - command: command - ended_at: ended_at - properties: - commands: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Command" - type: array - sandbox: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Sandbox" - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.healthResponse: - example: - status: status - properties: - status: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.hostError: - example: - host_address: host_address - error: error - host_name: host_name - properties: - error: - type: string - host_address: - type: string - host_name: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.injectSSHKeyRequest: - properties: - public_key: - description: required - type: string - username: - description: 'required (explicit); typical: "ubuntu" or "centos"' - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.listCertificatesResponse: - properties: - certificates: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.certificateResponse" - type: array - total: - type: integer - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.listSandboxCommandsResponse: - example: - total: 0 - commands: - - metadata: - redacted: - key: redacted - work_dir: work_dir - user: user - timeout: 6 - stdout: stdout - env_json: env_json - exit_code: 0 - sandbox_id: sandbox_id - started_at: started_at - id: id - stderr: stderr - command: command - ended_at: ended_at - - metadata: - redacted: - key: redacted - work_dir: work_dir - user: user - timeout: 6 - stdout: stdout - env_json: 
env_json - exit_code: 0 - sandbox_id: sandbox_id - started_at: started_at - id: id - stderr: stderr - command: command - ended_at: ended_at - properties: - commands: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Command" - type: array - total: - type: integer - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.listSandboxesResponse: - example: - total: 6 - sandboxes: - - base_image: base_image - agent_id: agent_id - updated_at: updated_at - job_id: job_id - ttl_seconds: 0 - created_at: created_at - sandbox_name: sandbox_name - id: id - ip_address: ip_address - state: state - network: network - - base_image: base_image - agent_id: agent_id - updated_at: updated_at - job_id: job_id - ttl_seconds: 0 - created_at: created_at - sandbox_name: sandbox_name - id: id - ip_address: ip_address - state: state - network: network - properties: - sandboxes: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.sandboxInfo" - type: array - total: - type: integer - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.listSessionsResponse: - properties: - sessions: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.sessionResponse" - type: array - total: - type: integer - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.listVMsResponse: - example: - vms: - - host_address: host_address - name: name - state: state - persistent: true - uuid: uuid - disk_path: disk_path - host_name: host_name - - host_address: host_address - name: name - state: state - persistent: true - uuid: uuid - disk_path: disk_path - host_name: host_name - host_errors: - - host_address: host_address - error: error - host_name: host_name - - host_address: host_address - error: error - host_name: host_name - properties: - host_errors: - description: Errors from unreachable hosts (multi-host mode) - items: - $ref: 
"#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.hostError" - type: array - vms: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.vmInfo" - type: array - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.publishRequest: - properties: - job_id: - description: required - type: string - message: - description: optional commit/PR message - type: string - reviewers: - description: optional - items: - type: string - type: array - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.publishResponse: - example: - note: note - message: message - properties: - message: - type: string - note: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.requestAccessRequest: - properties: - public_key: - description: PublicKey is the user's SSH public key in OpenSSH format. - type: string - sandbox_id: - description: SandboxID is the target sandbox. - type: string - ttl_minutes: - description: TTLMinutes is the requested access duration (1-10 minutes). - type: integer - user_id: - description: UserID identifies the requesting user. - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.requestAccessResponse: - properties: - certificate: - description: Certificate is the SSH certificate content (save as key-cert.pub). - type: string - certificate_id: - description: CertificateID is the ID of the issued certificate. - type: string - connect_command: - description: ConnectCommand is an example SSH command for connecting. - type: string - instructions: - description: Instructions provides usage instructions. - type: string - ssh_port: - description: SSHPort is the SSH port (usually 22). - type: integer - ttl_seconds: - description: TTLSeconds is the remaining validity in seconds. - type: integer - username: - description: Username is the SSH username to use. 
- type: string - valid_until: - description: ValidUntil is when the certificate expires (RFC3339). - type: string - vm_ip_address: - description: VMIPAddress is the IP address of the sandbox VM. - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.revokeCertificateRequest: - properties: - reason: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.revokeCertificateResponse: - properties: - id: - type: string - message: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.runCommandRequest: - properties: - command: - description: required - type: string - env: - additionalProperties: - type: string - description: optional - type: object - private_key_path: - description: - "optional; if empty, uses managed credentials (requires SSH\ - \ CA)" - type: string - timeout_sec: - description: optional; default from service config - type: integer - user: - description: optional; defaults to "sandbox" when using managed credentials - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.runCommandResponse: - example: - command: - metadata: - redacted: - key: redacted - work_dir: work_dir - user: user - timeout: 6 - stdout: stdout - env_json: env_json - exit_code: 0 - sandbox_id: sandbox_id - started_at: started_at - id: id - stderr: stderr - command: command - ended_at: ended_at - properties: - command: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Command" - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.sandboxInfo: - example: - base_image: base_image - agent_id: agent_id - updated_at: updated_at - job_id: job_id - ttl_seconds: 0 - created_at: created_at - sandbox_name: sandbox_name - id: id - ip_address: ip_address - state: state - network: network - properties: - agent_id: - type: string - base_image: - type: string - created_at: - type: string - id: - type: string - 
ip_address: - type: string - job_id: - type: string - network: - type: string - sandbox_name: - type: string - state: - type: string - ttl_seconds: - type: integer - updated_at: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.sessionEndRequest: - properties: - reason: - type: string - session_id: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.sessionEndResponse: - properties: - message: - type: string - session_id: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.sessionResponse: - properties: - certificate_id: - type: string - duration_seconds: - type: integer - ended_at: - type: string - id: - type: string - sandbox_id: - type: string - source_ip: - type: string - started_at: - type: string - status: - type: string - user_id: - type: string - vm_id: - type: string - vm_ip_address: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.sessionStartRequest: - properties: - certificate_id: - type: string - source_ip: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.sessionStartResponse: - properties: - session_id: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.snapshotRequest: - properties: - external: - description: optional; default false (internal snapshot) - type: boolean - name: - description: required - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.snapshotResponse: - example: - snapshot: - ref: ref - kind: INTERNAL - name: name - sandbox_id: sandbox_id - created_at: created_at - id: id - meta_json: meta_json - properties: - snapshot: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Snapshot" - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.startSandboxRequest: - properties: - wait_for_ip: - description: optional; default false - 
type: boolean - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.startSandboxResponse: - example: - ip_address: ip_address - properties: - ip_address: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_rest.vmInfo: - example: - host_address: host_address - name: name - state: state - persistent: true - uuid: uuid - disk_path: disk_path - host_name: host_name - properties: - disk_path: - type: string - host_address: - description: Host IP/hostname (multi-host mode) - type: string - host_name: - description: Host display name (multi-host mode) - type: string - name: - type: string - persistent: - type: boolean - state: - type: string - uuid: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.ChangeDiff: - properties: - commands_run: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.CommandSummary" - type: array - files_added: - items: - type: string - type: array - files_modified: - items: - type: string - type: array - files_removed: - items: - type: string - type: array - packages_added: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.PackageInfo" - type: array - packages_removed: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.PackageInfo" - type: array - services_changed: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.ServiceChange" - type: array - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Command: - example: - metadata: - redacted: - key: redacted - work_dir: work_dir - user: user - timeout: 6 - stdout: stdout - env_json: env_json - exit_code: 0 - sandbox_id: sandbox_id - started_at: started_at - id: id - stderr: stderr - command: command - ended_at: ended_at - properties: - command: - type: string - ended_at: - type: string - env_json: - description: 
JSON-encoded env map - type: string - exit_code: - type: integer - id: - type: string - metadata: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.CommandExecRecord" - sandbox_id: - type: string - started_at: - type: string - stderr: - type: string - stdout: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.CommandExecRecord: - example: - redacted: - key: redacted - work_dir: work_dir - user: user - timeout: 6 - properties: - redacted: - additionalProperties: - type: string - description: placeholders for secrets redaction - type: object - timeout: - $ref: "#/components/schemas/time.Duration" - user: - type: string - work_dir: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.CommandSummary: - properties: - at: - type: string - cmd: - type: string - exit_code: - type: integer - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Diff: - example: - to_snapshot: to_snapshot - from_snapshot: from_snapshot - sandbox_id: sandbox_id - created_at: created_at - id: id - diff_json: "{}" - properties: - created_at: - type: string - diff_json: - allOf: - - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.ChangeDiff" - description: JSON-encoded change diff - type: object - from_snapshot: - type: string - id: - type: string - sandbox_id: - type: string - to_snapshot: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.PackageInfo: - properties: - name: - type: string - version: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Playbook: - example: - file_path: file_path - updated_at: updated_at - hosts: hosts - name: name - created_at: created_at - id: id - become: true - properties: - become: - description: whether to use privilege escalation - type: boolean - created_at: - type: string - file_path: - description: rendered YAML file 
path - type: string - hosts: - description: 'target hosts pattern (e.g., "all", "webservers")' - type: string - id: - type: string - name: - description: unique playbook name - type: string - updated_at: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.PlaybookTask: - example: - module: module - name: name - created_at: created_at - id: id - position: 0 - params: "{}" - playbook_id: playbook_id - properties: - created_at: - type: string - id: - type: string - module: - description: "ansible module (apt, shell, copy, etc.)" - type: string - name: - description: task name/description - type: string - params: - description: module-specific parameters - properties: {} - type: object - playbook_id: - type: string - position: - description: ordering within playbook - type: integer - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Sandbox: - example: - host_address: host_address - agent_id: agent_id - ttl_seconds: 0 - created_at: created_at - ip_address: ip_address - deleted_at: deleted_at - network: network - base_image: base_image - updated_at: updated_at - job_id: job_id - sandbox_name: sandbox_name - id: id - state: CREATED - host_name: host_name - properties: - agent_id: - description: requesting agent identity - type: string - base_image: - description: base qcow2 filename - type: string - created_at: - description: Metadata - type: string - deleted_at: - type: string - host_address: - description: IP or hostname of the libvirt host - type: string - host_name: - description: - "Multi-host support: identifies which libvirt host this sandbox\ - \ runs on" - type: string - id: - description: 'e.g., "SBX-0001"' - type: string - ip_address: - description: discovered IP (if any) - type: string - job_id: - description: correlation id for the end-to-end change set - type: string - network: - description: libvirt network name - type: string - sandbox_name: - description: libvirt domain name - type: string - 
state: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.SandboxState" - ttl_seconds: - description: optional TTL for auto GC - type: integer - updated_at: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.SandboxState: - enum: - - CREATED - - STARTING - - RUNNING - - STOPPED - - DESTROYED - - ERROR - type: string - x-enum-varnames: - - SandboxStateCreated - - SandboxStateStarting - - SandboxStateRunning - - SandboxStateStopped - - SandboxStateDestroyed - - SandboxStateError - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.ServiceChange: - properties: - enabled: - type: boolean - name: - type: string - state: - description: started|stopped|restarted|reloaded - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Snapshot: - example: - ref: ref - kind: INTERNAL - name: name - sandbox_id: sandbox_id - created_at: created_at - id: id - meta_json: meta_json - properties: - created_at: - type: string - id: - type: string - kind: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.SnapshotKind" - meta_json: - description: optional JSON metadata - type: string - name: - description: logical name (unique per sandbox) - type: string - ref: - description: |- - Ref is a backend-specific reference: for internal snapshots this could be a UUID or name, - for external snapshots it could be a file path to the overlay qcow2. 
- type: string - sandbox_id: - type: string - type: object - github_com_aspectrr_fluid_sh_fluid-remote_internal_store.SnapshotKind: - enum: - - INTERNAL - - EXTERNAL - type: string - x-enum-varnames: - - SnapshotKindInternal - - SnapshotKindExternal - internal_ansible.Job: - properties: - check: - type: boolean - id: - type: string - playbook: - type: string - status: - $ref: "#/components/schemas/internal_ansible.JobStatus" - vm_name: - type: string - type: object - internal_ansible.JobRequest: - properties: - check: - type: boolean - playbook: - type: string - vm_name: - type: string - type: object - internal_ansible.JobResponse: - properties: - job_id: - type: string - ws_url: - type: string - type: object - internal_ansible.JobStatus: - enum: - - pending - - running - - finished - - failed - type: string - x-enum-varnames: - - JobStatusPending - - JobStatusRunning - - JobStatusFinished - - JobStatusFailed - internal_ansible.addTaskRequest: - properties: - module: - type: string - name: - type: string - params: - properties: {} - type: object - type: object - internal_ansible.addTaskResponse: - properties: - task: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.PlaybookTask" - type: object - internal_ansible.createPlaybookRequest: - properties: - become: - type: boolean - hosts: - type: string - name: - type: string - type: object - internal_ansible.createPlaybookResponse: - properties: - playbook: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Playbook" - type: object - internal_ansible.exportPlaybookResponse: - properties: - yaml: - type: string - type: object - internal_ansible.getPlaybookResponse: - properties: - playbook: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Playbook" - tasks: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.PlaybookTask" - type: array - type: object - 
internal_ansible.listPlaybooksResponse: - properties: - playbooks: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Playbook" - type: array - total: - type: integer - type: object - internal_ansible.reorderTasksRequest: - properties: - task_ids: - items: - type: string - type: array - type: object - internal_ansible.updateTaskRequest: - properties: - module: - type: string - name: - type: string - params: - properties: {} - type: object - type: object - internal_ansible.updateTaskResponse: - properties: - task: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.PlaybookTask" - type: object - internal_rest.ErrorResponse: - properties: - code: - type: integer - details: - type: string - error: - type: string - type: object - internal_rest.accessErrorResponse: - example: - code: 0 - details: details - error: error - properties: - code: - type: integer - details: - type: string - error: - type: string - type: object - internal_rest.caPublicKeyResponse: - example: - public_key: public_key - usage: usage - properties: - public_key: - description: PublicKey is the CA public key in OpenSSH format. - type: string - usage: - description: Usage explains how to use this key. 
- type: string - type: object - internal_rest.certificateResponse: - example: - ttl_seconds: 6 - principals: - - principals - - principals - serial_number: 0 - issued_at: issued_at - valid_after: valid_after - vm_id: vm_id - is_expired: true - valid_before: valid_before - user_id: user_id - identity: identity - sandbox_id: sandbox_id - id: id - status: status - properties: - id: - type: string - identity: - type: string - is_expired: - type: boolean - issued_at: - type: string - principals: - items: - type: string - type: array - sandbox_id: - type: string - serial_number: - type: integer - status: - type: string - ttl_seconds: - type: integer - user_id: - type: string - valid_after: - type: string - valid_before: - type: string - vm_id: - type: string - type: object - internal_rest.createSandboxRequest: - properties: - agent_id: - description: required - type: string - auto_start: - description: "optional; if true, start the VM immediately after creation" - type: boolean - cpu: - description: optional; default from service config if <=0 - type: integer - memory_mb: - description: optional; default from service config if <=0 - type: integer - source_vm_name: - description: required; name of existing VM in libvirt to clone from - type: string - ttl_seconds: - description: optional; TTL for auto garbage collection - type: integer - wait_for_ip: - description: "optional; if true and auto_start, wait for IP discovery" - type: boolean - type: object - internal_rest.createSandboxResponse: - properties: - ip_address: - description: populated when auto_start and wait_for_ip are true - type: string - sandbox: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Sandbox" - type: object - internal_rest.destroySandboxResponse: - properties: - base_image: - type: string - sandbox_name: - type: string - state: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.SandboxState" - ttl_seconds: - type: integer - 
type: object - internal_rest.diffRequest: - properties: - from_snapshot: - description: required - type: string - to_snapshot: - description: required - type: string - type: object - internal_rest.diffResponse: - properties: - diff: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Diff" - type: object - internal_rest.discoverIPResponse: - properties: - ip_address: - type: string - type: object - internal_rest.generateResponse: - properties: - message: - type: string - note: - type: string - type: object - internal_rest.getSandboxResponse: - properties: - commands: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Command" - type: array - sandbox: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Sandbox" - type: object - internal_rest.healthResponse: - properties: - status: - type: string - type: object - internal_rest.hostError: - properties: - error: - type: string - host_address: - type: string - host_name: - type: string - type: object - internal_rest.injectSSHKeyRequest: - properties: - public_key: - description: required - type: string - username: - description: 'required (explicit); typical: "ubuntu" or "centos"' - type: string - type: object - internal_rest.listCertificatesResponse: - example: - total: 0 - certificates: - - ttl_seconds: 6 - principals: - - principals - - principals - serial_number: 0 - issued_at: issued_at - valid_after: valid_after - vm_id: vm_id - is_expired: true - valid_before: valid_before - user_id: user_id - identity: identity - sandbox_id: sandbox_id - id: id - status: status - - ttl_seconds: 6 - principals: - - principals - - principals - serial_number: 0 - issued_at: issued_at - valid_after: valid_after - vm_id: vm_id - is_expired: true - valid_before: valid_before - user_id: user_id - identity: identity - sandbox_id: sandbox_id - id: id - status: status - properties: - certificates: - items: - $ref: 
"#/components/schemas/internal_rest.certificateResponse" - type: array - total: - type: integer - type: object - internal_rest.listSandboxCommandsResponse: - properties: - commands: - items: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Command" - type: array - total: - type: integer - type: object - internal_rest.listSandboxesResponse: - properties: - sandboxes: - items: - $ref: "#/components/schemas/internal_rest.sandboxInfo" - type: array - total: - type: integer - type: object - internal_rest.listSessionsResponse: - example: - sessions: - - certificate_id: certificate_id - duration_seconds: 0 - vm_id: vm_id - user_id: user_id - sandbox_id: sandbox_id - started_at: started_at - vm_ip_address: vm_ip_address - id: id - ended_at: ended_at - source_ip: source_ip - status: status - - certificate_id: certificate_id - duration_seconds: 0 - vm_id: vm_id - user_id: user_id - sandbox_id: sandbox_id - started_at: started_at - vm_ip_address: vm_ip_address - id: id - ended_at: ended_at - source_ip: source_ip - status: status - total: 6 - properties: - sessions: - items: - $ref: "#/components/schemas/internal_rest.sessionResponse" - type: array - total: - type: integer - type: object - internal_rest.listVMsResponse: - properties: - host_errors: - description: Errors from unreachable hosts (multi-host mode) - items: - $ref: "#/components/schemas/internal_rest.hostError" - type: array - vms: - items: - $ref: "#/components/schemas/internal_rest.vmInfo" - type: array - type: object - internal_rest.publishRequest: - properties: - job_id: - description: required - type: string - message: - description: optional commit/PR message - type: string - reviewers: - description: optional - items: - type: string - type: array - type: object - internal_rest.publishResponse: - properties: - message: - type: string - note: - type: string - type: object - internal_rest.requestAccessRequest: - properties: - public_key: - description: PublicKey is the 
user's SSH public key in OpenSSH format. - type: string - sandbox_id: - description: SandboxID is the target sandbox. - type: string - ttl_minutes: - description: TTLMinutes is the requested access duration (1-10 minutes). - type: integer - user_id: - description: UserID identifies the requesting user. - type: string - type: object - internal_rest.requestAccessResponse: - example: - certificate_id: certificate_id - instructions: instructions - ssh_port: 0 - valid_until: valid_until - certificate: certificate - ttl_seconds: 6 - connect_command: connect_command - vm_ip_address: vm_ip_address - username: username - properties: - certificate: - description: Certificate is the SSH certificate content (save as key-cert.pub). - type: string - certificate_id: - description: CertificateID is the ID of the issued certificate. - type: string - connect_command: - description: ConnectCommand is an example SSH command for connecting. - type: string - instructions: - description: Instructions provides usage instructions. - type: string - ssh_port: - description: SSHPort is the SSH port (usually 22). - type: integer - ttl_seconds: - description: TTLSeconds is the remaining validity in seconds. - type: integer - username: - description: Username is the SSH username to use. - type: string - valid_until: - description: ValidUntil is when the certificate expires (RFC3339). - type: string - vm_ip_address: - description: VMIPAddress is the IP address of the sandbox VM. 
- type: string - type: object - internal_rest.revokeCertificateRequest: - properties: - reason: - type: string - type: object - internal_rest.revokeCertificateResponse: - example: - id: id - message: message - properties: - id: - type: string - message: - type: string - type: object - internal_rest.runCommandRequest: - properties: - command: - description: required - type: string - env: - additionalProperties: - type: string - description: optional - type: object - private_key_path: - description: - "optional; if empty, uses managed credentials (requires SSH\ - \ CA)" - type: string - timeout_sec: - description: optional; default from service config - type: integer - user: - description: optional; defaults to "sandbox" when using managed credentials - type: string - type: object - internal_rest.runCommandResponse: - properties: - command: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Command" - type: object - internal_rest.sandboxInfo: - properties: - agent_id: - type: string - base_image: - type: string - created_at: - type: string - id: - type: string - ip_address: - type: string - job_id: - type: string - network: - type: string - sandbox_name: - type: string - state: - type: string - ttl_seconds: - type: integer - updated_at: - type: string - type: object - internal_rest.sessionEndRequest: - properties: - reason: - type: string - session_id: - type: string - type: object - internal_rest.sessionEndResponse: - example: - session_id: session_id - message: message - properties: - message: - type: string - session_id: - type: string - type: object - internal_rest.sessionResponse: - example: - certificate_id: certificate_id - duration_seconds: 0 - vm_id: vm_id - user_id: user_id - sandbox_id: sandbox_id - started_at: started_at - vm_ip_address: vm_ip_address - id: id - ended_at: ended_at - source_ip: source_ip - status: status - properties: - certificate_id: - type: string - duration_seconds: - type: integer - ended_at: - 
type: string - id: - type: string - sandbox_id: - type: string - source_ip: - type: string - started_at: - type: string - status: - type: string - user_id: - type: string - vm_id: - type: string - vm_ip_address: - type: string - type: object - internal_rest.sessionStartRequest: - properties: - certificate_id: - type: string - source_ip: - type: string - type: object - internal_rest.sessionStartResponse: - example: - session_id: session_id - properties: - session_id: - type: string - type: object - internal_rest.snapshotRequest: - properties: - external: - description: optional; default false (internal snapshot) - type: boolean - name: - description: required - type: string - type: object - internal_rest.snapshotResponse: - properties: - snapshot: - $ref: "#/components/schemas/github_com_aspectrr_fluid_sh_fluid-remote_internal_store.Snapshot" - type: object - internal_rest.startSandboxRequest: - properties: - wait_for_ip: - description: optional; default false - type: boolean - type: object - internal_rest.startSandboxResponse: - properties: - ip_address: - type: string - type: object - internal_rest.vmInfo: - properties: - disk_path: - type: string - host_address: - description: Host IP/hostname (multi-host mode) - type: string - host_name: - description: Host display name (multi-host mode) - type: string - name: - type: string - persistent: - type: boolean - state: - type: string - uuid: - type: string - type: object - time.Duration: - enum: - - -9223372036854775808 - - 9223372036854775807 - - 1 - - 1000 - - 1000000 - - 1000000000 - - 60000000000 - - 3600000000000 - format: int64 - type: integer - x-enum-varnames: - - minDuration - - maxDuration - - Nanosecond - - Microsecond - - Millisecond - - Second - - Minute - - Hour -x-original-swagger-version: "2.0" diff --git a/fluid-remote/go.sum b/fluid-remote/go.sum deleted file mode 100644 index 55d75a0e..00000000 --- a/fluid-remote/go.sum +++ /dev/null @@ -1,98 +0,0 @@ -filippo.io/edwards25519 v1.1.0 
h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/MarceloPetrucio/go-scalar-api-reference v0.0.0-20240521013641-ce5d2efe0e06 h1:W4Yar1SUsPmmA51qoIRb174uDO/Xt3C48MB1YX9Y3vM= -github.com/MarceloPetrucio/go-scalar-api-reference v0.0.0-20240521013641-ce5d2efe0e06/go.mod h1:/wotfjM8I3m8NuIHPz3S8k+CCYH80EqDT8ZeNLqMQm0= -github.com/beevik/etree v1.4.0 h1:oz1UedHRepuY3p4N5OjE0nK1WLCqtzHf25bxplKOHLs= -github.com/beevik/etree v1.4.0/go.mod h1:cyWiXwGoasx60gHvtnEh5x8+uIjUVnjWqBvEnhnqKDA= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk= -github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= -github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= -github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= -github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= -github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= -github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= -github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= -github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= -github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= -github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= -github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= -github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.6.0 
h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= -github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= -github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= -github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= -github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= -github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA= -github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posthog/posthog-go v1.9.0 h1:7tRfnaHqPNrBNTnSnFLQwJ5aVz6LOBngiwl15lD8bHU= -github.com/posthog/posthog-go v1.9.0/go.mod h1:0i1H2BlsK9mHvHGc9Kp6oenUlHUqPl45hWzRtR/2PVI= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify 
v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= -golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/datatypes v1.2.7 h1:ww9GAhF1aGXZY3EB3cJPJ7//JiuQo7DlQA7NNlVaTdk= -gorm.io/datatypes v1.2.7/go.mod h1:M2iO+6S3hhi4nAyYe444Pcb0dcIiOMJ7QHaUXxyiNZY= -gorm.io/driver/mysql v1.5.6 h1:Ld4mkIickM+EliaQZQx3uOJDJHtrd70MxAUqWqlx3Y8= -gorm.io/driver/mysql v1.5.6/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM= -gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4= -gorm.io/driver/postgres v1.6.0/go.mod 
h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo= -gorm.io/driver/sqlite v1.6.0 h1:WHRRrIiulaPiPFmDcod6prc4l2VGVWHz80KspNsxSfQ= -gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8= -gorm.io/driver/sqlserver v1.6.0 h1:VZOBQVsVhkHU/NzNhRJKoANt5pZGQAS1Bwc6m6dgfnc= -gorm.io/driver/sqlserver v1.6.0/go.mod h1:WQzt4IJo/WHKnckU9jXBLMJIVNMVeTu25dnOzehntWw= -gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= -gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg= -gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs= -libvirt.org/go/libvirt v1.11010.0 h1:1EIh2x6qcRoIBBOvrgN62vq5FIpgUBrmGadprQ/4M0Y= -libvirt.org/go/libvirt v1.11010.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ= diff --git a/fluid-remote/internal/ansible/ansible.go b/fluid-remote/internal/ansible/ansible.go deleted file mode 100755 index 3f4534b0..00000000 --- a/fluid-remote/internal/ansible/ansible.go +++ /dev/null @@ -1,195 +0,0 @@ -package ansible - -import ( - "bufio" - "context" - "fmt" - "os/exec" - "sync" - - "github.com/google/uuid" -) - -// JobStatus represents the current state of an Ansible job. -type JobStatus string - -const ( - JobStatusPending JobStatus = "pending" - JobStatusRunning JobStatus = "running" - JobStatusFinished JobStatus = "finished" - JobStatusFailed JobStatus = "failed" -) - -// Job represents an Ansible playbook execution job. -type Job struct { - ID string `json:"id"` - VMName string `json:"vm_name"` - Playbook string `json:"playbook"` - Check bool `json:"check"` - Status JobStatus `json:"status"` -} - -// JobRequest contains parameters for creating a new Ansible job. -type JobRequest struct { - VMName string `json:"vm_name"` - Playbook string `json:"playbook"` - Check bool `json:"check"` -} - -// JobResponse is returned when a job is created. -type JobResponse struct { - JobID string `json:"job_id"` - WSURL string `json:"ws_url"` -} - -// Runner manages Ansible job execution. 
-type Runner struct { - mu sync.RWMutex - jobs map[string]*Job - allowedPlaybooks map[string]struct{} - inventoryPath string - ansibleImage string -} - -// NewRunner creates a new Ansible runner. -func NewRunner(inventoryPath, ansibleImage string, allowedPlaybooks []string) *Runner { - allowed := make(map[string]struct{}, len(allowedPlaybooks)) - for _, p := range allowedPlaybooks { - allowed[p] = struct{}{} - } - - return &Runner{ - jobs: make(map[string]*Job), - allowedPlaybooks: allowed, - inventoryPath: inventoryPath, - ansibleImage: ansibleImage, - } -} - -// CreateJob creates a new Ansible job and returns its ID. -func (r *Runner) CreateJob(req JobRequest) (*JobResponse, error) { - if _, ok := r.allowedPlaybooks[req.Playbook]; !ok { - return nil, fmt.Errorf("playbook not allowed: %s", req.Playbook) - } - - jobID := uuid.New().String() - job := &Job{ - ID: jobID, - VMName: req.VMName, - Playbook: req.Playbook, - Check: req.Check, - Status: JobStatusPending, - } - - r.mu.Lock() - r.jobs[jobID] = job - r.mu.Unlock() - - return &JobResponse{ - JobID: jobID, - WSURL: fmt.Sprintf("/ws/ansible/jobs/%s", jobID), - }, nil -} - -// GetJob retrieves a job by ID. -func (r *Runner) GetJob(jobID string) (*Job, bool) { - r.mu.RLock() - defer r.mu.RUnlock() - job, ok := r.jobs[jobID] - return job, ok -} - -// SetJobStatus updates a job's status. -func (r *Runner) SetJobStatus(jobID string, status JobStatus) { - r.mu.Lock() - defer r.mu.Unlock() - if job, ok := r.jobs[jobID]; ok { - job.Status = status - } -} - -// OutputWriter is an interface for writing job output lines. -type OutputWriter interface { - WriteLine(line string) error -} - -// RunJob executes an Ansible job via Docker and streams output to the writer. -// This is a blocking call that returns when the job completes. 
-func (r *Runner) RunJob(ctx context.Context, jobID string, writer OutputWriter) error { - job, ok := r.GetJob(jobID) - if !ok { - return fmt.Errorf("job not found: %s", jobID) - } - - // Build ansible command - ansibleCmd := fmt.Sprintf( - "ansible-playbook -i %s playbooks/%s --limit %s", - r.inventoryPath, - job.Playbook, - job.VMName, - ) - if job.Check { - ansibleCmd += " --check" - } - - if err := writer.WriteLine(fmt.Sprintf("Running: %s\n", ansibleCmd)); err != nil { - return err - } - - // Build docker command - dockerArgs := []string{ - "run", - "--rm", - "--network", "host", - "--read-only", - "--pids-limit", "128", - "--memory", "512m", - "-e", fmt.Sprintf("ANSIBLE_CMD=%s", ansibleCmd), - "-v", "/ansible:/runner:ro", - "-v", "/var/run/libvirt:/var/run/libvirt", - r.ansibleImage, - } - - cmd := exec.CommandContext(ctx, "docker", dockerArgs...) - - // Capture stdout and stderr together - stdout, err := cmd.StdoutPipe() - if err != nil { - return fmt.Errorf("failed to create stdout pipe: %w", err) - } - cmd.Stderr = cmd.Stdout // Merge stderr into stdout - - r.SetJobStatus(jobID, JobStatusRunning) - - if err := cmd.Start(); err != nil { - r.SetJobStatus(jobID, JobStatusFailed) - return fmt.Errorf("failed to start docker: %w", err) - } - - // Stream output line by line - scanner := bufio.NewScanner(stdout) - for scanner.Scan() { - if err := writer.WriteLine(scanner.Text()); err != nil { - // Client disconnected, but let the process continue - break - } - } - - // Wait for the process to complete - exitErr := cmd.Wait() - exitCode := 0 - if exitErr != nil { - if exitError, ok := exitErr.(*exec.ExitError); ok { - exitCode = exitError.ExitCode() - } else { - r.SetJobStatus(jobID, JobStatusFailed) - return fmt.Errorf("failed to wait for docker: %w", exitErr) - } - } - - r.SetJobStatus(jobID, JobStatusFinished) - - _ = writer.WriteLine(fmt.Sprintf("\nJob finished (rc=%d)", exitCode)) - - return nil -} diff --git a/fluid-remote/internal/ansible/handler.go 
b/fluid-remote/internal/ansible/handler.go deleted file mode 100755 index 47445ee7..00000000 --- a/fluid-remote/internal/ansible/handler.go +++ /dev/null @@ -1,196 +0,0 @@ -package ansible - -import ( - "context" - "net/http" - "time" - - "github.com/go-chi/chi/v5" - "github.com/gorilla/websocket" - - serverError "github.com/aspectrr/fluid.sh/fluid-remote/internal/error" - serverJSON "github.com/aspectrr/fluid.sh/fluid-remote/internal/json" -) - -// Handler provides HTTP handlers for Ansible operations. -type Handler struct { - runner *Runner - upgrader websocket.Upgrader -} - -// NewHandler creates a new Ansible HTTP handler. -func NewHandler(runner *Runner) *Handler { - return &Handler{ - runner: runner, - upgrader: websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, - CheckOrigin: func(r *http.Request) bool { - // Allow all origins for now; tighten in production - return true - }, - }, - } -} - -// wsOutputWriter implements OutputWriter for WebSocket connections. -type wsOutputWriter struct { - conn *websocket.Conn -} - -func (w *wsOutputWriter) WriteLine(line string) error { - return w.conn.WriteMessage(websocket.TextMessage, []byte(line)) -} - -// HandleCreateJob creates a new Ansible job. 
-// @Summary Create Ansible job -// @Description Creates a new Ansible playbook execution job -// @Tags Ansible -// @Accept json -// @Produce json -// @Param request body JobRequest true "Job creation parameters" -// @Success 200 {object} JobResponse -// @Failure 400 {object} serverError.ErrorResponse -// @Id createAnsibleJob -// @Router /v1/ansible/jobs [post] -func (h *Handler) HandleCreateJob(w http.ResponseWriter, r *http.Request) { - var req JobRequest - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - - if req.VMName == "" { - serverError.RespondError(w, http.StatusBadRequest, - &validationError{field: "vm_name", message: "vm_name is required"}) - return - } - if req.Playbook == "" { - serverError.RespondError(w, http.StatusBadRequest, - &validationError{field: "playbook", message: "playbook is required"}) - return - } - - resp, err := h.runner.CreateJob(req) - if err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, resp) -} - -// HandleGetJob retrieves job status. -// @Summary Get Ansible job -// @Description Gets the status of an Ansible job -// @Tags Ansible -// @Accept json -// @Produce json -// @Param job_id path string true "Job ID" -// @Success 200 {object} Job -// @Failure 404 {object} serverError.ErrorResponse -// @Id getAnsibleJob -// @Router /v1/ansible/jobs/{job_id} [get] -func (h *Handler) HandleGetJob(w http.ResponseWriter, r *http.Request) { - jobID := chi.URLParam(r, "job_id") - - job, ok := h.runner.GetJob(jobID) - if !ok { - serverError.RespondError(w, http.StatusNotFound, - ¬FoundError{resource: "job", id: jobID}) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, job) -} - -// HandleJobWebSocket handles WebSocket connections for job output streaming. 
-// @Summary Stream Ansible job output -// @Description Connects via WebSocket to run an Ansible job and stream output -// @Tags Ansible -// @Param job_id path string true "Job ID" -// @Success 101 {string} string "Switching Protocols - WebSocket connection established" -// @Failure 404 {string} string "Invalid job ID" -// @Failure 409 {string} string "Job already started or finished" -// @Id streamAnsibleJobOutput -// @Router /v1/ansible/jobs/{job_id}/stream [get] -func (h *Handler) HandleJobWebSocket(w http.ResponseWriter, r *http.Request) { - jobID := chi.URLParam(r, "job_id") - - job, ok := h.runner.GetJob(jobID) - if !ok { - http.Error(w, "Invalid job ID", http.StatusNotFound) - return - } - - if job.Status != JobStatusPending { - http.Error(w, "Job already started or finished", http.StatusConflict) - return - } - - conn, err := h.upgrader.Upgrade(w, r, nil) - if err != nil { - // Upgrade already sends the error response - return - } - defer func() { _ = conn.Close() }() - - // Set a reasonable deadline for the entire job - if err := conn.SetWriteDeadline(time.Now().Add(10 * time.Minute)); err != nil { - return - } - - writer := &wsOutputWriter{conn: conn} - - ctx, cancel := context.WithTimeout(r.Context(), 10*time.Minute) - defer cancel() - - if err := h.runner.RunJob(ctx, jobID, writer); err != nil { - _ = conn.WriteMessage(websocket.TextMessage, []byte("Error: "+err.Error())) - } - - _ = conn.WriteMessage(websocket.CloseMessage, - websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) -} - -// RegisterRoutes registers Ansible routes on the given router. -func (h *Handler) RegisterRoutes(r chi.Router) { - r.Route("/ansible", func(r chi.Router) { - r.Post("/jobs", h.HandleCreateJob) - r.Get("/jobs/{job_id}", h.HandleGetJob) - r.Get("/jobs/{job_id}/stream", h.HandleJobWebSocket) - }) -} - -// RegisterRoutesWithPlaybooks registers Ansible routes including playbook management. 
-func (h *Handler) RegisterRoutesWithPlaybooks(r chi.Router, playbookHandler *PlaybookHandler) { - r.Route("/ansible", func(r chi.Router) { - r.Post("/jobs", h.HandleCreateJob) - r.Get("/jobs/{job_id}", h.HandleGetJob) - r.Get("/jobs/{job_id}/stream", h.HandleJobWebSocket) - - if playbookHandler != nil { - playbookHandler.RegisterPlaybookRoutes(r) - } - }) -} - -// validationError represents a validation error. -type validationError struct { - field string - message string -} - -func (e *validationError) Error() string { - return e.message -} - -// notFoundError represents a resource not found error. -type notFoundError struct { - resource string - id string -} - -func (e *notFoundError) Error() string { - return e.resource + " not found: " + e.id -} diff --git a/fluid-remote/internal/ansible/playbook.go b/fluid-remote/internal/ansible/playbook.go deleted file mode 100755 index 8f8b81d7..00000000 --- a/fluid-remote/internal/ansible/playbook.go +++ /dev/null @@ -1,350 +0,0 @@ -package ansible - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "github.com/google/uuid" - "gopkg.in/yaml.v3" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/store" -) - -// PlaybookService manages Ansible playbook creation and rendering. -type PlaybookService struct { - store store.DataStore - playbooksDir string -} - -// NewPlaybookService creates a new PlaybookService. -func NewPlaybookService(st store.DataStore, playbooksDir string) *PlaybookService { - return &PlaybookService{ - store: st, - playbooksDir: playbooksDir, - } -} - -// PlaybookDir returns the configured playbooks directory. -func (s *PlaybookService) PlaybookDir() string { - return s.playbooksDir -} - -// CreatePlaybookRequest contains parameters for creating a new playbook. -type CreatePlaybookRequest struct { - Name string `json:"name"` - Hosts string `json:"hosts"` - Become bool `json:"become"` -} - -// CreatePlaybook creates a new playbook in the database. 
-func (s *PlaybookService) CreatePlaybook(ctx context.Context, req CreatePlaybookRequest) (*store.Playbook, error) { - if req.Name == "" { - return nil, fmt.Errorf("name is required") - } - if req.Hosts == "" { - req.Hosts = "all" - } - - pb := &store.Playbook{ - ID: uuid.New().String(), - Name: req.Name, - Hosts: req.Hosts, - Become: req.Become, - } - - if err := s.store.CreatePlaybook(ctx, pb); err != nil { - return nil, fmt.Errorf("create playbook: %w", err) - } - - return pb, nil -} - -// GetPlaybook retrieves a playbook by ID. -func (s *PlaybookService) GetPlaybook(ctx context.Context, id string) (*store.Playbook, error) { - return s.store.GetPlaybook(ctx, id) -} - -// GetPlaybookByName retrieves a playbook by name. -func (s *PlaybookService) GetPlaybookByName(ctx context.Context, name string) (*store.Playbook, error) { - return s.store.GetPlaybookByName(ctx, name) -} - -// ListPlaybooks lists all playbooks. -func (s *PlaybookService) ListPlaybooks(ctx context.Context, opt *store.ListOptions) ([]*store.Playbook, error) { - return s.store.ListPlaybooks(ctx, opt) -} - -// DeletePlaybook deletes a playbook and its tasks. -func (s *PlaybookService) DeletePlaybook(ctx context.Context, id string) error { - // Get playbook to find file path - pb, err := s.store.GetPlaybook(ctx, id) - if err != nil { - return err - } - - // Delete from database - if err := s.store.DeletePlaybook(ctx, id); err != nil { - return err - } - - // Remove rendered file if it exists - if pb.FilePath != nil && *pb.FilePath != "" { - _ = os.Remove(*pb.FilePath) - } - - return nil -} - -// AddTaskRequest contains parameters for adding a task to a playbook. -type AddTaskRequest struct { - Name string `json:"name"` - Module string `json:"module"` - Params map[string]any `json:"params"` -} - -// AddTask adds a task to an existing playbook and re-renders the YAML. 
-func (s *PlaybookService) AddTask(ctx context.Context, playbookID string, req AddTaskRequest) (*store.PlaybookTask, error) { - if req.Name == "" { - return nil, fmt.Errorf("task name is required") - } - if req.Module == "" { - return nil, fmt.Errorf("module is required") - } - - // Get next position - pos, err := s.store.GetNextTaskPosition(ctx, playbookID) - if err != nil { - return nil, fmt.Errorf("get next position: %w", err) - } - - task := &store.PlaybookTask{ - ID: uuid.New().String(), - PlaybookID: playbookID, - Position: pos, - Name: req.Name, - Module: req.Module, - Params: req.Params, - } - - if err := s.store.CreatePlaybookTask(ctx, task); err != nil { - return nil, fmt.Errorf("create task: %w", err) - } - - // Re-render playbook to disk - if err := s.RenderPlaybook(ctx, playbookID); err != nil { - return nil, fmt.Errorf("render playbook: %w", err) - } - - return task, nil -} - -// GetTask retrieves a task by ID. -func (s *PlaybookService) GetTask(ctx context.Context, id string) (*store.PlaybookTask, error) { - return s.store.GetPlaybookTask(ctx, id) -} - -// ListTasks lists all tasks for a playbook. -func (s *PlaybookService) ListTasks(ctx context.Context, playbookID string) ([]*store.PlaybookTask, error) { - return s.store.ListPlaybookTasks(ctx, playbookID, nil) -} - -// UpdateTaskRequest contains parameters for updating a task. -type UpdateTaskRequest struct { - Name *string `json:"name,omitempty"` - Module *string `json:"module,omitempty"` - Params map[string]any `json:"params,omitempty"` -} - -// UpdateTask updates an existing task and re-renders the playbook. 
-func (s *PlaybookService) UpdateTask(ctx context.Context, taskID string, req UpdateTaskRequest) (*store.PlaybookTask, error) { - task, err := s.store.GetPlaybookTask(ctx, taskID) - if err != nil { - return nil, err - } - - if req.Name != nil { - task.Name = *req.Name - } - if req.Module != nil { - task.Module = *req.Module - } - if req.Params != nil { - task.Params = req.Params - } - - if err := s.store.UpdatePlaybookTask(ctx, task); err != nil { - return nil, fmt.Errorf("update task: %w", err) - } - - // Re-render playbook - if err := s.RenderPlaybook(ctx, task.PlaybookID); err != nil { - return nil, fmt.Errorf("render playbook: %w", err) - } - - return task, nil -} - -// DeleteTask removes a task from a playbook and re-renders. -func (s *PlaybookService) DeleteTask(ctx context.Context, taskID string) error { - task, err := s.store.GetPlaybookTask(ctx, taskID) - if err != nil { - return err - } - playbookID := task.PlaybookID - - if err := s.store.DeletePlaybookTask(ctx, taskID); err != nil { - return err - } - - // Re-render playbook - return s.RenderPlaybook(ctx, playbookID) -} - -// ReorderTasksRequest contains the new task order. -type ReorderTasksRequest struct { - TaskIDs []string `json:"task_ids"` -} - -// ReorderTasks reorders tasks in a playbook and re-renders. -func (s *PlaybookService) ReorderTasks(ctx context.Context, playbookID string, taskIDs []string) error { - if err := s.store.ReorderPlaybookTasks(ctx, playbookID, taskIDs); err != nil { - return err - } - return s.RenderPlaybook(ctx, playbookID) -} - -// RenderPlaybook generates the YAML file from the database state. 
-func (s *PlaybookService) RenderPlaybook(ctx context.Context, playbookID string) error { - pb, err := s.store.GetPlaybook(ctx, playbookID) - if err != nil { - return fmt.Errorf("get playbook: %w", err) - } - - tasks, err := s.store.ListPlaybookTasks(ctx, playbookID, nil) - if err != nil { - return fmt.Errorf("list tasks: %w", err) - } - - yamlContent, err := s.renderYAML(pb, tasks) - if err != nil { - return fmt.Errorf("render yaml: %w", err) - } - - // Ensure directory exists - if err := os.MkdirAll(s.playbooksDir, 0o750); err != nil { - return fmt.Errorf("create playbooks dir: %w", err) - } - - // Write to file - filePath := filepath.Join(s.playbooksDir, pb.Name+".yml") - if err := os.WriteFile(filePath, yamlContent, 0o640); err != nil { - return fmt.Errorf("write file: %w", err) - } - - // Update playbook with file path - pb.FilePath = &filePath - if err := s.store.UpdatePlaybook(ctx, pb); err != nil { - return fmt.Errorf("update playbook path: %w", err) - } - - return nil -} - -// ExportPlaybook returns the YAML content without writing to disk. -func (s *PlaybookService) ExportPlaybook(ctx context.Context, playbookID string) ([]byte, error) { - pb, err := s.store.GetPlaybook(ctx, playbookID) - if err != nil { - return nil, fmt.Errorf("get playbook: %w", err) - } - - tasks, err := s.store.ListPlaybookTasks(ctx, playbookID, nil) - if err != nil { - return nil, fmt.Errorf("list tasks: %w", err) - } - - return s.renderYAML(pb, tasks) -} - -// ansiblePlay represents a single play in an Ansible playbook. -type ansiblePlay struct { - Name string `yaml:"name"` - Hosts string `yaml:"hosts"` - Become bool `yaml:"become,omitempty"` - Tasks []ansibleTask `yaml:"tasks"` -} - -// ansibleTask represents a task in YAML format. -type ansibleTask map[string]any - -// renderYAML converts playbook and tasks to Ansible YAML format. 
-func (s *PlaybookService) renderYAML(pb *store.Playbook, tasks []*store.PlaybookTask) ([]byte, error) { - ansibleTasks := make([]ansibleTask, 0, len(tasks)) - for _, t := range tasks { - task := ansibleTask{ - "name": t.Name, - } - // Add module with its params - if len(t.Params) > 0 { - task[t.Module] = t.Params - } else { - task[t.Module] = nil - } - ansibleTasks = append(ansibleTasks, task) - } - - play := ansiblePlay{ - Name: pb.Name, - Hosts: pb.Hosts, - Become: pb.Become, - Tasks: ansibleTasks, - } - - // Ansible playbook is a list of plays - playbook := []ansiblePlay{play} - - return yaml.Marshal(playbook) -} - -// PlaybookWithTasks combines a playbook with its tasks for API responses. -type PlaybookWithTasks struct { - Playbook *store.Playbook `json:"playbook"` - Tasks []*store.PlaybookTask `json:"tasks"` -} - -// GetPlaybookWithTasks retrieves a playbook along with all its tasks. -func (s *PlaybookService) GetPlaybookWithTasks(ctx context.Context, playbookID string) (*PlaybookWithTasks, error) { - pb, err := s.store.GetPlaybook(ctx, playbookID) - if err != nil { - return nil, err - } - - tasks, err := s.store.ListPlaybookTasks(ctx, playbookID, nil) - if err != nil { - return nil, err - } - - return &PlaybookWithTasks{ - Playbook: pb, - Tasks: tasks, - }, nil -} - -// GetPlaybookWithTasksByName retrieves a playbook by name along with all its tasks. 
-func (s *PlaybookService) GetPlaybookWithTasksByName(ctx context.Context, name string) (*PlaybookWithTasks, error) { - pb, err := s.store.GetPlaybookByName(ctx, name) - if err != nil { - return nil, err - } - - tasks, err := s.store.ListPlaybookTasks(ctx, pb.ID, nil) - if err != nil { - return nil, err - } - - return &PlaybookWithTasks{ - Playbook: pb, - Tasks: tasks, - }, nil -} diff --git a/fluid-remote/internal/ansible/playbook_handler.go b/fluid-remote/internal/ansible/playbook_handler.go deleted file mode 100755 index 75c02211..00000000 --- a/fluid-remote/internal/ansible/playbook_handler.go +++ /dev/null @@ -1,407 +0,0 @@ -package ansible - -import ( - "errors" - "net/http" - - "github.com/go-chi/chi/v5" - - serverError "github.com/aspectrr/fluid.sh/fluid-remote/internal/error" - serverJSON "github.com/aspectrr/fluid.sh/fluid-remote/internal/json" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/store" -) - -// PlaybookHandler provides HTTP handlers for playbook management. -type PlaybookHandler struct { - svc *PlaybookService -} - -// NewPlaybookHandler creates a new PlaybookHandler. 
-func NewPlaybookHandler(svc *PlaybookService) *PlaybookHandler { - return &PlaybookHandler{svc: svc} -} - -// --- Request/Response DTOs --- - -type createPlaybookRequest struct { - Name string `json:"name"` - Hosts string `json:"hosts"` - Become bool `json:"become"` -} - -type createPlaybookResponse struct { - Playbook *store.Playbook `json:"playbook"` -} - -type getPlaybookResponse struct { - Playbook *store.Playbook `json:"playbook"` - Tasks []*store.PlaybookTask `json:"tasks"` -} - -type listPlaybooksResponse struct { - Playbooks []*store.Playbook `json:"playbooks"` - Total int `json:"total"` -} - -type addTaskRequest struct { - Name string `json:"name"` - Module string `json:"module"` - Params map[string]any `json:"params" swaggertype:"object" ` -} - -type addTaskResponse struct { - Task *store.PlaybookTask `json:"task"` -} - -type updateTaskRequest struct { - Name *string `json:"name,omitempty"` - Module *string `json:"module,omitempty"` - Params map[string]any `json:"params,omitempty" swaggertype:"object"` -} - -type updateTaskResponse struct { - Task *store.PlaybookTask `json:"task"` -} - -type reorderTasksRequest struct { - TaskIDs []string `json:"task_ids"` -} - -type exportPlaybookResponse struct { - YAML string `json:"yaml"` -} - -// --- Handlers --- - -// HandleCreatePlaybook creates a new playbook. 
-// @Summary Create playbook -// @Description Creates a new Ansible playbook -// @Tags Ansible Playbooks -// @Accept json -// @Produce json -// @Param request body createPlaybookRequest true "Playbook creation parameters" -// @Success 201 {object} createPlaybookResponse -// @Failure 400 {object} serverError.ErrorResponse -// @Failure 409 {object} serverError.ErrorResponse -// @Id createPlaybook -// @Router /v1/ansible/playbooks [post] -func (h *PlaybookHandler) HandleCreatePlaybook(w http.ResponseWriter, r *http.Request) { - var req createPlaybookRequest - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - - if req.Name == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("name is required")) - return - } - - pb, err := h.svc.CreatePlaybook(r.Context(), CreatePlaybookRequest(req)) - if err != nil { - if errors.Is(err, store.ErrAlreadyExists) { - serverError.RespondError(w, http.StatusConflict, errors.New("playbook with this name already exists")) - return - } - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusCreated, createPlaybookResponse{Playbook: pb}) -} - -// HandleGetPlaybook retrieves a playbook by name. 
-// @Summary Get playbook -// @Description Gets a playbook and its tasks by name -// @Tags Ansible Playbooks -// @Accept json -// @Produce json -// @Param playbook_name path string true "Playbook name" -// @Success 200 {object} getPlaybookResponse -// @Failure 404 {object} serverError.ErrorResponse -// @Id getPlaybook -// @Router /v1/ansible/playbooks/{playbook_name} [get] -func (h *PlaybookHandler) HandleGetPlaybook(w http.ResponseWriter, r *http.Request) { - name := chi.URLParam(r, "playbook_name") - - result, err := h.svc.GetPlaybookWithTasksByName(r.Context(), name) - if err != nil { - if errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusNotFound, errors.New("playbook not found")) - return - } - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, getPlaybookResponse{ - Playbook: result.Playbook, - Tasks: result.Tasks, - }) -} - -// HandleListPlaybooks lists all playbooks. -// @Summary List playbooks -// @Description Lists all Ansible playbooks -// @Tags Ansible Playbooks -// @Accept json -// @Produce json -// @Success 200 {object} listPlaybooksResponse -// @Id listPlaybooks -// @Router /v1/ansible/playbooks [get] -func (h *PlaybookHandler) HandleListPlaybooks(w http.ResponseWriter, r *http.Request) { - playbooks, err := h.svc.ListPlaybooks(r.Context(), nil) - if err != nil { - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, listPlaybooksResponse{ - Playbooks: playbooks, - Total: len(playbooks), - }) -} - -// HandleDeletePlaybook deletes a playbook. 
-// @Summary Delete playbook -// @Description Deletes a playbook and all its tasks -// @Tags Ansible Playbooks -// @Accept json -// @Produce json -// @Param playbook_name path string true "Playbook name" -// @Success 204 -// @Failure 404 {object} serverError.ErrorResponse -// @Id deletePlaybook -// @Router /v1/ansible/playbooks/{playbook_name} [delete] -func (h *PlaybookHandler) HandleDeletePlaybook(w http.ResponseWriter, r *http.Request) { - name := chi.URLParam(r, "playbook_name") - - pb, err := h.svc.GetPlaybookByName(r.Context(), name) - if err != nil { - if errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusNotFound, errors.New("playbook not found")) - return - } - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - if err := h.svc.DeletePlaybook(r.Context(), pb.ID); err != nil { - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -// HandleAddTask adds a task to a playbook. 
-// @Summary Add task to playbook -// @Description Adds a new task to an existing playbook -// @Tags Ansible Playbooks -// @Accept json -// @Produce json -// @Param playbook_name path string true "Playbook name" -// @Param request body addTaskRequest true "Task parameters" -// @Success 201 {object} addTaskResponse -// @Failure 400 {object} serverError.ErrorResponse -// @Failure 404 {object} serverError.ErrorResponse -// @Id addPlaybookTask -// @Router /v1/ansible/playbooks/{playbook_name}/tasks [post] -func (h *PlaybookHandler) HandleAddTask(w http.ResponseWriter, r *http.Request) { - name := chi.URLParam(r, "playbook_name") - - pb, err := h.svc.GetPlaybookByName(r.Context(), name) - if err != nil { - if errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusNotFound, errors.New("playbook not found")) - return - } - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - var req addTaskRequest - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - - if req.Name == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("task name is required")) - return - } - if req.Module == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("module is required")) - return - } - - task, err := h.svc.AddTask(r.Context(), pb.ID, AddTaskRequest(req)) - if err != nil { - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusCreated, addTaskResponse{Task: task}) -} - -// HandleUpdateTask updates a task. 
-// @Summary Update task -// @Description Updates an existing task in a playbook -// @Tags Ansible Playbooks -// @Accept json -// @Produce json -// @Param playbook_name path string true "Playbook name" -// @Param task_id path string true "Task ID" -// @Param request body updateTaskRequest true "Task update parameters" -// @Success 200 {object} updateTaskResponse -// @Failure 400 {object} serverError.ErrorResponse -// @Failure 404 {object} serverError.ErrorResponse -// @Id updatePlaybookTask -// @Router /v1/ansible/playbooks/{playbook_name}/tasks/{task_id} [put] -func (h *PlaybookHandler) HandleUpdateTask(w http.ResponseWriter, r *http.Request) { - taskID := chi.URLParam(r, "task_id") - - var req updateTaskRequest - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - - task, err := h.svc.UpdateTask(r.Context(), taskID, UpdateTaskRequest(req)) - if err != nil { - if errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusNotFound, errors.New("task not found")) - return - } - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, updateTaskResponse{Task: task}) -} - -// HandleDeleteTask deletes a task from a playbook. 
-// @Summary Delete task -// @Description Removes a task from a playbook -// @Tags Ansible Playbooks -// @Accept json -// @Produce json -// @Param playbook_name path string true "Playbook name" -// @Param task_id path string true "Task ID" -// @Success 204 -// @Failure 404 {object} serverError.ErrorResponse -// @Id deletePlaybookTask -// @Router /v1/ansible/playbooks/{playbook_name}/tasks/{task_id} [delete] -func (h *PlaybookHandler) HandleDeleteTask(w http.ResponseWriter, r *http.Request) { - taskID := chi.URLParam(r, "task_id") - - if err := h.svc.DeleteTask(r.Context(), taskID); err != nil { - if errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusNotFound, errors.New("task not found")) - return - } - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -// HandleReorderTasks reorders tasks in a playbook. -// @Summary Reorder tasks -// @Description Reorders tasks in a playbook -// @Tags Ansible Playbooks -// @Accept json -// @Produce json -// @Param playbook_name path string true "Playbook name" -// @Param request body reorderTasksRequest true "New task order" -// @Success 204 -// @Failure 400 {object} serverError.ErrorResponse -// @Failure 404 {object} serverError.ErrorResponse -// @Id reorderPlaybookTasks -// @Router /v1/ansible/playbooks/{playbook_name}/tasks/reorder [patch] -func (h *PlaybookHandler) HandleReorderTasks(w http.ResponseWriter, r *http.Request) { - name := chi.URLParam(r, "playbook_name") - - pb, err := h.svc.GetPlaybookByName(r.Context(), name) - if err != nil { - if errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusNotFound, errors.New("playbook not found")) - return - } - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - var req reorderTasksRequest - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - - 
if len(req.TaskIDs) == 0 { - serverError.RespondError(w, http.StatusBadRequest, errors.New("task_ids is required")) - return - } - - if err := h.svc.ReorderTasks(r.Context(), pb.ID, req.TaskIDs); err != nil { - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -// HandleExportPlaybook exports a playbook as YAML. -// @Summary Export playbook -// @Description Exports a playbook as raw YAML -// @Tags Ansible Playbooks -// @Accept json -// @Produce json -// @Param playbook_name path string true "Playbook name" -// @Success 200 {object} exportPlaybookResponse -// @Failure 404 {object} serverError.ErrorResponse -// @Id exportPlaybook -// @Router /v1/ansible/playbooks/{playbook_name}/export [get] -func (h *PlaybookHandler) HandleExportPlaybook(w http.ResponseWriter, r *http.Request) { - name := chi.URLParam(r, "playbook_name") - - pb, err := h.svc.GetPlaybookByName(r.Context(), name) - if err != nil { - if errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusNotFound, errors.New("playbook not found")) - return - } - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - yamlContent, err := h.svc.ExportPlaybook(r.Context(), pb.ID) - if err != nil { - serverError.RespondError(w, http.StatusInternalServerError, err) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, exportPlaybookResponse{YAML: string(yamlContent)}) -} - -// RegisterPlaybookRoutes registers playbook routes on the given router. 
-func (h *PlaybookHandler) RegisterPlaybookRoutes(r chi.Router) { - r.Route("/playbooks", func(r chi.Router) { - r.Get("/", h.HandleListPlaybooks) - r.Post("/", h.HandleCreatePlaybook) - - r.Route("/{playbook_name}", func(r chi.Router) { - r.Get("/", h.HandleGetPlaybook) - r.Delete("/", h.HandleDeletePlaybook) - r.Get("/export", h.HandleExportPlaybook) - - r.Route("/tasks", func(r chi.Router) { - r.Post("/", h.HandleAddTask) - r.Patch("/reorder", h.HandleReorderTasks) - r.Put("/{task_id}", h.HandleUpdateTask) - r.Delete("/{task_id}", h.HandleDeleteTask) - }) - }) - }) -} diff --git a/fluid-remote/internal/ansible/playbook_test.go b/fluid-remote/internal/ansible/playbook_test.go deleted file mode 100755 index 9558c815..00000000 --- a/fluid-remote/internal/ansible/playbook_test.go +++ /dev/null @@ -1,478 +0,0 @@ -package ansible - -import ( - "context" - "os" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/store" -) - -// mockStore implements store.DataStore for testing playbook operations. 
-type mockStore struct { - playbooks map[string]*store.Playbook - playbookTasks map[string]*store.PlaybookTask -} - -func newMockStore() *mockStore { - return &mockStore{ - playbooks: make(map[string]*store.Playbook), - playbookTasks: make(map[string]*store.PlaybookTask), - } -} - -func (m *mockStore) CreatePlaybook(ctx context.Context, pb *store.Playbook) error { - if _, exists := m.playbooks[pb.ID]; exists { - return store.ErrAlreadyExists - } - for _, existing := range m.playbooks { - if existing.Name == pb.Name { - return store.ErrAlreadyExists - } - } - m.playbooks[pb.ID] = pb - return nil -} - -func (m *mockStore) GetPlaybook(ctx context.Context, id string) (*store.Playbook, error) { - pb, ok := m.playbooks[id] - if !ok { - return nil, store.ErrNotFound - } - return pb, nil -} - -func (m *mockStore) GetPlaybookByName(ctx context.Context, name string) (*store.Playbook, error) { - for _, pb := range m.playbooks { - if pb.Name == name { - return pb, nil - } - } - return nil, store.ErrNotFound -} - -func (m *mockStore) ListPlaybooks(ctx context.Context, opt *store.ListOptions) ([]*store.Playbook, error) { - result := make([]*store.Playbook, 0, len(m.playbooks)) - for _, pb := range m.playbooks { - result = append(result, pb) - } - return result, nil -} - -func (m *mockStore) UpdatePlaybook(ctx context.Context, pb *store.Playbook) error { - if _, ok := m.playbooks[pb.ID]; !ok { - return store.ErrNotFound - } - m.playbooks[pb.ID] = pb - return nil -} - -func (m *mockStore) DeletePlaybook(ctx context.Context, id string) error { - if _, ok := m.playbooks[id]; !ok { - return store.ErrNotFound - } - // Delete associated tasks - for taskID, task := range m.playbookTasks { - if task.PlaybookID == id { - delete(m.playbookTasks, taskID) - } - } - delete(m.playbooks, id) - return nil -} - -func (m *mockStore) CreatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { - if _, exists := m.playbookTasks[task.ID]; exists { - return store.ErrAlreadyExists - } - 
m.playbookTasks[task.ID] = task - return nil -} - -func (m *mockStore) GetPlaybookTask(ctx context.Context, id string) (*store.PlaybookTask, error) { - task, ok := m.playbookTasks[id] - if !ok { - return nil, store.ErrNotFound - } - return task, nil -} - -func (m *mockStore) ListPlaybookTasks(ctx context.Context, playbookID string, opt *store.ListOptions) ([]*store.PlaybookTask, error) { - result := make([]*store.PlaybookTask, 0) - for _, task := range m.playbookTasks { - if task.PlaybookID == playbookID { - result = append(result, task) - } - } - // Sort by position - for i := 0; i < len(result)-1; i++ { - for j := i + 1; j < len(result); j++ { - if result[i].Position > result[j].Position { - result[i], result[j] = result[j], result[i] - } - } - } - return result, nil -} - -func (m *mockStore) UpdatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { - if _, ok := m.playbookTasks[task.ID]; !ok { - return store.ErrNotFound - } - m.playbookTasks[task.ID] = task - return nil -} - -func (m *mockStore) DeletePlaybookTask(ctx context.Context, id string) error { - if _, ok := m.playbookTasks[id]; !ok { - return store.ErrNotFound - } - delete(m.playbookTasks, id) - return nil -} - -func (m *mockStore) ReorderPlaybookTasks(ctx context.Context, playbookID string, taskIDs []string) error { - for i, taskID := range taskIDs { - task, ok := m.playbookTasks[taskID] - if !ok || task.PlaybookID != playbookID { - return store.ErrNotFound - } - task.Position = i - } - return nil -} - -func (m *mockStore) GetNextTaskPosition(ctx context.Context, playbookID string) (int, error) { - maxPos := -1 - for _, task := range m.playbookTasks { - if task.PlaybookID == playbookID && task.Position > maxPos { - maxPos = task.Position - } - } - return maxPos + 1, nil -} - -// Stub implementations for other DataStore methods -func (m *mockStore) CreateSandbox(ctx context.Context, sb *store.Sandbox) error { - return nil -} - -func (m *mockStore) GetSandbox(ctx context.Context, id 
string) (*store.Sandbox, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetSandboxByVMName(ctx context.Context, vmName string) (*store.Sandbox, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListSandboxes(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - return nil, nil -} -func (m *mockStore) UpdateSandbox(ctx context.Context, sb *store.Sandbox) error { return nil } -func (m *mockStore) UpdateSandboxState(ctx context.Context, id string, newState store.SandboxState, ipAddr *string) error { - return nil -} -func (m *mockStore) DeleteSandbox(ctx context.Context, id string) error { return nil } -func (m *mockStore) ListExpiredSandboxes(ctx context.Context, defaultTTL time.Duration) ([]*store.Sandbox, error) { - return nil, nil -} - -func (m *mockStore) CreateSnapshot(ctx context.Context, sn *store.Snapshot) error { - return nil -} - -func (m *mockStore) GetSnapshot(ctx context.Context, id string) (*store.Snapshot, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetSnapshotByName(ctx context.Context, sandboxID, name string) (*store.Snapshot, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListSnapshots(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Snapshot, error) { - return nil, nil -} -func (m *mockStore) SaveCommand(ctx context.Context, cmd *store.Command) error { return nil } -func (m *mockStore) GetCommand(ctx context.Context, id string) (*store.Command, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListCommands(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Command, error) { - return nil, nil -} -func (m *mockStore) SaveDiff(ctx context.Context, d *store.Diff) error { return nil } -func (m *mockStore) GetDiff(ctx context.Context, id string) (*store.Diff, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetDiffBySnapshots(ctx context.Context, 
sandboxID, fromSnapshot, toSnapshot string) (*store.Diff, error) { - return nil, store.ErrNotFound -} -func (m *mockStore) CreateChangeSet(ctx context.Context, cs *store.ChangeSet) error { return nil } -func (m *mockStore) GetChangeSet(ctx context.Context, id string) (*store.ChangeSet, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetChangeSetByJob(ctx context.Context, jobID string) (*store.ChangeSet, error) { - return nil, store.ErrNotFound -} -func (m *mockStore) CreatePublication(ctx context.Context, p *store.Publication) error { return nil } -func (m *mockStore) UpdatePublicationStatus(ctx context.Context, id string, status store.PublicationStatus, commitSHA, prURL, errMsg *string) error { - return nil -} - -func (m *mockStore) GetPublication(ctx context.Context, id string) (*store.Publication, error) { - return nil, store.ErrNotFound -} - -func TestCreatePlaybook(t *testing.T) { - ms := newMockStore() - tmpDir := t.TempDir() - svc := NewPlaybookService(ms, tmpDir) - ctx := context.Background() - - pb, err := svc.CreatePlaybook(ctx, CreatePlaybookRequest{ - Name: "test-playbook", - Hosts: "all", - Become: true, - }) - - require.NoError(t, err) - assert.NotEmpty(t, pb.ID) - assert.Equal(t, "test-playbook", pb.Name) - assert.Equal(t, "all", pb.Hosts) - assert.True(t, pb.Become) -} - -func TestCreatePlaybookDuplicate(t *testing.T) { - ms := newMockStore() - tmpDir := t.TempDir() - svc := NewPlaybookService(ms, tmpDir) - ctx := context.Background() - - _, err := svc.CreatePlaybook(ctx, CreatePlaybookRequest{Name: "test-playbook", Hosts: "all"}) - require.NoError(t, err) - - _, err = svc.CreatePlaybook(ctx, CreatePlaybookRequest{Name: "test-playbook", Hosts: "all"}) - assert.ErrorIs(t, err, store.ErrAlreadyExists) -} - -func TestAddTask(t *testing.T) { - ms := newMockStore() - tmpDir := t.TempDir() - svc := NewPlaybookService(ms, tmpDir) - ctx := context.Background() - - pb, err := svc.CreatePlaybook(ctx, CreatePlaybookRequest{ - Name: 
"test-playbook", - Hosts: "all", - }) - require.NoError(t, err) - - task, err := svc.AddTask(ctx, pb.ID, AddTaskRequest{ - Name: "Install nginx", - Module: "apt", - Params: map[string]any{"name": "nginx", "state": "present"}, - }) - - require.NoError(t, err) - assert.NotEmpty(t, task.ID) - assert.Equal(t, "Install nginx", task.Name) - assert.Equal(t, "apt", task.Module) - assert.Equal(t, 0, task.Position) -} - -func TestAddMultipleTasks(t *testing.T) { - ms := newMockStore() - tmpDir := t.TempDir() - svc := NewPlaybookService(ms, tmpDir) - ctx := context.Background() - - pb, err := svc.CreatePlaybook(ctx, CreatePlaybookRequest{Name: "test-playbook", Hosts: "all"}) - require.NoError(t, err) - - task1, err := svc.AddTask(ctx, pb.ID, AddTaskRequest{Name: "Task 1", Module: "shell", Params: map[string]any{"cmd": "echo 1"}}) - require.NoError(t, err) - assert.Equal(t, 0, task1.Position) - - task2, err := svc.AddTask(ctx, pb.ID, AddTaskRequest{Name: "Task 2", Module: "shell", Params: map[string]any{"cmd": "echo 2"}}) - require.NoError(t, err) - assert.Equal(t, 1, task2.Position) - - task3, err := svc.AddTask(ctx, pb.ID, AddTaskRequest{Name: "Task 3", Module: "shell", Params: map[string]any{"cmd": "echo 3"}}) - require.NoError(t, err) - assert.Equal(t, 2, task3.Position) -} - -func TestRenderPlaybook(t *testing.T) { - ms := newMockStore() - tmpDir := t.TempDir() - svc := NewPlaybookService(ms, tmpDir) - ctx := context.Background() - - pb, err := svc.CreatePlaybook(ctx, CreatePlaybookRequest{ - Name: "nginx-setup", - Hosts: "webservers", - Become: true, - }) - require.NoError(t, err) - - _, err = svc.AddTask(ctx, pb.ID, AddTaskRequest{ - Name: "Install nginx", - Module: "apt", - Params: map[string]any{"name": "nginx", "state": "present"}, - }) - require.NoError(t, err) - - _, err = svc.AddTask(ctx, pb.ID, AddTaskRequest{ - Name: "Start nginx", - Module: "service", - Params: map[string]any{"name": "nginx", "state": "started"}, - }) - require.NoError(t, err) - - // Check file 
was created - filePath := filepath.Join(tmpDir, "nginx-setup.yml") - assert.FileExists(t, filePath) - - // Check content - content, err := os.ReadFile(filePath) - require.NoError(t, err) - - yamlStr := string(content) - assert.Contains(t, yamlStr, "name: nginx-setup") - assert.Contains(t, yamlStr, "hosts: webservers") - assert.Contains(t, yamlStr, "become: true") - assert.Contains(t, yamlStr, "Install nginx") - assert.Contains(t, yamlStr, "apt:") - assert.Contains(t, yamlStr, "Start nginx") - assert.Contains(t, yamlStr, "service:") -} - -func TestExportPlaybook(t *testing.T) { - ms := newMockStore() - tmpDir := t.TempDir() - svc := NewPlaybookService(ms, tmpDir) - ctx := context.Background() - - pb, err := svc.CreatePlaybook(ctx, CreatePlaybookRequest{ - Name: "test-export", - Hosts: "all", - Become: false, - }) - require.NoError(t, err) - - _, err = svc.AddTask(ctx, pb.ID, AddTaskRequest{ - Name: "Echo hello", - Module: "shell", - Params: map[string]any{"cmd": "echo hello"}, - }) - require.NoError(t, err) - - yaml, err := svc.ExportPlaybook(ctx, pb.ID) - require.NoError(t, err) - - yamlStr := string(yaml) - assert.Contains(t, yamlStr, "name: test-export") - assert.Contains(t, yamlStr, "hosts: all") - assert.Contains(t, yamlStr, "Echo hello") - assert.Contains(t, yamlStr, "shell:") -} - -func TestDeleteTask(t *testing.T) { - ms := newMockStore() - tmpDir := t.TempDir() - svc := NewPlaybookService(ms, tmpDir) - ctx := context.Background() - - pb, err := svc.CreatePlaybook(ctx, CreatePlaybookRequest{Name: "test-playbook", Hosts: "all"}) - require.NoError(t, err) - - task, err := svc.AddTask(ctx, pb.ID, AddTaskRequest{Name: "Task 1", Module: "shell", Params: map[string]any{"cmd": "echo 1"}}) - require.NoError(t, err) - - err = svc.DeleteTask(ctx, task.ID) - require.NoError(t, err) - - _, err = svc.GetTask(ctx, task.ID) - assert.ErrorIs(t, err, store.ErrNotFound) -} - -func TestDeletePlaybook(t *testing.T) { - ms := newMockStore() - tmpDir := t.TempDir() - svc := 
NewPlaybookService(ms, tmpDir) - ctx := context.Background() - - pb, err := svc.CreatePlaybook(ctx, CreatePlaybookRequest{Name: "test-playbook", Hosts: "all"}) - require.NoError(t, err) - - _, err = svc.AddTask(ctx, pb.ID, AddTaskRequest{Name: "Task 1", Module: "shell", Params: map[string]any{"cmd": "echo 1"}}) - require.NoError(t, err) - - err = svc.DeletePlaybook(ctx, pb.ID) - require.NoError(t, err) - - _, err = svc.GetPlaybook(ctx, pb.ID) - assert.ErrorIs(t, err, store.ErrNotFound) -} - -func TestReorderTasks(t *testing.T) { - ms := newMockStore() - tmpDir := t.TempDir() - svc := NewPlaybookService(ms, tmpDir) - ctx := context.Background() - - pb, err := svc.CreatePlaybook(ctx, CreatePlaybookRequest{Name: "test-playbook", Hosts: "all"}) - require.NoError(t, err) - - task1, _ := svc.AddTask(ctx, pb.ID, AddTaskRequest{Name: "Task 1", Module: "shell", Params: map[string]any{"cmd": "echo 1"}}) - task2, _ := svc.AddTask(ctx, pb.ID, AddTaskRequest{Name: "Task 2", Module: "shell", Params: map[string]any{"cmd": "echo 2"}}) - task3, _ := svc.AddTask(ctx, pb.ID, AddTaskRequest{Name: "Task 3", Module: "shell", Params: map[string]any{"cmd": "echo 3"}}) - - // Reorder: 3, 1, 2 - err = svc.ReorderTasks(ctx, pb.ID, []string{task3.ID, task1.ID, task2.ID}) - require.NoError(t, err) - - tasks, err := svc.ListTasks(ctx, pb.ID) - require.NoError(t, err) - - assert.Equal(t, "Task 3", tasks[0].Name) - assert.Equal(t, "Task 1", tasks[1].Name) - assert.Equal(t, "Task 2", tasks[2].Name) -} - -func TestUpdateTask(t *testing.T) { - ms := newMockStore() - tmpDir := t.TempDir() - svc := NewPlaybookService(ms, tmpDir) - ctx := context.Background() - - pb, err := svc.CreatePlaybook(ctx, CreatePlaybookRequest{Name: "test-playbook", Hosts: "all"}) - require.NoError(t, err) - - task, err := svc.AddTask(ctx, pb.ID, AddTaskRequest{ - Name: "Original name", - Module: "shell", - Params: map[string]any{"cmd": "echo original"}, - }) - require.NoError(t, err) - - newName := "Updated name" - newModule 
:= "command" - updated, err := svc.UpdateTask(ctx, task.ID, UpdateTaskRequest{ - Name: &newName, - Module: &newModule, - Params: map[string]any{"cmd": "echo updated"}, - }) - require.NoError(t, err) - - assert.Equal(t, "Updated name", updated.Name) - assert.Equal(t, "command", updated.Module) - assert.Equal(t, "echo updated", updated.Params["cmd"]) -} diff --git a/fluid-remote/internal/config/config.go b/fluid-remote/internal/config/config.go deleted file mode 100644 index b7dce117..00000000 --- a/fluid-remote/internal/config/config.go +++ /dev/null @@ -1,351 +0,0 @@ -package config - -import ( - "fmt" - "os" - "time" - - "gopkg.in/yaml.v3" -) - -// Config is the root configuration for fluid-remote API. -type Config struct { - API APIConfig `yaml:"api"` - Database DatabaseConfig `yaml:"database"` - Libvirt LibvirtConfig `yaml:"libvirt"` - VM VMConfig `yaml:"vm"` - SSH SSHConfig `yaml:"ssh"` - Ansible AnsibleConfig `yaml:"ansible"` - Logging LoggingConfig `yaml:"logging"` - Telemetry TelemetryConfig `yaml:"telemetry"` - Janitor JanitorConfig `yaml:"janitor"` // Background cleanup of expired sandboxes - Hosts []HostConfig `yaml:"hosts"` // Remote libvirt hosts for multi-host VM listing -} - -// APIConfig holds HTTP server settings. -type APIConfig struct { - Addr string `yaml:"addr"` - ReadTimeout time.Duration `yaml:"read_timeout"` - WriteTimeout time.Duration `yaml:"write_timeout"` - IdleTimeout time.Duration `yaml:"idle_timeout"` - ShutdownTimeout time.Duration `yaml:"shutdown_timeout"` -} - -// DatabaseConfig holds PostgreSQL connection settings. -type DatabaseConfig struct { - URL string `yaml:"url"` - MaxOpenConns int `yaml:"max_open_conns"` - MaxIdleConns int `yaml:"max_idle_conns"` - ConnMaxLifetime time.Duration `yaml:"conn_max_lifetime"` - AutoMigrate bool `yaml:"auto_migrate"` -} - -// TelemetryConfig holds telemetry settings. 
-type TelemetryConfig struct { - EnableAnonymousUsage bool `yaml:"enable_anonymous_usage"` - APIKey string `yaml:"api_key"` - Endpoint string `yaml:"endpoint"` -} - -// LibvirtConfig holds libvirt/KVM settings. -type LibvirtConfig struct { - URI string `yaml:"uri"` - Network string `yaml:"network"` - BaseImageDir string `yaml:"base_image_dir"` - WorkDir string `yaml:"work_dir"` - SSHKeyInjectMethod string `yaml:"ssh_key_inject_method"` - SocketVMNetWrapper string `yaml:"socket_vmnet_wrapper"` -} - -// VMConfig holds VM default settings. -type VMConfig struct { - DefaultVCPUs int `yaml:"default_vcpus"` - DefaultMemoryMB int `yaml:"default_memory_mb"` - CommandTimeout time.Duration `yaml:"command_timeout"` - IPDiscoveryTimeout time.Duration `yaml:"ip_discovery_timeout"` -} - -// SSHConfig holds SSH CA and key management settings. -type SSHConfig struct { - ProxyJump string `yaml:"proxy_jump"` - CAKeyPath string `yaml:"ca_key_path"` - CAPubPath string `yaml:"ca_pub_path"` - KeyDir string `yaml:"key_dir"` - CertTTL time.Duration `yaml:"cert_ttl"` - MaxTTL time.Duration `yaml:"max_ttl"` - WorkDir string `yaml:"work_dir"` - DefaultUser string `yaml:"default_user"` -} - -// AnsibleConfig holds Ansible runner settings. -type AnsibleConfig struct { - InventoryPath string `yaml:"inventory_path"` - PlaybooksDir string `yaml:"playbooks_dir"` - Image string `yaml:"image"` - AllowedPlaybooks []string `yaml:"allowed_playbooks"` -} - -// LoggingConfig holds logging settings. -type LoggingConfig struct { - Level string `yaml:"level"` - Format string `yaml:"format"` -} - -// JanitorConfig holds settings for the background sandbox cleanup service. -type JanitorConfig struct { - // Enabled controls whether the janitor runs. Default: true - Enabled bool `yaml:"enabled"` - // Interval is how often the janitor checks for expired sandboxes. Default: 1m - Interval time.Duration `yaml:"interval"` - // DefaultTTL is the default TTL for sandboxes that don't specify one. 
Default: 0 (no auto-cleanup) - // Sandboxes with TTLSeconds set will use their own TTL instead. - DefaultTTL time.Duration `yaml:"default_ttl"` -} - -// HostConfig represents a remote libvirt host for multi-host VM management. -// Authentication uses system SSH config (~/.ssh/config and ssh-agent). -type HostConfig struct { - Name string `yaml:"name"` // Display name (e.g., "kvm-01") - Address string `yaml:"address"` // IP or hostname - SSHUser string `yaml:"ssh_user"` // SSH user (default: root) - SSHPort int `yaml:"ssh_port"` // SSH port (default: 22) - QueryTimeout time.Duration `yaml:"query_timeout"` // Per-host query timeout (default: 30s) -} - -// DefaultConfig returns config with sensible defaults. -func DefaultConfig() *Config { - return &Config{ - API: APIConfig{ - Addr: ":8080", - ReadTimeout: 60 * time.Second, - WriteTimeout: 120 * time.Second, - IdleTimeout: 120 * time.Second, - ShutdownTimeout: 20 * time.Second, - }, - Database: DatabaseConfig{ - URL: "postgresql://virsh_sandbox:virsh_sandbox@postgres:5432/virsh_sandbox", - MaxOpenConns: 16, - MaxIdleConns: 8, - ConnMaxLifetime: time.Hour, - AutoMigrate: true, - }, - Telemetry: TelemetryConfig{ - EnableAnonymousUsage: true, - }, - Libvirt: LibvirtConfig{ - URI: "qemu:///system", - Network: "default", - BaseImageDir: "/var/lib/libvirt/images/base", - WorkDir: "/var/lib/libvirt/images/jobs", - SSHKeyInjectMethod: "virt-customize", - }, - VM: VMConfig{ - DefaultVCPUs: 2, - DefaultMemoryMB: 2048, - CommandTimeout: 10 * time.Minute, - IPDiscoveryTimeout: 2 * time.Minute, - }, - SSH: SSHConfig{ - CAKeyPath: "/etc/fluid-remote/ssh_ca", - CAPubPath: "/etc/fluid-remote/ssh_ca.pub", - KeyDir: "/tmp/sandbox-keys", - CertTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - WorkDir: "/tmp/sshca", - DefaultUser: "sandbox", - }, - Ansible: AnsibleConfig{ - InventoryPath: "./.ansible/inventory", - PlaybooksDir: "./.ansible/playbooks", - Image: "ansible-sandbox", - AllowedPlaybooks: []string{"ping.yml"}, - }, - Logging: 
LoggingConfig{ - Level: "info", - Format: "text", - }, - Janitor: JanitorConfig{ - Enabled: true, - Interval: 1 * time.Minute, - DefaultTTL: 0, // No auto-cleanup by default; sandboxes must set TTLSeconds - }, - } -} - -// Load reads config from a YAML file. If the file doesn't exist, returns default config. -// Environment variables can override config values - they take precedence. -func Load(path string) (*Config, error) { - cfg := DefaultConfig() - - data, err := os.ReadFile(path) - if err != nil { - if os.IsNotExist(err) { - // No config file - use defaults - return cfg, nil - } - return nil, fmt.Errorf("reading config file: %w", err) - } - - if err := yaml.Unmarshal(data, cfg); err != nil { - return nil, fmt.Errorf("parsing config file: %w", err) - } - - return cfg, nil -} - -// LoadWithEnvOverride loads config from YAML and allows env vars to override. -// Env vars use the pattern: VIRSH_SANDBOX_
_ (uppercase, underscores). -func LoadWithEnvOverride(path string) (*Config, error) { - cfg, err := Load(path) - if err != nil { - return nil, err - } - - // Apply environment variable overrides - applyEnvOverrides(cfg) - - return cfg, nil -} - -// applyEnvOverrides applies environment variable overrides to config. -// This allows backward compatibility with existing env var usage. -func applyEnvOverrides(cfg *Config) { - // API - if v := os.Getenv("API_HTTP_ADDR"); v != "" { - cfg.API.Addr = v - } - if v := os.Getenv("API_SHUTDOWN_TIMEOUT_SEC"); v != "" { - if d := parseDuration(v); d > 0 { - cfg.API.ShutdownTimeout = d - } - } - - // Database - if v := os.Getenv("DATABASE_URL"); v != "" { - cfg.Database.URL = v - } - - // Telemetry - if v := os.Getenv("ENABLE_ANONYMOUS_USAGE"); v != "" { - cfg.Telemetry.EnableAnonymousUsage = v == "true" - } - if v := os.Getenv("TELEMETRY_API_KEY"); v != "" { - cfg.Telemetry.APIKey = v - } - if v := os.Getenv("TELEMETRY_ENDPOINT"); v != "" { - cfg.Telemetry.Endpoint = v - } - - // Libvirt - if v := os.Getenv("LIBVIRT_URI"); v != "" { - cfg.Libvirt.URI = v - } - if v := os.Getenv("LIBVIRT_NETWORK"); v != "" { - cfg.Libvirt.Network = v - } - if v := os.Getenv("BASE_IMAGE_DIR"); v != "" { - cfg.Libvirt.BaseImageDir = v - } - if v := os.Getenv("SANDBOX_WORKDIR"); v != "" { - cfg.Libvirt.WorkDir = v - } - if v := os.Getenv("SSH_KEY_INJECT_METHOD"); v != "" { - cfg.Libvirt.SSHKeyInjectMethod = v - } - if v := os.Getenv("SOCKET_VMNET_WRAPPER"); v != "" { - cfg.Libvirt.SocketVMNetWrapper = v - } - - // VM - if v := os.Getenv("DEFAULT_VCPUS"); v != "" { - if i := atoi(v); i > 0 { - cfg.VM.DefaultVCPUs = i - } - } - if v := os.Getenv("DEFAULT_MEMORY_MB"); v != "" { - if i := atoi(v); i > 0 { - cfg.VM.DefaultMemoryMB = i - } - } - if v := os.Getenv("COMMAND_TIMEOUT_SEC"); v != "" { - if d := parseDuration(v); d > 0 { - cfg.VM.CommandTimeout = d - } - } - if v := os.Getenv("IP_DISCOVERY_TIMEOUT_SEC"); v != "" { - if d := parseDuration(v); d 
> 0 { - cfg.VM.IPDiscoveryTimeout = d - } - } - - // SSH - if v := os.Getenv("SSH_PROXY_JUMP"); v != "" { - cfg.SSH.ProxyJump = v - } - if v := os.Getenv("SSH_CA_KEY_PATH"); v != "" { - cfg.SSH.CAKeyPath = v - } - if v := os.Getenv("SSH_CA_PUB_KEY_PATH"); v != "" { - cfg.SSH.CAPubPath = v - } - if v := os.Getenv("SSH_KEY_DIR"); v != "" { - cfg.SSH.KeyDir = v - } - if v := os.Getenv("SSH_CERT_TTL_SEC"); v != "" { - if d := parseDuration(v); d > 0 { - cfg.SSH.CertTTL = d - } - } - - // Ansible - if v := os.Getenv("ANSIBLE_INVENTORY_PATH"); v != "" { - cfg.Ansible.InventoryPath = v - } - if v := os.Getenv("ANSIBLE_PLAYBOOKS_DIR"); v != "" { - cfg.Ansible.PlaybooksDir = v - } - if v := os.Getenv("ANSIBLE_IMAGE"); v != "" { - cfg.Ansible.Image = v - } - - // Logging - if v := os.Getenv("LOG_LEVEL"); v != "" { - cfg.Logging.Level = v - } - if v := os.Getenv("LOG_FORMAT"); v != "" { - cfg.Logging.Format = v - } - - // Janitor - if v := os.Getenv("JANITOR_ENABLED"); v != "" { - cfg.Janitor.Enabled = v == "true" || v == "1" - } - if v := os.Getenv("JANITOR_INTERVAL"); v != "" { - if d := parseDuration(v); d > 0 { - cfg.Janitor.Interval = d - } - } - if v := os.Getenv("SANDBOX_DEFAULT_TTL"); v != "" { - if d := parseDuration(v); d > 0 { - cfg.Janitor.DefaultTTL = d - } - } -} - -func atoi(s string) int { - var i int - _, _ = fmt.Sscanf(s, "%d", &i) - return i -} - -func parseDuration(s string) time.Duration { - // Try Go duration format first - if d, err := time.ParseDuration(s); err == nil { - return d - } - // Fall back to seconds - if sec := atoi(s); sec > 0 { - return time.Duration(sec) * time.Second - } - return 0 -} diff --git a/fluid-remote/internal/config/config_test.go b/fluid-remote/internal/config/config_test.go deleted file mode 100644 index 3abc4454..00000000 --- a/fluid-remote/internal/config/config_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package config - -import ( - "os" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" -) - -func TestDefaultConfig(t *testing.T) { - cfg := DefaultConfig() - - assert.Equal(t, ":8080", cfg.API.Addr) - assert.Equal(t, 60*time.Second, cfg.API.ReadTimeout) - assert.Equal(t, 20*time.Second, cfg.API.ShutdownTimeout) - assert.Equal(t, 2, cfg.VM.DefaultVCPUs) - assert.Equal(t, 2048, cfg.VM.DefaultMemoryMB) - assert.Equal(t, "qemu:///system", cfg.Libvirt.URI) - assert.Equal(t, "info", cfg.Logging.Level) -} - -func TestLoad_NonExistentFile(t *testing.T) { - cfg, err := Load("/nonexistent/config.yaml") - require.NoError(t, err) - assert.Equal(t, DefaultConfig(), cfg) -} - -func TestLoad_ValidYAML(t *testing.T) { - tmpDir := t.TempDir() - configPath := filepath.Join(tmpDir, "config.yaml") - - yaml := ` -api: - addr: ":9090" - read_timeout: 30s - -database: - url: "postgresql://test:test@localhost:5432/test" - max_open_conns: 32 - -vm: - default_vcpus: 4 - default_memory_mb: 4096 - command_timeout: 5m - -logging: - level: "debug" - format: "json" -` - err := os.WriteFile(configPath, []byte(yaml), 0o644) - require.NoError(t, err) - - cfg, err := Load(configPath) - require.NoError(t, err) - - assert.Equal(t, ":9090", cfg.API.Addr) - assert.Equal(t, 30*time.Second, cfg.API.ReadTimeout) - assert.Equal(t, "postgresql://test:test@localhost:5432/test", cfg.Database.URL) - assert.Equal(t, 32, cfg.Database.MaxOpenConns) - assert.Equal(t, 4, cfg.VM.DefaultVCPUs) - assert.Equal(t, 4096, cfg.VM.DefaultMemoryMB) - assert.Equal(t, 5*time.Minute, cfg.VM.CommandTimeout) - assert.Equal(t, "debug", cfg.Logging.Level) - assert.Equal(t, "json", cfg.Logging.Format) -} - -func TestLoad_PartialYAML(t *testing.T) { - tmpDir := t.TempDir() - configPath := filepath.Join(tmpDir, "config.yaml") - - // Only override some values - defaults should fill the rest - yaml := ` -api: - addr: ":3000" -logging: - level: "warn" -` - err := os.WriteFile(configPath, []byte(yaml), 0o644) - require.NoError(t, err) - - cfg, err := Load(configPath) - 
require.NoError(t, err) - - // Overridden values - assert.Equal(t, ":3000", cfg.API.Addr) - assert.Equal(t, "warn", cfg.Logging.Level) - - // Default values preserved - assert.Equal(t, 60*time.Second, cfg.API.ReadTimeout) - assert.Equal(t, 2, cfg.VM.DefaultVCPUs) - assert.Equal(t, "qemu:///system", cfg.Libvirt.URI) -} - -func TestLoad_InvalidYAML(t *testing.T) { - tmpDir := t.TempDir() - configPath := filepath.Join(tmpDir, "config.yaml") - - err := os.WriteFile(configPath, []byte("invalid: yaml: content:"), 0o644) - require.NoError(t, err) - - _, err = Load(configPath) - assert.Error(t, err) -} - -func TestLoadWithEnvOverride(t *testing.T) { - tmpDir := t.TempDir() - configPath := filepath.Join(tmpDir, "config.yaml") - - yaml := ` -api: - addr: ":8080" -database: - url: "postgresql://yaml:yaml@localhost/yaml" -` - err := os.WriteFile(configPath, []byte(yaml), 0o644) - require.NoError(t, err) - - // Set env vars to override - t.Setenv("API_HTTP_ADDR", ":9999") - t.Setenv("DATABASE_URL", "postgresql://env:env@localhost/env") - t.Setenv("DEFAULT_VCPUS", "8") - - cfg, err := LoadWithEnvOverride(configPath) - require.NoError(t, err) - - // Env vars should override YAML - assert.Equal(t, ":9999", cfg.API.Addr) - assert.Equal(t, "postgresql://env:env@localhost/env", cfg.Database.URL) - assert.Equal(t, 8, cfg.VM.DefaultVCPUs) -} - -func TestApplyEnvOverrides_AllFields(t *testing.T) { - cfg := DefaultConfig() - - t.Setenv("API_HTTP_ADDR", ":7777") - t.Setenv("API_SHUTDOWN_TIMEOUT_SEC", "30") - t.Setenv("DATABASE_URL", "postgresql://test/test") - t.Setenv("LIBVIRT_URI", "qemu:///session") - t.Setenv("LIBVIRT_NETWORK", "custom-net") - t.Setenv("BASE_IMAGE_DIR", "/custom/base") - t.Setenv("SANDBOX_WORKDIR", "/custom/work") - t.Setenv("DEFAULT_VCPUS", "16") - t.Setenv("DEFAULT_MEMORY_MB", "8192") - t.Setenv("COMMAND_TIMEOUT_SEC", "300") - t.Setenv("IP_DISCOVERY_TIMEOUT_SEC", "60") - t.Setenv("SSH_PROXY_JUMP", "jump@host") - t.Setenv("SSH_CA_KEY_PATH", "/custom/ca") - 
t.Setenv("SSH_KEY_DIR", "/custom/keys") - t.Setenv("SSH_CERT_TTL_SEC", "600") - t.Setenv("ANSIBLE_INVENTORY_PATH", "/custom/inventory") - t.Setenv("ANSIBLE_PLAYBOOKS_DIR", "/custom/playbooks") - t.Setenv("ANSIBLE_IMAGE", "custom-ansible") - t.Setenv("LOG_LEVEL", "debug") - t.Setenv("LOG_FORMAT", "json") - - applyEnvOverrides(cfg) - - assert.Equal(t, ":7777", cfg.API.Addr) - assert.Equal(t, 30*time.Second, cfg.API.ShutdownTimeout) - assert.Equal(t, "postgresql://test/test", cfg.Database.URL) - assert.Equal(t, "qemu:///session", cfg.Libvirt.URI) - assert.Equal(t, "custom-net", cfg.Libvirt.Network) - assert.Equal(t, "/custom/base", cfg.Libvirt.BaseImageDir) - assert.Equal(t, "/custom/work", cfg.Libvirt.WorkDir) - assert.Equal(t, 16, cfg.VM.DefaultVCPUs) - assert.Equal(t, 8192, cfg.VM.DefaultMemoryMB) - assert.Equal(t, 5*time.Minute, cfg.VM.CommandTimeout) - assert.Equal(t, time.Minute, cfg.VM.IPDiscoveryTimeout) - assert.Equal(t, "jump@host", cfg.SSH.ProxyJump) - assert.Equal(t, "/custom/ca", cfg.SSH.CAKeyPath) - assert.Equal(t, "/custom/keys", cfg.SSH.KeyDir) - assert.Equal(t, 10*time.Minute, cfg.SSH.CertTTL) - assert.Equal(t, "/custom/inventory", cfg.Ansible.InventoryPath) - assert.Equal(t, "/custom/playbooks", cfg.Ansible.PlaybooksDir) - assert.Equal(t, "custom-ansible", cfg.Ansible.Image) - assert.Equal(t, "debug", cfg.Logging.Level) - assert.Equal(t, "json", cfg.Logging.Format) -} - -func TestParseDuration(t *testing.T) { - tests := []struct { - input string - expected time.Duration - }{ - {"60", 60 * time.Second}, - {"300", 5 * time.Minute}, - {"5m", 5 * time.Minute}, - {"1h", time.Hour}, - {"30s", 30 * time.Second}, - {"", 0}, - {"invalid", 0}, - } - - for _, tt := range tests { - t.Run(tt.input, func(t *testing.T) { - result := parseDuration(tt.input) - assert.Equal(t, tt.expected, result) - }) - } -} diff --git a/fluid-remote/internal/error/responderror.go b/fluid-remote/internal/error/responderror.go deleted file mode 100755 index 727a4a8f..00000000 --- 
a/fluid-remote/internal/error/responderror.go +++ /dev/null @@ -1,21 +0,0 @@ -package error - -import ( - "net/http" - - serverJSON "github.com/aspectrr/fluid.sh/fluid-remote/internal/json" -) - -type ErrorResponse struct { - Error string `json:"error"` - Code int `json:"code"` - Details string `json:"details,omitempty"` -} - -func RespondError(w http.ResponseWriter, status int, err error) { - _ = serverJSON.RespondJSON(w, status, ErrorResponse{ - Error: err.Error(), - Code: status, - // details intentionally omitted to avoid leaking internals; add as needed - }) -} diff --git a/fluid-remote/internal/extract/archive.go b/fluid-remote/internal/extract/archive.go deleted file mode 100755 index 35c17e2f..00000000 --- a/fluid-remote/internal/extract/archive.go +++ /dev/null @@ -1,247 +0,0 @@ -package extract - -import ( - "bytes" - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "time" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/workflow" -) - -// Archiver handles creation of root filesystem archives. -type Archiver struct { - // tarPath is the path to the tar binary. - tarPath string -} - -// ArchiverConfig configures the archiver. -type ArchiverConfig struct { - // TarPath is the path to the tar binary. - // If empty, "tar" is looked up in PATH. - TarPath string -} - -// NewArchiver creates a new Archiver with the given configuration. -func NewArchiver(cfg ArchiverConfig) *Archiver { - tarPath := cfg.TarPath - if tarPath == "" { - tarPath = "tar" - } - return &Archiver{ - tarPath: tarPath, - } -} - -// ArchiveResult contains the result of creating an archive. -type ArchiveResult struct { - // ArchivePath is the path to the created tar archive. - ArchivePath string - - // Size is the size of the archive in bytes. - Size int64 - - // Cleanup is a function to remove the archive. - Cleanup workflow.CleanupFunc -} - -// CreateRootFSArchive creates a tar archive of the sanitized root filesystem. 
-// The archive preserves numeric ownership and extended attributes. -func (a *Archiver) CreateRootFSArchive(ctx context.Context, sourcePath string, workDir string) (*ArchiveResult, error) { - // Generate archive filename with timestamp - timestamp := time.Now().UTC().Format("20060102T150405Z") - archiveName := fmt.Sprintf("rootfs-%s.tar", timestamp) - archivePath := filepath.Join(workDir, archiveName) - - // Create the archive - if err := a.createTarArchive(ctx, sourcePath, archivePath); err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageCreateArchive, - workflow.ErrArchiveFailed, - fmt.Sprintf("failed to create archive: %v", err), - ) - } - - // Get archive size - info, err := os.Stat(archivePath) - if err != nil { - _ = os.Remove(archivePath) - return nil, workflow.NewWorkflowError( - workflow.StageCreateArchive, - workflow.ErrArchiveFailed, - fmt.Sprintf("failed to stat archive: %v", err), - ) - } - - return &ArchiveResult{ - ArchivePath: archivePath, - Size: info.Size(), - Cleanup: func() error { - return os.Remove(archivePath) - }, - }, nil -} - -// createTarArchive creates a tar archive of the source directory. 
-func (a *Archiver) createTarArchive(ctx context.Context, sourcePath, archivePath string) error { - // Build tar command with options: - // -c: create archive - // -f: output file - // --numeric-owner: preserve numeric UID/GID (important for container images) - // --xattrs: preserve extended attributes - // --xattrs-include=*: include all xattrs - // --acls: preserve ACLs (if supported) - // --selinux: preserve SELinux contexts (if applicable) - // -C: change to directory before archiving - // .: archive current directory contents - - args := []string{ - "-cf", archivePath, - "--numeric-owner", - } - - // Check if tar supports xattrs - if a.supportsXattrs(ctx) { - args = append(args, "--xattrs", "--xattrs-include=*") - } - - // Check if tar supports ACLs - if a.supportsACLs(ctx) { - args = append(args, "--acls") - } - - // Check if tar supports SELinux - if a.supportsSELinux(ctx) { - args = append(args, "--selinux") - } - - // Add source directory - args = append(args, "-C", sourcePath, ".") - - cmd := exec.CommandContext(ctx, a.tarPath, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("tar failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// supportsXattrs checks if tar supports --xattrs option. -func (a *Archiver) supportsXattrs(ctx context.Context) bool { - cmd := exec.CommandContext(ctx, a.tarPath, "--xattrs", "--help") - return cmd.Run() == nil -} - -// supportsACLs checks if tar supports --acls option. -func (a *Archiver) supportsACLs(ctx context.Context) bool { - cmd := exec.CommandContext(ctx, a.tarPath, "--acls", "--help") - return cmd.Run() == nil -} - -// supportsSELinux checks if tar supports --selinux option. -func (a *Archiver) supportsSELinux(ctx context.Context) bool { - cmd := exec.CommandContext(ctx, a.tarPath, "--selinux", "--help") - return cmd.Run() == nil -} - -// ExtractArchive extracts a tar archive to the specified destination. 
-// This is useful for testing or container image import operations. -func (a *Archiver) ExtractArchive(ctx context.Context, archivePath, destPath string) error { - // Ensure destination exists - if err := os.MkdirAll(destPath, 0o755); err != nil { - return fmt.Errorf("failed to create destination directory: %w", err) - } - - args := []string{ - "-xf", archivePath, - "--numeric-owner", - "-C", destPath, - } - - // Add xattrs support if available - if a.supportsXattrs(ctx) { - args = append(args[:2], append([]string{"--xattrs", "--xattrs-include=*"}, args[2:]...)...) - } - - cmd := exec.CommandContext(ctx, a.tarPath, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("tar extraction failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// GetArchiveSize returns the size of an archive file. -func (a *Archiver) GetArchiveSize(archivePath string) (int64, error) { - info, err := os.Stat(archivePath) - if err != nil { - return 0, err - } - return info.Size(), nil -} - -// ListArchiveContents lists the contents of an archive. -// This is useful for verification and debugging. -func (a *Archiver) ListArchiveContents(ctx context.Context, archivePath string) ([]string, error) { - args := []string{"-tf", archivePath} - - cmd := exec.CommandContext(ctx, a.tarPath, args...) - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("tar list failed: %w: %s", err, stderr.String()) - } - - // Parse output into list of files - output := stdout.String() - if output == "" { - return []string{}, nil - } - - lines := bytes.Split(stdout.Bytes(), []byte("\n")) - files := make([]string, 0, len(lines)) - for _, line := range lines { - if len(line) > 0 { - files = append(files, string(line)) - } - } - - return files, nil -} - -// VerifyArchive performs basic verification of an archive. 
-func (a *Archiver) VerifyArchive(ctx context.Context, archivePath string) error { - // Check file exists - info, err := os.Stat(archivePath) - if err != nil { - return fmt.Errorf("archive not found: %w", err) - } - - // Check file is not empty - if info.Size() == 0 { - return fmt.Errorf("archive is empty") - } - - // Try to list contents to verify integrity - args := []string{"-tf", archivePath} - cmd := exec.CommandContext(ctx, a.tarPath, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("archive verification failed: %w: %s", err, stderr.String()) - } - - return nil -} diff --git a/fluid-remote/internal/extract/mount.go b/fluid-remote/internal/extract/mount.go deleted file mode 100755 index ca90abbd..00000000 --- a/fluid-remote/internal/extract/mount.go +++ /dev/null @@ -1,443 +0,0 @@ -package extract - -import ( - "bytes" - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/workflow" -) - -// MountManager handles qemu-nbd attachment and filesystem mounting. -type MountManager struct { - // qemuNbdPath is the path to the qemu-nbd binary. - qemuNbdPath string - - // nbdDeviceMu protects NBD device allocation. - nbdDeviceMu sync.Mutex - - // usedNbdDevices tracks which NBD devices are currently in use. - usedNbdDevices map[string]bool -} - -// MountConfig configures the mount manager. -type MountConfig struct { - // QemuNbdPath is the path to the qemu-nbd binary. - // If empty, "qemu-nbd" is looked up in PATH. - QemuNbdPath string -} - -// MountResult contains the result of mounting a disk image. -type MountResult struct { - // NBDDevice is the /dev/nbdX device the image is attached to. - NBDDevice string - - // Partition is the partition device (e.g., /dev/nbd0p1). - Partition string - - // MountPoint is the path where the filesystem is mounted. 
- MountPoint string - - // Cleanup is a function that unmounts and disconnects everything. - Cleanup workflow.CleanupFunc -} - -// NewMountManager creates a new MountManager with the given configuration. -func NewMountManager(cfg MountConfig) *MountManager { - qemuNbdPath := cfg.QemuNbdPath - if qemuNbdPath == "" { - qemuNbdPath = "qemu-nbd" - } - return &MountManager{ - qemuNbdPath: qemuNbdPath, - usedNbdDevices: make(map[string]bool), - } -} - -// MountDisk attaches a disk image via qemu-nbd and mounts the root filesystem. -// The returned MountResult contains a cleanup function that must be called -// to unmount and disconnect the NBD device. -func (m *MountManager) MountDisk(ctx context.Context, diskPath string, workDir string) (*MountResult, error) { - // Verify disk exists - if _, err := os.Stat(diskPath); err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrMountFailed, - fmt.Sprintf("disk image not found: %s", diskPath), - ) - } - - // Find an available NBD device - nbdDevice, err := m.findAvailableNBDDevice() - if err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrNBDAttachFailed, - fmt.Sprintf("no available NBD device: %v", err), - ) - } - - result := &MountResult{ - NBDDevice: nbdDevice, - } - - // Track cleanup steps for rollback - cleanups := workflow.NewCleanupStack() - - // Attach the disk to NBD device - if err := m.attachNBD(ctx, diskPath, nbdDevice); err != nil { - m.releaseNBDDevice(nbdDevice) - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrNBDAttachFailed, - fmt.Sprintf("failed to attach %s to %s: %v", diskPath, nbdDevice, err), - ) - } - cleanups.Push(func() error { - return m.detachNBD(context.Background(), nbdDevice) - }) - - // Run partprobe to detect partitions - if err := m.runPartprobe(ctx, nbdDevice); err != nil { - _ = cleanups.ExecuteAll() - m.releaseNBDDevice(nbdDevice) - return nil, workflow.NewWorkflowError( - 
workflow.StageMountDisk, - workflow.ErrMountFailed, - fmt.Sprintf("partprobe failed: %v", err), - ) - } - - // Find the root partition - partition, err := m.findRootPartition(ctx, nbdDevice) - if err != nil { - _ = cleanups.ExecuteAll() - m.releaseNBDDevice(nbdDevice) - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrMountFailed, - fmt.Sprintf("failed to find root partition: %v", err), - ) - } - result.Partition = partition - - // Create mount point - mountPoint := filepath.Join(workDir, "rootfs") - if err := os.MkdirAll(mountPoint, 0o755); err != nil { - _ = cleanups.ExecuteAll() - m.releaseNBDDevice(nbdDevice) - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrMountFailed, - fmt.Sprintf("failed to create mount point: %v", err), - ) - } - cleanups.Push(func() error { - return os.RemoveAll(mountPoint) - }) - - // Mount the partition read-only - if err := m.mountPartition(ctx, partition, mountPoint); err != nil { - _ = cleanups.ExecuteAll() - m.releaseNBDDevice(nbdDevice) - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrMountFailed, - fmt.Sprintf("failed to mount %s at %s: %v", partition, mountPoint, err), - ) - } - result.MountPoint = mountPoint - - // Build the final cleanup function that does everything in reverse order - result.Cleanup = func() error { - var errs []error - - // Unmount filesystem - if err := m.unmount(context.Background(), mountPoint); err != nil { - errs = append(errs, fmt.Errorf("unmount %s: %w", mountPoint, err)) - } - - // Remove mount point directory - if err := os.RemoveAll(mountPoint); err != nil { - errs = append(errs, fmt.Errorf("remove mount point: %w", err)) - } - - // Detach NBD device - if err := m.detachNBD(context.Background(), nbdDevice); err != nil { - errs = append(errs, fmt.Errorf("detach NBD: %w", err)) - } - - // Release the NBD device for reuse - m.releaseNBDDevice(nbdDevice) - - if len(errs) > 0 { - return fmt.Errorf("cleanup 
errors: %v", errs) - } - return nil - } - - return result, nil -} - -// findAvailableNBDDevice finds an available /dev/nbdX device. -func (m *MountManager) findAvailableNBDDevice() (string, error) { - m.nbdDeviceMu.Lock() - defer m.nbdDeviceMu.Unlock() - - // Check for nbd module - if _, err := os.Stat("/sys/module/nbd"); os.IsNotExist(err) { - return "", fmt.Errorf("nbd kernel module not loaded; run 'modprobe nbd max_part=16'") - } - - // Try to find an available NBD device (typically nbd0 through nbd15) - for i := 0; i < 16; i++ { - device := fmt.Sprintf("/dev/nbd%d", i) - - // Skip if we're already using it - if m.usedNbdDevices[device] { - continue - } - - // Check if device exists - if _, err := os.Stat(device); os.IsNotExist(err) { - continue - } - - // Check if device is in use by examining its size - sizePath := fmt.Sprintf("/sys/block/nbd%d/size", i) - data, err := os.ReadFile(sizePath) - if err != nil { - continue - } - - size, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) - if err != nil { - continue - } - - // Size of 0 means the device is not in use - if size == 0 { - m.usedNbdDevices[device] = true - return device, nil - } - } - - return "", fmt.Errorf("all NBD devices are in use") -} - -// releaseNBDDevice marks an NBD device as available for reuse. -func (m *MountManager) releaseNBDDevice(device string) { - m.nbdDeviceMu.Lock() - defer m.nbdDeviceMu.Unlock() - delete(m.usedNbdDevices, device) -} - -// attachNBD attaches a disk image to an NBD device using qemu-nbd. -func (m *MountManager) attachNBD(ctx context.Context, diskPath, nbdDevice string) error { - // Connect the image to the NBD device - // --read-only for safety, --connect to specify the device - args := []string{ - "--read-only", - "--connect", nbdDevice, - "--format", "qcow2", - diskPath, - } - - cmd := exec.CommandContext(ctx, m.qemuNbdPath, args...) 
- var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("qemu-nbd failed: %w: %s", err, stderr.String()) - } - - // Wait a bit for the device to be ready - time.Sleep(500 * time.Millisecond) - - return nil -} - -// detachNBD disconnects an NBD device. -func (m *MountManager) detachNBD(ctx context.Context, nbdDevice string) error { - args := []string{"--disconnect", nbdDevice} - - cmd := exec.CommandContext(ctx, m.qemuNbdPath, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("qemu-nbd disconnect failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// runPartprobe runs partprobe to detect partitions on the NBD device. -func (m *MountManager) runPartprobe(ctx context.Context, nbdDevice string) error { - cmd := exec.CommandContext(ctx, "partprobe", nbdDevice) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("partprobe failed: %w: %s", err, stderr.String()) - } - - // Wait for partition devices to appear - time.Sleep(500 * time.Millisecond) - - return nil -} - -// findRootPartition attempts to find the root partition on the NBD device. -// It looks for common partition layouts and returns the likely root partition. 
-func (m *MountManager) findRootPartition(ctx context.Context, nbdDevice string) (string, error) { - // Get the device name without /dev/ prefix - devName := filepath.Base(nbdDevice) - - // Check for partitions in /sys/block// - sysPath := fmt.Sprintf("/sys/block/%s", devName) - - entries, err := os.ReadDir(sysPath) - if err != nil { - return "", fmt.Errorf("failed to read %s: %w", sysPath, err) - } - - var partitions []string - for _, entry := range entries { - name := entry.Name() - // Partition entries start with the device name - if strings.HasPrefix(name, devName+"p") { - partitions = append(partitions, "/dev/"+name) - } - } - - if len(partitions) == 0 { - // No partitions found, might be a whole-disk filesystem - // Try to mount the device directly - return nbdDevice, nil - } - - // Sort partitions and try to find the root partition - // Typically: - // - p1 is often /boot or EFI on modern systems - // - p2 or p3 is often root - // We'll try to identify by checking for common root filesystem indicators - - for _, partition := range partitions { - // Use blkid to check filesystem type - cmd := exec.CommandContext(ctx, "blkid", "-o", "value", "-s", "TYPE", partition) - output, err := cmd.Output() - if err != nil { - continue - } - - fsType := strings.TrimSpace(string(output)) - // Look for ext4, xfs, btrfs which are common root filesystems - if fsType == "ext4" || fsType == "xfs" || fsType == "btrfs" || fsType == "ext3" { - // This is likely the root partition - // We could do more checks (mount and look for /etc, /bin, etc.) 
- // but for now we'll use the first Linux filesystem we find - // that isn't obviously a boot partition - if !m.isBootPartition(ctx, partition) { - return partition, nil - } - } - } - - // If we couldn't find a definitive root, try the largest partition - if len(partitions) > 0 { - largest := partitions[0] - var largestSize int64 - - for _, partition := range partitions { - partName := filepath.Base(partition) - sizePath := fmt.Sprintf("/sys/block/%s/%s/size", devName, partName) - data, err := os.ReadFile(sizePath) - if err != nil { - continue - } - size, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) - if err != nil { - continue - } - if size > largestSize { - largestSize = size - largest = partition - } - } - return largest, nil - } - - return "", fmt.Errorf("no suitable partition found") -} - -// isBootPartition checks if a partition appears to be a boot partition. -func (m *MountManager) isBootPartition(ctx context.Context, partition string) bool { - // Check partition label or flags - cmd := exec.CommandContext(ctx, "blkid", "-o", "value", "-s", "LABEL", partition) - output, err := cmd.Output() - if err == nil { - label := strings.ToLower(strings.TrimSpace(string(output))) - if strings.Contains(label, "boot") || strings.Contains(label, "efi") { - return true - } - } - - // Also check PARTLABEL for GPT partitions - cmd = exec.CommandContext(ctx, "blkid", "-o", "value", "-s", "PARTLABEL", partition) - output, err = cmd.Output() - if err == nil { - label := strings.ToLower(strings.TrimSpace(string(output))) - if strings.Contains(label, "boot") || strings.Contains(label, "efi") { - return true - } - } - - return false -} - -// mountPartition mounts a partition read-only at the specified mount point. 
-func (m *MountManager) mountPartition(ctx context.Context, partition, mountPoint string) error { - // Mount read-only with common options - args := []string{ - "-o", "ro,noatime,noexec", - partition, - mountPoint, - } - - cmd := exec.CommandContext(ctx, "mount", args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("mount failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// unmount unmounts a filesystem. -func (m *MountManager) unmount(ctx context.Context, mountPoint string) error { - // First try a regular unmount - cmd := exec.CommandContext(ctx, "umount", mountPoint) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - // If regular unmount fails, try lazy unmount - cmd = exec.CommandContext(ctx, "umount", "-l", mountPoint) - cmd.Stderr = &stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("unmount failed: %w: %s", err, stderr.String()) - } - } - - return nil -} diff --git a/fluid-remote/internal/extract/sanitize.go b/fluid-remote/internal/extract/sanitize.go deleted file mode 100755 index 81cef16b..00000000 --- a/fluid-remote/internal/extract/sanitize.go +++ /dev/null @@ -1,438 +0,0 @@ -package extract - -import ( - "bytes" - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/workflow" -) - -// Sanitizer handles filesystem sanitization for container usage. -type Sanitizer struct { - // verbose enables detailed logging of sanitization steps. - verbose bool -} - -// SanitizerConfig configures the sanitizer. -type SanitizerConfig struct { - // Verbose enables detailed logging. - Verbose bool -} - -// NewSanitizer creates a new Sanitizer with the given configuration. -func NewSanitizer(cfg SanitizerConfig) *Sanitizer { - return &Sanitizer{ - verbose: cfg.Verbose, - } -} - -// SanitizeResult contains the result of filesystem sanitization. 
-type SanitizeResult struct { - // SanitizedPath is the path to the sanitized filesystem copy. - SanitizedPath string - - // RemovedPaths lists paths that were removed or neutralized. - RemovedPaths []string - - // ModifiedPaths lists paths that were modified. - ModifiedPaths []string - - // Cleanup is a function to remove the sanitized copy. - Cleanup workflow.CleanupFunc -} - -// SanitizeFilesystem creates a sanitized copy of the mounted filesystem -// suitable for container usage. It removes or neutralizes: -// - /boot directory -// - kernel modules (/lib/modules) -// - device nodes under /dev -// - fstab contents -// - swap references -// - systemd services that block container execution -func (s *Sanitizer) SanitizeFilesystem(ctx context.Context, sourcePath string, workDir string) (*SanitizeResult, error) { - // Create a working directory for the sanitized copy - sanitizedPath := filepath.Join(workDir, "sanitized") - if err := os.MkdirAll(sanitizedPath, 0o755); err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageSanitizeFS, - workflow.ErrSanitizeFailed, - fmt.Sprintf("failed to create sanitized directory: %v", err), - ) - } - - result := &SanitizeResult{ - SanitizedPath: sanitizedPath, - RemovedPaths: make([]string, 0), - ModifiedPaths: make([]string, 0), - } - - // Copy the filesystem using rsync for efficiency - // We exclude certain paths during copy rather than copying then deleting - if err := s.copyFilesystem(ctx, sourcePath, sanitizedPath); err != nil { - _ = os.RemoveAll(sanitizedPath) - return nil, workflow.NewWorkflowError( - workflow.StageSanitizeFS, - workflow.ErrSanitizeFailed, - fmt.Sprintf("failed to copy filesystem: %v", err), - ) - } - - // Apply sanitization steps - sanitizers := []struct { - name string - fn func(ctx context.Context, rootPath string, result *SanitizeResult) error - }{ - {"remove boot directory", s.removeBoot}, - {"remove kernel modules", s.removeKernelModules}, - {"clear device nodes", s.clearDeviceNodes}, 
- {"sanitize fstab", s.sanitizeFstab}, - {"remove swap references", s.removeSwapReferences}, - {"disable blocking systemd services", s.disableBlockingServices}, - {"set container environment marker", s.setContainerMarker}, - } - - for _, sanitizer := range sanitizers { - if err := sanitizer.fn(ctx, sanitizedPath, result); err != nil { - _ = os.RemoveAll(sanitizedPath) - return nil, workflow.NewWorkflowError( - workflow.StageSanitizeFS, - workflow.ErrSanitizeFailed, - fmt.Sprintf("%s failed: %v", sanitizer.name, err), - ) - } - } - - result.Cleanup = func() error { - return os.RemoveAll(sanitizedPath) - } - - return result, nil -} - -// copyFilesystem copies the source filesystem to the destination, -// excluding paths that will be removed anyway. -func (s *Sanitizer) copyFilesystem(ctx context.Context, src, dst string) error { - // Use rsync for efficient copying with exclusions - // Exclude paths we're going to remove anyway to save time and space - excludes := []string{ - "--exclude=/boot/*", - "--exclude=/lib/modules/*", - "--exclude=/dev/*", - "--exclude=/proc/*", - "--exclude=/sys/*", - "--exclude=/run/*", - "--exclude=/tmp/*", - "--exclude=/var/tmp/*", - "--exclude=/var/cache/*", - "--exclude=/var/log/*", - "--exclude=*.swap", - "--exclude=/swapfile", - } - - args := []string{ - "-a", // archive mode (preserves permissions, ownership, etc.) - "--hard-links", // preserve hard links - "--acls", // preserve ACLs - "--xattrs", // preserve extended attributes - "--sparse", // handle sparse files efficiently - } - args = append(args, excludes...) - args = append(args, src+"/", dst+"/") - - cmd := exec.CommandContext(ctx, "rsync", args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - // Try with cp if rsync is not available - return s.copyFilesystemFallback(ctx, src, dst) - } - - return nil -} - -// copyFilesystemFallback uses cp when rsync is not available. 
-func (s *Sanitizer) copyFilesystemFallback(ctx context.Context, src, dst string) error { - args := []string{ - "-a", // archive mode - "--reflink=auto", // use copy-on-write if available - src + "/.", - dst + "/", - } - - cmd := exec.CommandContext(ctx, "cp", args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("cp failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// removeBoot removes the /boot directory contents. -func (s *Sanitizer) removeBoot(ctx context.Context, rootPath string, result *SanitizeResult) error { - bootPath := filepath.Join(rootPath, "boot") - - // Remove contents but keep the directory - if err := s.clearDirectory(bootPath); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - result.RemovedPaths = append(result.RemovedPaths, "/boot/*") - return nil -} - -// removeKernelModules removes kernel modules from /lib/modules. -func (s *Sanitizer) removeKernelModules(ctx context.Context, rootPath string, result *SanitizeResult) error { - modulesPath := filepath.Join(rootPath, "lib", "modules") - - if err := s.clearDirectory(modulesPath); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - result.RemovedPaths = append(result.RemovedPaths, "/lib/modules/*") - return nil -} - -// clearDeviceNodes removes all device nodes under /dev. 
-func (s *Sanitizer) clearDeviceNodes(ctx context.Context, rootPath string, result *SanitizeResult) error { - devPath := filepath.Join(rootPath, "dev") - - // Remove contents but keep the directory - if err := s.clearDirectory(devPath); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - // Create minimal /dev entries that containers expect - // The container runtime will populate /dev properly - if err := os.MkdirAll(devPath, 0o755); err != nil { - return err - } - - // Create /dev/null, /dev/zero, /dev/random placeholders - // These are symlinks that the container runtime will handle - devEntries := []string{"null", "zero", "random", "urandom", "tty", "console"} - for _, entry := range devEntries { - placeholder := filepath.Join(devPath, entry) - // Create empty placeholder files - f, err := os.Create(placeholder) - if err != nil { - continue // Non-fatal, container runtime will create these - } - _ = f.Close() - } - - result.RemovedPaths = append(result.RemovedPaths, "/dev/*") - return nil -} - -// sanitizeFstab clears or comments out /etc/fstab entries. 
-func (s *Sanitizer) sanitizeFstab(ctx context.Context, rootPath string, result *SanitizeResult) error { - fstabPath := filepath.Join(rootPath, "etc", "fstab") - - content, err := os.ReadFile(fstabPath) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - // Comment out all mount entries, keeping only comments - lines := strings.Split(string(content), "\n") - var newLines []string - newLines = append(newLines, "# fstab sanitized for container usage") - newLines = append(newLines, "# Original entries commented out:") - newLines = append(newLines, "") - - for _, line := range lines { - trimmed := strings.TrimSpace(line) - if trimmed == "" || strings.HasPrefix(trimmed, "#") { - newLines = append(newLines, line) - } else { - newLines = append(newLines, "# "+line) - } - } - - if err := os.WriteFile(fstabPath, []byte(strings.Join(newLines, "\n")), 0o644); err != nil { - return err - } - - result.ModifiedPaths = append(result.ModifiedPaths, "/etc/fstab") - return nil -} - -// removeSwapReferences removes or disables swap configuration. 
-func (s *Sanitizer) removeSwapReferences(ctx context.Context, rootPath string, result *SanitizeResult) error { - // Remove swapfile if it exists - swapfile := filepath.Join(rootPath, "swapfile") - if _, err := os.Stat(swapfile); err == nil { - if err := os.Remove(swapfile); err != nil { - return err - } - result.RemovedPaths = append(result.RemovedPaths, "/swapfile") - } - - // Remove any .swap files in root - entries, err := os.ReadDir(rootPath) - if err != nil { - return err - } - for _, entry := range entries { - if strings.HasSuffix(entry.Name(), ".swap") { - swapPath := filepath.Join(rootPath, entry.Name()) - if err := os.Remove(swapPath); err != nil { - continue // Non-fatal - } - result.RemovedPaths = append(result.RemovedPaths, "/"+entry.Name()) - } - } - - // Disable swap-related systemd units - swapUnits := []string{ - "swap.target", - "dev-*.swap", - } - - // systemdPath := filepath.Join(rootPath, "etc", "systemd", "system") - for _, unit := range swapUnits { - // unitPath := filepath.Join(systemdPath, unit) - // Create a masked symlink to /dev/null - if err := s.maskSystemdUnit(rootPath, unit); err != nil { - continue // Non-fatal - } - } - - return nil -} - -// disableBlockingServices disables systemd services that block container execution. 
-func (s *Sanitizer) disableBlockingServices(ctx context.Context, rootPath string, result *SanitizeResult) error { - // Services that commonly block container startup or are inappropriate - blockingServices := []string{ - // Hardware/kernel related - "systemd-modules-load.service", - "systemd-sysctl.service", - "systemd-udevd.service", - "systemd-udev-trigger.service", - "systemd-udev-settle.service", - "kmod-static-nodes.service", - "systemd-tmpfiles-setup-dev.service", - - // Filesystem related - "systemd-remount-fs.service", - "systemd-fsck@.service", - "systemd-fsck-root.service", - "local-fs.target", - "local-fs-pre.target", - - // Network hardware related - "NetworkManager-wait-online.service", - "systemd-networkd-wait-online.service", - - // Other blocking services - "plymouth-start.service", - "plymouth-quit.service", - "plymouth-quit-wait.service", - "systemd-machine-id-commit.service", - "systemd-firstboot.service", - "systemd-random-seed.service", - - // Console/TTY related - "getty@.service", - "serial-getty@.service", - "console-getty.service", - "container-getty@.service", - "systemd-ask-password-wall.service", - "systemd-ask-password-console.service", - } - - for _, service := range blockingServices { - if err := s.maskSystemdUnit(rootPath, service); err != nil { - continue // Non-fatal, service may not exist - } - result.ModifiedPaths = append(result.ModifiedPaths, "/etc/systemd/system/"+service) - } - - return nil -} - -// maskSystemdUnit masks a systemd unit by creating a symlink to /dev/null. 
-func (s *Sanitizer) maskSystemdUnit(rootPath, unitName string) error { - systemdPath := filepath.Join(rootPath, "etc", "systemd", "system") - if err := os.MkdirAll(systemdPath, 0o755); err != nil { - return err - } - - unitPath := filepath.Join(systemdPath, unitName) - - // Remove existing unit/symlink if present - _ = os.Remove(unitPath) - - // Create symlink to /dev/null to mask the unit - return os.Symlink("/dev/null", unitPath) -} - -// setContainerMarker creates markers indicating container environment. -func (s *Sanitizer) setContainerMarker(ctx context.Context, rootPath string, result *SanitizeResult) error { - // Create /run/container marker directory - runPath := filepath.Join(rootPath, "run") - if err := os.MkdirAll(runPath, 0o755); err != nil { - return err - } - - // Create /.dockerenv equivalent for container detection - dockerenvPath := filepath.Join(rootPath, ".dockerenv") - f, err := os.Create(dockerenvPath) - if err != nil { - return err - } - _ = f.Close() - result.ModifiedPaths = append(result.ModifiedPaths, "/.dockerenv") - - // Create /run/.containerenv for Podman detection - containerenvPath := filepath.Join(runPath, ".containerenv") - containerenvContent := `engine="podman" -name="vmclone" -` - if err := os.WriteFile(containerenvPath, []byte(containerenvContent), 0o644); err != nil { - return err - } - result.ModifiedPaths = append(result.ModifiedPaths, "/run/.containerenv") - - return nil -} - -// clearDirectory removes all contents of a directory but keeps the directory itself. 
-func (s *Sanitizer) clearDirectory(dirPath string) error { - entries, err := os.ReadDir(dirPath) - if err != nil { - return err - } - - for _, entry := range entries { - entryPath := filepath.Join(dirPath, entry.Name()) - if err := os.RemoveAll(entryPath); err != nil { - // Try to continue with other entries - continue - } - } - - return nil -} diff --git a/fluid-remote/internal/extract/snapshot.go b/fluid-remote/internal/extract/snapshot.go deleted file mode 100755 index db01555a..00000000 --- a/fluid-remote/internal/extract/snapshot.go +++ /dev/null @@ -1,129 +0,0 @@ -package extract - -import ( - "context" - "fmt" - "time" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/libvirt" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/model" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/workflow" -) - -// SnapshotManager handles snapshot creation and extraction mode detection. -type SnapshotManager struct { - domainMgr *libvirt.DomainManager -} - -// NewSnapshotManager creates a new SnapshotManager. -func NewSnapshotManager(domainMgr *libvirt.DomainManager) *SnapshotManager { - return &SnapshotManager{ - domainMgr: domainMgr, - } -} - -// ExtractionPlan describes how to extract a VM's filesystem. -type ExtractionPlan struct { - // VMName is the name of the source VM. - VMName string - - // Mode is the extraction mode: "snapshot" for running VMs, "offline" for stopped VMs. - Mode string - - // DiskPath is the path to the disk image to extract from. - // For snapshot mode, this is the backing file of the snapshot. - // For offline mode, this is the VM's primary disk. - DiskPath string - - // SnapshotName is the name of the created snapshot (empty for offline mode). - SnapshotName string - - // Cleanup is a function to call to clean up the snapshot (nil for offline mode). - Cleanup workflow.CleanupFunc -} - -// DetermineExtractionMode determines whether to use snapshot or offline mode -// based on the VM's current state. 
-func (m *SnapshotManager) DetermineExtractionMode(ctx context.Context, vmName string) (string, error) { - state, err := m.domainMgr.GetDomainState(ctx, vmName) - if err != nil { - return "", fmt.Errorf("failed to get domain state: %w", err) - } - - if state.IsRunning() { - return model.ModeSnapshot, nil - } - return model.ModeOffline, nil -} - -// PrepareExtraction prepares the extraction plan for a VM. -// For running VMs, it creates a disk-only snapshot. -// For stopped VMs, it returns the disk path directly. -func (m *SnapshotManager) PrepareExtraction(ctx context.Context, vmName string) (*ExtractionPlan, error) { - // Get domain info - domainInfo, err := m.domainMgr.LookupDomain(ctx, vmName) - if err != nil { - return nil, fmt.Errorf("failed to lookup domain: %w", err) - } - - // Determine extraction mode - mode, err := m.DetermineExtractionMode(ctx, vmName) - if err != nil { - return nil, err - } - - plan := &ExtractionPlan{ - VMName: vmName, - Mode: mode, - } - - if mode == model.ModeOffline { - // For offline mode, use the disk directly - plan.DiskPath = domainInfo.DiskPath - return plan, nil - } - - // For snapshot mode, create a disk-only snapshot - snapshotName := generateSnapshotName(vmName) - - snapshotInfo, err := m.domainMgr.CreateDiskOnlySnapshot(ctx, vmName, snapshotName) - if err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageCreateSnapshot, - workflow.ErrSnapshotFailed, - fmt.Sprintf("unable to create disk-only snapshot: %v", err), - ) - } - - plan.SnapshotName = snapshotName - plan.DiskPath = snapshotInfo.BackingFile - - // Create cleanup function that commits the snapshot back - plan.Cleanup = func() error { - commitCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - return m.domainMgr.BlockCommit(commitCtx, vmName, "vda", 5*time.Minute) - } - - return plan, nil -} - -// generateSnapshotName generates a unique snapshot name based on VM name and timestamp. 
-func generateSnapshotName(vmName string) string { - return fmt.Sprintf("clone-%s-%d", vmName, time.Now().UnixNano()) -} - -// CleanupSnapshot removes a snapshot created during extraction. -// This is typically called on successful completion to clean up resources. -func (m *SnapshotManager) CleanupSnapshot(ctx context.Context, vmName string, plan *ExtractionPlan) error { - if plan == nil || plan.Mode == model.ModeOffline { - // Nothing to clean up for offline mode - return nil - } - - if plan.Cleanup != nil { - return plan.Cleanup() - } - - return nil -} diff --git a/fluid-remote/internal/libvirt/domain-stub.go b/fluid-remote/internal/libvirt/domain-stub.go deleted file mode 100755 index 677938af..00000000 --- a/fluid-remote/internal/libvirt/domain-stub.go +++ /dev/null @@ -1,127 +0,0 @@ -//go:build !libvirt -// +build !libvirt - -package libvirt - -import ( - "context" - "errors" - "time" -) - -// Sentinel errors for domain operations. -var ( - ErrDomainNotFound = errors.New("domain not found") - ErrDomainTransient = errors.New("transient domains are not supported") - ErrDomainUnsupported = errors.New("domain configuration not supported") -) - -// DomainManager provides libvirt domain operations using libvirt-go bindings. -// This is a stub implementation that returns errors when libvirt is not available. -type DomainManager struct { - uri string -} - -// DomainInfo contains information about a libvirt domain. -type DomainInfo struct { - Name string - UUID string - State DomainState - Persistent bool - DiskPath string -} - -// DomainState represents the state of a domain. -type DomainState int - -const ( - DomainStateUnknown DomainState = iota - DomainStateRunning - DomainStatePaused - DomainStateShutdown - DomainStateStopped - DomainStateCrashed - DomainStateSuspended -) - -// String returns a human-readable domain state. 
-func (s DomainState) String() string { - switch s { - case DomainStateRunning: - return "running" - case DomainStatePaused: - return "paused" - case DomainStateShutdown: - return "shutdown" - case DomainStateStopped: - return "stopped" - case DomainStateCrashed: - return "crashed" - case DomainStateSuspended: - return "suspended" - default: - return "unknown" - } -} - -// IsRunning returns true if the domain is in a running state. -func (s DomainState) IsRunning() bool { - return s == DomainStateRunning || s == DomainStatePaused -} - -// SnapshotInfo contains information about a created snapshot. -type SnapshotInfo struct { - Name string - BackingFile string -} - -// NewDomainManager creates a new DomainManager with the given libvirt URI. -// Note: This stub implementation will return errors for all operations. -func NewDomainManager(uri string) *DomainManager { - if uri == "" { - uri = "qemu:///system" - } - return &DomainManager{ - uri: uri, - } -} - -// Connect is a stub that returns an error when libvirt is not available. -func (m *DomainManager) Connect() error { - return ErrLibvirtNotAvailable -} - -// Close is a stub that does nothing when libvirt is not available. -func (m *DomainManager) Close() error { - return nil -} - -// LookupDomain is a stub that returns an error when libvirt is not available. -func (m *DomainManager) LookupDomain(ctx context.Context, name string) (*DomainInfo, error) { - return nil, ErrLibvirtNotAvailable -} - -// GetDomainState is a stub that returns an error when libvirt is not available. -func (m *DomainManager) GetDomainState(ctx context.Context, name string) (DomainState, error) { - return DomainStateUnknown, ErrLibvirtNotAvailable -} - -// CreateDiskOnlySnapshot is a stub that returns an error when libvirt is not available. 
-func (m *DomainManager) CreateDiskOnlySnapshot(ctx context.Context, domainName, snapshotName string) (*SnapshotInfo, error) { - return nil, ErrLibvirtNotAvailable -} - -// BlockCommit is a stub that returns an error when libvirt is not available. -func (m *DomainManager) BlockCommit(ctx context.Context, domainName, diskTarget string, timeout time.Duration) error { - return ErrLibvirtNotAvailable -} - -// ListDomains is a stub that returns an error when libvirt is not available. -func (m *DomainManager) ListDomains(ctx context.Context) ([]*DomainInfo, error) { - return nil, ErrLibvirtNotAvailable -} - -// GetDiskPath is a stub that returns an error when libvirt is not available. -func (m *DomainManager) GetDiskPath(ctx context.Context, domainName string) (string, error) { - return "", ErrLibvirtNotAvailable -} diff --git a/fluid-remote/internal/libvirt/domain.go b/fluid-remote/internal/libvirt/domain.go deleted file mode 100755 index cf2a8692..00000000 --- a/fluid-remote/internal/libvirt/domain.go +++ /dev/null @@ -1,530 +0,0 @@ -//go:build libvirt -// +build libvirt - -package libvirt - -import ( - "context" - "encoding/xml" - "errors" - "fmt" - "sync" - "time" - - libvirtgo "libvirt.org/go/libvirt" -) - -// DomainManager provides libvirt domain operations using libvirt-go bindings. -type DomainManager struct { - uri string - conn *libvirtgo.Connect - mu sync.Mutex -} - -// DomainInfo contains information about a libvirt domain. -type DomainInfo struct { - Name string - UUID string - State DomainState - Persistent bool - DiskPath string -} - -// DomainState represents the state of a domain. -type DomainState int - -const ( - DomainStateUnknown DomainState = iota - DomainStateRunning - DomainStatePaused - DomainStateShutdown - DomainStateStopped - DomainStateCrashed - DomainStateSuspended -) - -// String returns a human-readable domain state. 
-func (s DomainState) String() string { - switch s { - case DomainStateRunning: - return "running" - case DomainStatePaused: - return "paused" - case DomainStateShutdown: - return "shutdown" - case DomainStateStopped: - return "stopped" - case DomainStateCrashed: - return "crashed" - case DomainStateSuspended: - return "suspended" - default: - return "unknown" - } -} - -// IsRunning returns true if the domain is in a running state. -func (s DomainState) IsRunning() bool { - return s == DomainStateRunning || s == DomainStatePaused -} - -// SnapshotInfo contains information about a created snapshot. -type SnapshotInfo struct { - Name string - BackingFile string -} - -// Domain XML structures for parsing disk information. -type domainXML struct { - XMLName xml.Name `xml:"domain"` - Name string `xml:"name"` - UUID string `xml:"uuid"` - Devices domainDevices `xml:"devices"` -} - -type domainDevices struct { - Disks []domainDisk `xml:"disk"` -} - -type domainDisk struct { - Type string `xml:"type,attr"` - Device string `xml:"device,attr"` - Driver domainDiskDriver `xml:"driver"` - Source domainDiskSource `xml:"source"` - Target domainDiskTarget `xml:"target"` -} - -type domainDiskDriver struct { - Name string `xml:"name,attr"` - Type string `xml:"type,attr"` -} - -type domainDiskSource struct { - File string `xml:"file,attr"` - Dev string `xml:"dev,attr"` -} - -type domainDiskTarget struct { - Dev string `xml:"dev,attr"` - Bus string `xml:"bus,attr"` -} - -// NewDomainManager creates a new DomainManager with the given libvirt URI. -func NewDomainManager(uri string) *DomainManager { - if uri == "" { - uri = "qemu:///system" - } - return &DomainManager{ - uri: uri, - } -} - -// Connect establishes a connection to libvirt. 
-func (m *DomainManager) Connect() error { - m.mu.Lock() - defer m.mu.Unlock() - - if m.conn != nil { - // Check if connection is still alive - if alive, _ := m.conn.IsAlive(); alive { - return nil - } - // Connection dead, close and reconnect - _, _ = m.conn.Close() - m.conn = nil - } - - conn, err := libvirtgo.NewConnect(m.uri) - if err != nil { - return fmt.Errorf("failed to connect to libvirt at %s: %w", m.uri, err) - } - m.conn = conn - return nil -} - -// Close closes the libvirt connection. -func (m *DomainManager) Close() error { - m.mu.Lock() - defer m.mu.Unlock() - - if m.conn != nil { - _, err := m.conn.Close() - m.conn = nil - return err - } - return nil -} - -// ensureConnected ensures we have an active connection. -func (m *DomainManager) ensureConnected() error { - return m.Connect() -} - -// LookupDomain looks up a domain by name and returns its information. -// Returns an error if the domain is transient or not found. -func (m *DomainManager) LookupDomain(ctx context.Context, name string) (*DomainInfo, error) { - if err := m.ensureConnected(); err != nil { - return nil, err - } - - m.mu.Lock() - dom, err := m.conn.LookupDomainByName(name) - m.mu.Unlock() - - if err != nil { - var libvirtErr libvirtgo.Error - if errors.As(err, &libvirtErr) { - if libvirtErr.Code == libvirtgo.ERR_NO_DOMAIN { - return nil, fmt.Errorf("domain %q not found: %w", name, ErrDomainNotFound) - } - } - return nil, fmt.Errorf("failed to lookup domain %q: %w", name, err) - } - defer func() { _ = dom.Free() }() - - // Check if domain is persistent (not transient) - persistent, err := dom.IsPersistent() - if err != nil { - return nil, fmt.Errorf("failed to check if domain is persistent: %w", err) - } - if !persistent { - return nil, fmt.Errorf("domain %q is transient: %w", name, ErrDomainTransient) - } - - // Get domain UUID - uuid, err := dom.GetUUIDString() - if err != nil { - return nil, fmt.Errorf("failed to get domain UUID: %w", err) - } - - // Get domain state - state, _, 
err := dom.GetState() - if err != nil { - return nil, fmt.Errorf("failed to get domain state: %w", err) - } - - // Get domain XML to extract disk path - xmlDesc, err := dom.GetXMLDesc(0) - if err != nil { - return nil, fmt.Errorf("failed to get domain XML: %w", err) - } - - diskPath, err := extractDiskPath(xmlDesc) - if err != nil { - return nil, fmt.Errorf("failed to extract disk path: %w", err) - } - - return &DomainInfo{ - Name: name, - UUID: uuid, - State: mapLibvirtState(state), - Persistent: persistent, - DiskPath: diskPath, - }, nil -} - -// GetDomainState returns the current state of a domain. -func (m *DomainManager) GetDomainState(ctx context.Context, name string) (DomainState, error) { - if err := m.ensureConnected(); err != nil { - return DomainStateUnknown, err - } - - m.mu.Lock() - dom, err := m.conn.LookupDomainByName(name) - m.mu.Unlock() - - if err != nil { - return DomainStateUnknown, fmt.Errorf("failed to lookup domain %q: %w", name, err) - } - defer func() { _ = dom.Free() }() - - state, _, err := dom.GetState() - if err != nil { - return DomainStateUnknown, fmt.Errorf("failed to get domain state: %w", err) - } - - return mapLibvirtState(state), nil -} - -// CreateDiskOnlySnapshot creates an external, disk-only snapshot without metadata. -// This is safe for running VMs and does not pause or stop the VM. -// Returns the snapshot info including the path to the new overlay file. 
-func (m *DomainManager) CreateDiskOnlySnapshot(ctx context.Context, domainName, snapshotName string) (*SnapshotInfo, error) { - if err := m.ensureConnected(); err != nil { - return nil, err - } - - m.mu.Lock() - dom, err := m.conn.LookupDomainByName(domainName) - m.mu.Unlock() - - if err != nil { - return nil, fmt.Errorf("failed to lookup domain %q: %w", domainName, err) - } - defer func() { _ = dom.Free() }() - - // Get current disk path from domain XML - xmlDesc, err := dom.GetXMLDesc(0) - if err != nil { - return nil, fmt.Errorf("failed to get domain XML: %w", err) - } - - currentDiskPath, err := extractDiskPath(xmlDesc) - if err != nil { - return nil, fmt.Errorf("failed to extract current disk path: %w", err) - } - - // The backing file for the container will be the current disk - // After snapshot, libvirt creates a new overlay and the current disk becomes backing - backingFile := currentDiskPath - - // Build snapshot XML for disk-only, external snapshot - // The snapshot file path will be auto-generated by libvirt - snapshotXML := fmt.Sprintf(` - - %s - Disk-only snapshot for container cloning - - - -`, snapshotName) - - // Create the snapshot with flags: - // - DISK_ONLY: Only snapshot the disk, not memory - // - ATOMIC: All-or-nothing operation - // - NO_METADATA: Don't store snapshot metadata in libvirt - flags := libvirtgo.DomainSnapshotCreateFlags(libvirtgo.DOMAIN_SNAPSHOT_CREATE_DISK_ONLY) | - libvirtgo.DomainSnapshotCreateFlags(libvirtgo.DOMAIN_SNAPSHOT_CREATE_ATOMIC) | - libvirtgo.DomainSnapshotCreateFlags(libvirtgo.DOMAIN_SNAPSHOT_CREATE_NO_METADATA) - - m.mu.Lock() - _, err = dom.CreateSnapshotXML(snapshotXML, flags) - m.mu.Unlock() - - if err != nil { - return nil, fmt.Errorf("failed to create disk-only snapshot: %w", err) - } - - return &SnapshotInfo{ - Name: snapshotName, - BackingFile: backingFile, - }, nil -} - -// BlockCommit merges a snapshot overlay back into its backing file and removes the overlay. 
-// This is used for cleanup after cloning or on rollback. -func (m *DomainManager) BlockCommit(ctx context.Context, domainName, diskTarget string, timeout time.Duration) error { - if err := m.ensureConnected(); err != nil { - return err - } - - m.mu.Lock() - dom, err := m.conn.LookupDomainByName(domainName) - m.mu.Unlock() - - if err != nil { - return fmt.Errorf("failed to lookup domain %q: %w", domainName, err) - } - defer func() { _ = dom.Free() }() - - // Start block commit - merge active layer into backing file - flags := libvirtgo.DomainBlockCommitFlags(libvirtgo.DOMAIN_BLOCK_COMMIT_ACTIVE) | libvirtgo.DomainBlockCommitFlags(libvirtgo.DOMAIN_BLOCK_COMMIT_DELETE) - - m.mu.Lock() - err = dom.BlockCommit(diskTarget, "", "", 0, flags) - m.mu.Unlock() - - if err != nil { - return fmt.Errorf("failed to start block commit: %w", err) - } - - // Wait for block commit to complete - deadline := time.Now().Add(timeout) - for time.Now().Before(deadline) { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - m.mu.Lock() - info, err := dom.GetBlockJobInfo(diskTarget, 0) - m.mu.Unlock() - - if err != nil { - // Job may have completed - break - } - - if info.Type == 0 { - // No job running, commit completed - break - } - - time.Sleep(500 * time.Millisecond) - } - - // Pivot to the base image if needed - - m.mu.Lock() - - err = dom.BlockJobAbort(diskTarget, libvirtgo.DomainBlockJobAbortFlags(libvirtgo.DOMAIN_BLOCK_JOB_ABORT_PIVOT)) - - m.mu.Unlock() - - // Ignore error if job already completed or pivot not needed - - _ = err - - return nil -} - -// ListDomains returns information about all domains (VMs) in libvirt. - -// It lists both running and stopped persistent domains. 
- -func (m *DomainManager) ListDomains(ctx context.Context) ([]*DomainInfo, error) { - if err := m.ensureConnected(); err != nil { - return nil, err - } - - m.mu.Lock() - - // List all persistent domains (both active and inactive) - - domains, err := m.conn.ListAllDomains(libvirtgo.ConnectListAllDomainsFlags(libvirtgo.CONNECT_LIST_DOMAINS_PERSISTENT)) - - m.mu.Unlock() - - if err != nil { - return nil, fmt.Errorf("failed to list domains: %w", err) - } - - var result []*DomainInfo - - for _, dom := range domains { - - name, err := dom.GetName() - if err != nil { - - _ = dom.Free() - - continue - - } - - uuid, err := dom.GetUUIDString() - if err != nil { - - _ = dom.Free() - - continue - - } - - state, _, err := dom.GetState() - if err != nil { - - _ = dom.Free() - - continue - - } - - persistent, _ := dom.IsPersistent() - - // Get disk path from domain XML - - var diskPath string - - xmlDesc, err := dom.GetXMLDesc(0) - - if err == nil { - diskPath, _ = extractDiskPath(xmlDesc) - } - - result = append(result, &DomainInfo{ - Name: name, - - UUID: uuid, - - State: mapLibvirtState(state), - - Persistent: persistent, - - DiskPath: diskPath, - }) - - _ = dom.Free() - - } - - return result, nil -} - -// GetDiskPath returns the primary disk path for a domain. -func (m *DomainManager) GetDiskPath(ctx context.Context, domainName string) (string, error) { - if err := m.ensureConnected(); err != nil { - return "", err - } - - m.mu.Lock() - dom, err := m.conn.LookupDomainByName(domainName) - m.mu.Unlock() - - if err != nil { - return "", fmt.Errorf("failed to lookup domain %q: %w", domainName, err) - } - defer func() { _ = dom.Free() }() - - xmlDesc, err := dom.GetXMLDesc(0) - if err != nil { - return "", fmt.Errorf("failed to get domain XML: %w", err) - } - - return extractDiskPath(xmlDesc) -} - -// extractDiskPath parses domain XML and extracts the primary disk file path. 
-func extractDiskPath(xmlDesc string) (string, error) { - var domain domainXML - if err := xml.Unmarshal([]byte(xmlDesc), &domain); err != nil { - return "", fmt.Errorf("failed to parse domain XML: %w", err) - } - - // Find the first disk device (typically vda) - for _, disk := range domain.Devices.Disks { - if disk.Device == "disk" && disk.Source.File != "" { - return disk.Source.File, nil - } - } - - return "", fmt.Errorf("no disk device found in domain XML") -} - -// mapLibvirtState converts libvirt domain state to our DomainState type. -func mapLibvirtState(state libvirtgo.DomainState) DomainState { - switch state { - case libvirtgo.DOMAIN_RUNNING: - return DomainStateRunning - case libvirtgo.DOMAIN_PAUSED: - return DomainStatePaused - case libvirtgo.DOMAIN_SHUTDOWN: - return DomainStateShutdown - case libvirtgo.DOMAIN_SHUTOFF: - return DomainStateStopped - case libvirtgo.DOMAIN_CRASHED: - return DomainStateCrashed - case libvirtgo.DOMAIN_PMSUSPENDED: - return DomainStateSuspended - default: - return DomainStateUnknown - } -} - -// Sentinel errors for domain operations. -var ( - ErrDomainNotFound = errors.New("domain not found") - ErrDomainTransient = errors.New("transient domains are not supported") - ErrDomainUnsupported = errors.New("domain configuration not supported") -) diff --git a/fluid-remote/internal/libvirt/helpers.go b/fluid-remote/internal/libvirt/helpers.go deleted file mode 100644 index 2fe75665..00000000 --- a/fluid-remote/internal/libvirt/helpers.go +++ /dev/null @@ -1,265 +0,0 @@ -package libvirt - -import ( - "crypto/rand" - "fmt" - "strconv" - "strings" - - "github.com/beevik/etree" -) - -// generateMACAddressHelper generates a random MAC address with the locally administered bit set. -// Uses the 52:54:00 prefix which is commonly used by QEMU/KVM. 
-func generateMACAddressHelper() string { - buf := make([]byte, 3) - _, _ = rand.Read(buf) - return fmt.Sprintf("52:54:00:%02x:%02x:%02x", buf[0], buf[1], buf[2]) -} - -// modifyClonedXMLHelper takes the XML from a source domain and adapts it for a new cloned domain. -// It sets a new name, UUID, disk path, MAC address, and cloud-init ISO path. -// If cloudInitISO is provided, any existing CDROM device is updated to use it, ensuring the -// cloned VM gets a unique instance-id and fresh network configuration via cloud-init. -func modifyClonedXMLHelper(sourceXML, newName, newDiskPath, cloudInitISO string, cpu, memoryMB int, network string) (string, error) { - doc := etree.NewDocument() - if err := doc.ReadFromString(sourceXML); err != nil { - return "", fmt.Errorf("parse source XML: %w", err) - } - - root := doc.Root() - if root == nil { - return "", fmt.Errorf("invalid XML: no root element") - } - - // Update VM name - nameElem := root.SelectElement("name") - if nameElem == nil { - return "", fmt.Errorf("invalid XML: missing element") - } - nameElem.SetText(newName) - - // Remove UUID - if uuidElem := root.SelectElement("uuid"); uuidElem != nil { - root.RemoveChild(uuidElem) - } - - // Update CPU - if cpu > 0 { - if vcpuElem := root.SelectElement("vcpu"); vcpuElem != nil { - vcpuElem.SetText(strconv.Itoa(cpu)) - } - } - - // Update Memory - if memoryMB > 0 { - memKiB := strconv.Itoa(memoryMB * 1024) - if memElem := root.SelectElement("memory"); memElem != nil { - memElem.SetText(memKiB) - } - if currMemElem := root.SelectElement("currentMemory"); currMemElem != nil { - currMemElem.SetText(memKiB) - } - } - - // Update disk path for the main virtual disk (vda) - var diskReplaced bool - for _, disk := range root.FindElements("./devices/disk[@device='disk']") { - if target := disk.SelectElement("target"); target != nil { - if bus := target.SelectAttr("bus"); bus != nil && bus.Value == "virtio" { - if source := disk.SelectElement("source"); source != nil { - 
source.SelectAttr("file").Value = newDiskPath - diskReplaced = true - break - } - } - } - } - if !diskReplaced { - return "", fmt.Errorf("could not find a virtio disk in the source XML to replace") - } - - // Handle cloud-init CDROM: update existing or add new one - // This is critical for cloned VMs - they need a unique instance-id to trigger - // cloud-init re-initialization, including DHCP network configuration - if cloudInitISO != "" { - devices := root.SelectElement("devices") - if devices == nil { - return "", fmt.Errorf("invalid XML: missing element") - } - - // Look for existing CDROM device to update - var cdromUpdated bool - for _, disk := range root.FindElements("./devices/disk[@device='cdrom']") { - if source := disk.SelectElement("source"); source != nil { - if fileAttr := source.SelectAttr("file"); fileAttr != nil { - fileAttr.Value = cloudInitISO - cdromUpdated = true - break - } - } - } - - // If no existing CDROM, add one with SCSI controller - if !cdromUpdated { - // Add SCSI controller if not present - hasScsiController := false - for _, ctrl := range root.FindElements("./devices/controller[@type='scsi']") { - if model := ctrl.SelectAttr("model"); model != nil && model.Value == "virtio-scsi" { - hasScsiController = true - break - } - } - if !hasScsiController { - scsiCtrl := devices.CreateElement("controller") - scsiCtrl.CreateAttr("type", "scsi") - scsiCtrl.CreateAttr("model", "virtio-scsi") - } - - // Add CDROM device - cdrom := devices.CreateElement("disk") - cdrom.CreateAttr("type", "file") - cdrom.CreateAttr("device", "cdrom") - - driver := cdrom.CreateElement("driver") - driver.CreateAttr("name", "qemu") - driver.CreateAttr("type", "raw") - - source := cdrom.CreateElement("source") - source.CreateAttr("file", cloudInitISO) - - target := cdrom.CreateElement("target") - target.CreateAttr("dev", "sda") - target.CreateAttr("bus", "scsi") - - cdrom.CreateElement("readonly") - } - } - - // Update network interface: set new MAC and remove PCI 
address - if iface := root.FindElement("./devices/interface"); iface != nil { - macElem := iface.SelectElement("mac") - if macElem != nil { - if addrAttr := macElem.SelectAttr("address"); addrAttr != nil { - addrAttr.Value = generateMACAddressHelper() - } - } else { - macElem = iface.CreateElement("mac") - macElem.CreateAttr("address", generateMACAddressHelper()) - } - - if addrElem := iface.SelectElement("address"); addrElem != nil { - iface.RemoveChild(addrElem) - } - - // Update network source if provided - if network != "" && iface.SelectAttrValue("type", "") == "network" { - if source := iface.SelectElement("source"); source != nil { - if netAttr := source.SelectAttr("network"); netAttr != nil { - netAttr.Value = network - } else { - source.CreateAttr("network", network) - } - } else { - source := iface.CreateElement("source") - source.CreateAttr("network", network) - } - } - } else { - // Handle socket_vmnet case (qemu:commandline) - var cmdline *etree.Element - for _, child := range root.ChildElements() { - if child.Tag == "commandline" && child.Space == "qemu" { - cmdline = child - break - } - } - - if cmdline != nil { - for _, child := range cmdline.ChildElements() { - if child.Tag == "arg" && child.Space == "qemu" { - if valAttr := child.SelectAttr("value"); valAttr != nil { - if strings.HasPrefix(valAttr.Value, "virtio-net-pci") && strings.Contains(valAttr.Value, "mac=") { - parts := strings.Split(valAttr.Value, ",") - newParts := make([]string, 0, len(parts)) - macUpdated := false - for _, part := range parts { - if strings.HasPrefix(part, "mac=") { - newParts = append(newParts, "mac="+generateMACAddressHelper()) - macUpdated = true - } else { - newParts = append(newParts, part) - } - } - if macUpdated { - valAttr.Value = strings.Join(newParts, ",") - break - } - } - } - } - } - } - } - - // Remove existing graphics password - if graphics := root.FindElement("./devices/graphics"); graphics != nil { - graphics.RemoveAttr("passwd") - } - - // Remove 
existing sound devices - for _, sound := range root.FindElements("./devices/sound") { - root.SelectElement("devices").RemoveChild(sound) - } - - doc.Indent(2) - newXML, err := doc.WriteToString() - if err != nil { - return "", fmt.Errorf("failed to write modified XML: %w", err) - } - - return newXML, nil -} - -// parseDomIfAddrIPv4WithMACHelper parses virsh domifaddr output and returns both IP and MAC address. -func parseDomIfAddrIPv4WithMACHelper(s string) (ip string, mac string) { - lines := strings.Split(s, "\n") - for _, l := range lines { - l = strings.TrimSpace(l) - if l == "" || strings.HasPrefix(l, "Name") || strings.HasPrefix(l, "-") { - continue - } - parts := strings.Fields(l) - if len(parts) >= 4 && parts[2] == "ipv4" { - mac = parts[1] - addr := parts[3] - if i := strings.IndexByte(addr, '/'); i > 0 { - ip = addr[:i] - } else { - ip = addr - } - return ip, mac - } - } - return "", "" -} - -// parseVMStateHelper converts virsh domstate output to VMState. -// VMState is defined in virsh.go/virsh-stub.go -func parseVMStateHelper(output string) VMState { - state := strings.TrimSpace(output) - switch state { - case "running": - return VMStateRunning - case "paused": - return VMStatePaused - case "shut off": - return VMStateShutOff - case "crashed": - return VMStateCrashed - case "pmsuspended": - return VMStateSuspended - default: - return VMStateUnknown - } -} diff --git a/fluid-remote/internal/libvirt/helpers_test.go b/fluid-remote/internal/libvirt/helpers_test.go deleted file mode 100644 index c1b4ef4a..00000000 --- a/fluid-remote/internal/libvirt/helpers_test.go +++ /dev/null @@ -1,290 +0,0 @@ -package libvirt - -import ( - "strings" - "testing" -) - -func TestModifyClonedXMLHelper_UpdatesCloudInitISO(t *testing.T) { - // Test that modifyClonedXMLHelper updates existing CDROM device to use new cloud-init ISO - sourceXML := ` - test-vm - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - - - - - -
- - -` - - newXML, err := modifyClonedXMLHelper(sourceXML, "sbx-clone123", - "/var/lib/libvirt/images/jobs/sbx-clone123/disk-overlay.qcow2", - "/var/lib/libvirt/images/jobs/sbx-clone123/cloud-init.iso", - 2, 2048, "default") - if err != nil { - t.Fatalf("modifyClonedXMLHelper() error = %v", err) - } - - // Should have updated name - if !strings.Contains(newXML, "sbx-clone123") { - t.Error("modifyClonedXMLHelper() did not update VM name") - } - - // Should have updated disk path - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-clone123/disk-overlay.qcow2") { - t.Error("modifyClonedXMLHelper() did not update disk path") - } - - // CRITICAL: Should have updated cloud-init ISO path (not the old /tmp/test-vm-seed.img) - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-clone123/cloud-init.iso") { - t.Errorf("modifyClonedXMLHelper() did not update cloud-init ISO path in XML:\n%s", newXML) - } - - // Should NOT contain the old cloud-init ISO path - if strings.Contains(newXML, "/tmp/test-vm-seed.img") { - t.Errorf("modifyClonedXMLHelper() still contains old cloud-init ISO path in XML:\n%s", newXML) - } - - // UUID should be removed - if strings.Contains(newXML, "12345678-1234-1234-1234-123456789012") { - t.Error("modifyClonedXMLHelper() did not remove UUID") - } - - // MAC address should be different from source - if strings.Contains(newXML, "52:54:00:11:22:33") { - t.Error("modifyClonedXMLHelper() did not generate new MAC address") - } -} - -func TestModifyClonedXMLHelper_AddsCloudInitCDROM(t *testing.T) { - // Test that modifyClonedXMLHelper adds CDROM device when source VM has none - sourceXML := ` - test-vm-no-cdrom - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - -` - - newXML, err := modifyClonedXMLHelper(sourceXML, "sbx-new", - "/var/lib/libvirt/images/jobs/sbx-new/disk.qcow2", - "/var/lib/libvirt/images/jobs/sbx-new/cloud-init.iso", - 2, 2048, "default") - if err != nil { - 
t.Fatalf("modifyClonedXMLHelper() error = %v", err) - } - - // Should have added CDROM device with cloud-init ISO - if !strings.Contains(newXML, `device="cdrom"`) { - t.Errorf("modifyClonedXMLHelper() did not add CDROM device in XML:\n%s", newXML) - } - - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-new/cloud-init.iso") { - t.Errorf("modifyClonedXMLHelper() did not add cloud-init ISO path in XML:\n%s", newXML) - } - - // Should have added SCSI controller for the CDROM - if !strings.Contains(newXML, `type="scsi"`) { - t.Errorf("modifyClonedXMLHelper() did not add SCSI controller in XML:\n%s", newXML) - } -} - -func TestModifyClonedXMLHelper_NoCloudInitISO(t *testing.T) { - // Test that modifyClonedXMLHelper works without cloud-init ISO (empty string) - sourceXML := ` - test-vm - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - - - - - - -` - - // Empty cloudInitISO - should not modify CDROM - newXML, err := modifyClonedXMLHelper(sourceXML, "sbx-no-cloud", - "/var/lib/libvirt/images/jobs/sbx-no-cloud/disk.qcow2", - "", // empty cloud-init ISO - 2, 2048, "default") - if err != nil { - t.Fatalf("modifyClonedXMLHelper() error = %v", err) - } - - // Old CDROM path should still be there (unchanged) - if !strings.Contains(newXML, "/tmp/old-seed.img") { - t.Errorf("modifyClonedXMLHelper() modified CDROM when cloudInitISO was empty:\n%s", newXML) - } - - // Name and disk should still be updated - if !strings.Contains(newXML, "sbx-no-cloud") { - t.Error("modifyClonedXMLHelper() did not update VM name") - } - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-no-cloud/disk.qcow2") { - t.Error("modifyClonedXMLHelper() did not update disk path") - } -} - -func TestGenerateMACAddressHelper(t *testing.T) { - mac := generateMACAddressHelper() - - // Should start with QEMU prefix - if !strings.HasPrefix(mac, "52:54:00:") { - t.Errorf("generateMACAddressHelper() = %q, want prefix '52:54:00:'", mac) - } - - // Should be 
valid format (17 chars: xx:xx:xx:xx:xx:xx) - if len(mac) != 17 { - t.Errorf("generateMACAddressHelper() = %q, want 17 chars", mac) - } - - // Should have 5 colons - if strings.Count(mac, ":") != 5 { - t.Errorf("generateMACAddressHelper() = %q, want 5 colons", mac) - } - - // Generate another one - should be different (random) - mac2 := generateMACAddressHelper() - if mac == mac2 { - t.Errorf("generateMACAddressHelper() returned same MAC twice: %q", mac) - } -} - -func TestParseDomIfAddrIPv4WithMACHelper(t *testing.T) { - tests := []struct { - name string - input string - expectedIP string - expectedMAC string - }{ - { - name: "valid output with IP and MAC", - input: `Name MAC address Protocol Address -------------------------------------------------------------------------------- - vnet0 52:54:00:ab:cd:ef ipv4 192.168.122.100/24`, - expectedIP: "192.168.122.100", - expectedMAC: "52:54:00:ab:cd:ef", - }, - { - name: "output with multiple interfaces", - input: `Name MAC address Protocol Address -------------------------------------------------------------------------------- - vnet0 52:54:00:11:22:33 ipv4 192.168.122.50/24 - vnet1 52:54:00:aa:bb:cc ipv4 10.0.0.5/24`, - expectedIP: "192.168.122.50", - expectedMAC: "52:54:00:11:22:33", - }, - { - name: "empty output", - input: "", - expectedIP: "", - expectedMAC: "", - }, - { - name: "no IPv4 address", - input: `Name MAC address Protocol Address --------------------------------------------------------------------------------`, - expectedIP: "", - expectedMAC: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ip, mac := parseDomIfAddrIPv4WithMACHelper(tt.input) - if ip != tt.expectedIP { - t.Errorf("parseDomIfAddrIPv4WithMACHelper() IP = %q, want %q", ip, tt.expectedIP) - } - if mac != tt.expectedMAC { - t.Errorf("parseDomIfAddrIPv4WithMACHelper() MAC = %q, want %q", mac, tt.expectedMAC) - } - }) - } -} - -func TestParseVMStateHelper(t *testing.T) { - tests := []struct { - name string - 
output string - expected VMState - }{ - { - name: "running state", - output: "running\n", - expected: VMStateRunning, - }, - { - name: "shut off state", - output: "shut off\n", - expected: VMStateShutOff, - }, - { - name: "paused state", - output: "paused\n", - expected: VMStatePaused, - }, - { - name: "unknown state", - output: "weird-state\n", - expected: VMStateUnknown, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := parseVMStateHelper(tt.output) - if result != tt.expected { - t.Errorf("parseVMStateHelper(%q) = %v, want %v", tt.output, result, tt.expected) - } - }) - } -} diff --git a/fluid-remote/internal/libvirt/multihost.go b/fluid-remote/internal/libvirt/multihost.go deleted file mode 100644 index 491a6e19..00000000 --- a/fluid-remote/internal/libvirt/multihost.go +++ /dev/null @@ -1,463 +0,0 @@ -package libvirt - -import ( - "bufio" - "context" - "errors" - "fmt" - "log/slog" - "os/exec" - "strings" - "sync" - "time" - "unicode" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/config" -) - -const ( - // DefaultSSHUser is the default SSH user for remote hosts. - DefaultSSHUser = "root" - // DefaultSSHPort is the default SSH port. - DefaultSSHPort = 22 - // DefaultHostQueryTimeout is the default per-host query timeout. - DefaultHostQueryTimeout = 30 * time.Second - // MaxShellInputLength is the maximum allowed length for shell input. - MaxShellInputLength = 4096 -) - -// MultiHostDomainInfo extends DomainInfo with host identification. -type MultiHostDomainInfo struct { - Name string - UUID string - State DomainState - Persistent bool - DiskPath string - HostName string // Display name of the host - HostAddress string // IP or hostname of the host -} - -// HostError represents an error from querying a specific host. 
-type HostError struct { - HostName string `json:"host_name"` - HostAddress string `json:"host_address"` - Error string `json:"error"` -} - -// MultiHostListResult contains the aggregated result from querying all hosts. -type MultiHostListResult struct { - Domains []*MultiHostDomainInfo - HostErrors []HostError -} - -// SSHRunner executes commands on a remote host via SSH. -// This interface enables testing without actual SSH connections. -type SSHRunner interface { - Run(ctx context.Context, address, user string, port int, command string) (string, error) -} - -// defaultSSHRunner implements SSHRunner using actual SSH commands. -type defaultSSHRunner struct{} - -func (r *defaultSSHRunner) Run(ctx context.Context, address, user string, port int, command string) (string, error) { - args := []string{ - "-o", "BatchMode=yes", - "-o", "StrictHostKeyChecking=accept-new", - "-o", "ConnectTimeout=10", - "-p", fmt.Sprintf("%d", port), - fmt.Sprintf("%s@%s", user, address), - command, - } - - cmd := exec.CommandContext(ctx, "ssh", args...) - output, err := cmd.CombinedOutput() - if err != nil { - return "", fmt.Errorf("ssh command failed: %w (output: %s)", err, string(output)) - } - - return string(output), nil -} - -// MultiHostDomainManager queries multiple libvirt hosts via SSH. -type MultiHostDomainManager struct { - hosts []config.HostConfig - logger *slog.Logger - sshRunner SSHRunner -} - -// NewMultiHostDomainManager creates a new MultiHostDomainManager. -func NewMultiHostDomainManager(hosts []config.HostConfig, logger *slog.Logger) *MultiHostDomainManager { - return &MultiHostDomainManager{ - hosts: hosts, - logger: logger, - sshRunner: &defaultSSHRunner{}, - } -} - -// NewMultiHostDomainManagerWithRunner creates a MultiHostDomainManager with a custom SSH runner. -// This is primarily useful for testing. 
-func NewMultiHostDomainManagerWithRunner(hosts []config.HostConfig, logger *slog.Logger, runner SSHRunner) *MultiHostDomainManager { - return &MultiHostDomainManager{ - hosts: hosts, - logger: logger, - sshRunner: runner, - } -} - -// ListDomains queries all configured hosts in parallel and aggregates VM listings. -// Returns all VMs found along with any host errors encountered. -func (m *MultiHostDomainManager) ListDomains(ctx context.Context) (*MultiHostListResult, error) { - if len(m.hosts) == 0 { - return &MultiHostListResult{}, nil - } - - type hostResult struct { - domains []*MultiHostDomainInfo - err *HostError - } - - results := make(chan hostResult, len(m.hosts)) - var wg sync.WaitGroup - - for _, host := range m.hosts { - wg.Add(1) - go func(h config.HostConfig) { - defer wg.Done() - - domains, err := m.queryHost(ctx, h) - if err != nil { - m.logger.Warn("failed to query host", - "host_name", h.Name, - "host_address", h.Address, - "error", err, - ) - results <- hostResult{ - err: &HostError{ - HostName: h.Name, - HostAddress: h.Address, - Error: err.Error(), - }, - } - return - } - results <- hostResult{domains: domains} - }(host) - } - - // Close results channel when all goroutines complete - go func() { - wg.Wait() - close(results) - }() - - // Aggregate results - var allDomains []*MultiHostDomainInfo - var hostErrors []HostError - - for result := range results { - if result.err != nil { - hostErrors = append(hostErrors, *result.err) - } else { - allDomains = append(allDomains, result.domains...) - } - } - - return &MultiHostListResult{ - Domains: allDomains, - HostErrors: hostErrors, - }, nil -} - -// queryHost queries a single host for its VM list via SSH. 
-func (m *MultiHostDomainManager) queryHost(ctx context.Context, host config.HostConfig) ([]*MultiHostDomainInfo, error) { - // Apply defaults - sshUser := host.SSHUser - if sshUser == "" { - sshUser = DefaultSSHUser - } - sshPort := host.SSHPort - if sshPort == 0 { - sshPort = DefaultSSHPort - } - queryTimeout := host.QueryTimeout - if queryTimeout == 0 { - queryTimeout = DefaultHostQueryTimeout - } - - // Create context with timeout - queryCtx, cancel := context.WithTimeout(ctx, queryTimeout) - defer cancel() - - // Get list of VM names - vmNames, err := m.runSSHCommand(queryCtx, host.Address, sshUser, sshPort, - "virsh list --all --name") - if err != nil { - return nil, fmt.Errorf("list VMs: %w", err) - } - - // Parse VM names (one per line, skip empty lines) - var names []string - scanner := bufio.NewScanner(strings.NewReader(vmNames)) - for scanner.Scan() { - name := strings.TrimSpace(scanner.Text()) - if name != "" { - names = append(names, name) - } - } - - if len(names) == 0 { - return nil, nil - } - - // Get details for each VM - var domains []*MultiHostDomainInfo - for _, name := range names { - domain, err := m.getDomainInfo(queryCtx, host, sshUser, sshPort, name) - if err != nil { - m.logger.Debug("failed to get domain info", - "host", host.Name, - "domain", name, - "error", err, - ) - // Continue with other VMs even if one fails - continue - } - domains = append(domains, domain) - } - - return domains, nil -} - -// getDomainInfo gets detailed information for a single domain. 
-func (m *MultiHostDomainManager) getDomainInfo(ctx context.Context, host config.HostConfig, sshUser string, sshPort int, name string) (*MultiHostDomainInfo, error) { - escapedName, err := shellEscape(name) - if err != nil { - return nil, fmt.Errorf("invalid domain name: %w", err) - } - - // Get domain info using virsh dominfo - output, err := m.runSSHCommand(ctx, host.Address, sshUser, sshPort, - fmt.Sprintf("virsh dominfo %s", escapedName)) - if err != nil { - return nil, fmt.Errorf("dominfo: %w", err) - } - - domain := &MultiHostDomainInfo{ - Name: name, - HostName: host.Name, - HostAddress: host.Address, - } - - // Parse dominfo output - scanner := bufio.NewScanner(strings.NewReader(output)) - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, ":", 2) - if len(parts) != 2 { - continue - } - key := strings.TrimSpace(parts[0]) - value := strings.TrimSpace(parts[1]) - - switch key { - case "UUID": - domain.UUID = value - case "State": - domain.State = parseVirshState(value) - case "Persistent": - domain.Persistent = value == "yes" - } - } - - // Get disk path using virsh domblklist (reuse escapedName from above) - diskOutput, err := m.runSSHCommand(ctx, host.Address, sshUser, sshPort, - fmt.Sprintf("virsh domblklist %s --details", escapedName)) - if err == nil { - domain.DiskPath = parseDiskPath(diskOutput) - } - - return domain, nil -} - -// runSSHCommand executes a command on a remote host via SSH. -func (m *MultiHostDomainManager) runSSHCommand(ctx context.Context, address, user string, port int, command string) (string, error) { - return m.sshRunner.Run(ctx, address, user, port, command) -} - -// parseVirshState converts virsh state string to DomainState. 
-func parseVirshState(state string) DomainState { - switch strings.ToLower(state) { - case "running": - return DomainStateRunning - case "paused": - return DomainStatePaused - case "shut off": - return DomainStateStopped - case "shutdown": - return DomainStateShutdown - case "crashed": - return DomainStateCrashed - case "pmsuspended": - return DomainStateSuspended - default: - return DomainStateUnknown - } -} - -// parseDiskPath extracts the primary disk path from virsh domblklist output. -func parseDiskPath(output string) string { - // Output format: - // Type Device Target Source - // ------------------------------------------------ - // file disk vda /var/lib/libvirt/images/vm.qcow2 - scanner := bufio.NewScanner(strings.NewReader(output)) - lineNum := 0 - for scanner.Scan() { - lineNum++ - // Skip header lines - if lineNum <= 2 { - continue - } - line := scanner.Text() - fields := strings.Fields(line) - if len(fields) >= 4 && fields[1] == "disk" { - return fields[3] - } - } - return "" -} - -// ErrShellInputTooLong is returned when input exceeds MaxShellInputLength. -var ErrShellInputTooLong = errors.New("shell input exceeds maximum length") - -// ErrShellInputNullByte is returned when input contains null bytes. -var ErrShellInputNullByte = errors.New("shell input contains null byte") - -// ErrShellInputControlChar is returned when input contains control characters. -var ErrShellInputControlChar = errors.New("shell input contains control character") - -// validateShellInput checks input for dangerous characters before shell escaping. 
-func validateShellInput(s string) error { - if len(s) > MaxShellInputLength { - return ErrShellInputTooLong - } - for _, r := range s { - if r == 0 { - return ErrShellInputNullByte - } - // Reject control characters (0x00-0x1F) except tab (0x09) and newline (0x0A) - if unicode.IsControl(r) && r != '\t' && r != '\n' { - return ErrShellInputControlChar - } - } - return nil -} - -// shellEscape escapes a string for safe use in shell commands. -// Returns an error if the input contains dangerous characters. -func shellEscape(s string) (string, error) { - if err := validateShellInput(s); err != nil { - return "", err - } - // Wrap in single quotes and escape existing single quotes - return "'" + strings.ReplaceAll(s, "'", "'\"'\"'") + "'", nil -} - -// FindHostForVM searches all configured hosts to find which one has the given VM. -// Returns the host config if found, or an error if the VM is not found on any host. -func (m *MultiHostDomainManager) FindHostForVM(ctx context.Context, vmName string) (*config.HostConfig, error) { - if len(m.hosts) == 0 { - return nil, fmt.Errorf("no hosts configured") - } - - type findResult struct { - host *config.HostConfig - found bool - err error - } - - results := make(chan findResult, len(m.hosts)) - var wg sync.WaitGroup - - for i := range m.hosts { - wg.Add(1) - go func(h *config.HostConfig) { - defer wg.Done() - - found, err := m.hostHasVM(ctx, *h, vmName) - if err != nil { - m.logger.Debug("error checking host for VM", - "host", h.Name, - "vm_name", vmName, - "error", err, - ) - results <- findResult{err: err} - return - } - if found { - results <- findResult{host: h, found: true} - } else { - results <- findResult{found: false} - } - }(&m.hosts[i]) - } - - go func() { - wg.Wait() - close(results) - }() - - // Collect results - return first host that has the VM - var lastErr error - for result := range results { - if result.found { - return result.host, nil - } - if result.err != nil { - lastErr = result.err - } - } - - if 
lastErr != nil { - return nil, fmt.Errorf("VM %q not found on any host (last error: %w)", vmName, lastErr) - } - return nil, fmt.Errorf("VM %q not found on any configured host", vmName) -} - -// hostHasVM checks if a specific host has the given VM. -func (m *MultiHostDomainManager) hostHasVM(ctx context.Context, host config.HostConfig, vmName string) (bool, error) { - escapedName, err := shellEscape(vmName) - if err != nil { - return false, fmt.Errorf("invalid VM name: %w", err) - } - - sshUser := host.SSHUser - if sshUser == "" { - sshUser = DefaultSSHUser - } - sshPort := host.SSHPort - if sshPort == 0 { - sshPort = DefaultSSHPort - } - queryTimeout := host.QueryTimeout - if queryTimeout == 0 { - queryTimeout = DefaultHostQueryTimeout - } - - queryCtx, cancel := context.WithTimeout(ctx, queryTimeout) - defer cancel() - - // Check if VM exists using virsh dominfo - _, err = m.runSSHCommand(queryCtx, host.Address, sshUser, sshPort, - fmt.Sprintf("virsh dominfo %s", escapedName)) - if err != nil { - // If virsh dominfo fails, the VM doesn't exist on this host - return false, nil - } - return true, nil -} - -// GetHosts returns the configured hosts. -func (m *MultiHostDomainManager) GetHosts() []config.HostConfig { - return m.hosts -} diff --git a/fluid-remote/internal/libvirt/multihost_test.go b/fluid-remote/internal/libvirt/multihost_test.go deleted file mode 100644 index 05162345..00000000 --- a/fluid-remote/internal/libvirt/multihost_test.go +++ /dev/null @@ -1,817 +0,0 @@ -package libvirt - -import ( - "context" - "errors" - "fmt" - "log/slog" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/config" -) - -// mockSSHRunner implements SSHRunner for testing. 
-type mockSSHRunner struct { - mu sync.Mutex - responses map[string]string // command -> response - errors map[string]error // command -> error - defaultError error - delay time.Duration - callCount atomic.Int64 - callLog []mockSSHCall -} - -type mockSSHCall struct { - Address string - User string - Port int - Command string -} - -func newMockSSHRunner() *mockSSHRunner { - return &mockSSHRunner{ - responses: make(map[string]string), - errors: make(map[string]error), - } -} - -func (m *mockSSHRunner) Run(ctx context.Context, address, user string, port int, command string) (string, error) { - m.callCount.Add(1) - - m.mu.Lock() - m.callLog = append(m.callLog, mockSSHCall{ - Address: address, - User: user, - Port: port, - Command: command, - }) - m.mu.Unlock() - - if m.delay > 0 { - select { - case <-time.After(m.delay): - case <-ctx.Done(): - return "", ctx.Err() - } - } - - // Check for address-specific errors first - if err, ok := m.errors[address]; ok { - return "", err - } - - // Check for command-specific responses - if resp, ok := m.responses[command]; ok { - return resp, nil - } - - if m.defaultError != nil { - return "", m.defaultError - } - - return "", nil -} - -func (m *mockSSHRunner) setResponse(command, response string) { - m.mu.Lock() - defer m.mu.Unlock() - m.responses[command] = response -} - -func (m *mockSSHRunner) setHostError(address string, err error) { - m.mu.Lock() - defer m.mu.Unlock() - m.errors[address] = err -} - -func (m *mockSSHRunner) getCalls() []mockSSHCall { - m.mu.Lock() - defer m.mu.Unlock() - result := make([]mockSSHCall, len(m.callLog)) - copy(result, m.callLog) - return result -} - -func TestParseVirshState(t *testing.T) { - tests := []struct { - input string - expected DomainState - }{ - {"running", DomainStateRunning}, - {"Running", DomainStateRunning}, - {"RUNNING", DomainStateRunning}, - {"paused", DomainStatePaused}, - {"shut off", DomainStateStopped}, - {"shutdown", DomainStateShutdown}, - {"crashed", DomainStateCrashed}, - 
{"pmsuspended", DomainStateSuspended}, - {"unknown", DomainStateUnknown}, - {"", DomainStateUnknown}, - } - - for _, tt := range tests { - t.Run(tt.input, func(t *testing.T) { - result := parseVirshState(tt.input) - if result != tt.expected { - t.Errorf("parseVirshState(%q) = %v, want %v", tt.input, result, tt.expected) - } - }) - } -} - -func TestParseDiskPath(t *testing.T) { - tests := []struct { - name string - input string - expected string - }{ - { - name: "standard output", - input: `Type Device Target Source ------------------------------------------------- -file disk vda /var/lib/libvirt/images/test.qcow2 -file cdrom sda -`, - expected: "/var/lib/libvirt/images/test.qcow2", - }, - { - name: "multiple disks", - input: `Type Device Target Source ------------------------------------------------- -file disk vda /var/lib/libvirt/images/root.qcow2 -file disk vdb /var/lib/libvirt/images/data.qcow2`, - expected: "/var/lib/libvirt/images/root.qcow2", - }, - { - name: "empty output", - input: "", - expected: "", - }, - { - name: "no disks", - input: `Type Device Target Source -------------------------------------------------`, - expected: "", - }, - { - name: "cdrom only", - input: `Type Device Target Source ------------------------------------------------- -file cdrom sda /path/to/iso.iso`, - expected: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := parseDiskPath(tt.input) - if result != tt.expected { - t.Errorf("parseDiskPath() = %q, want %q", result, tt.expected) - } - }) - } -} - -func TestShellEscape(t *testing.T) { - tests := []struct { - input string - expected string - wantErr bool - }{ - {"simple", "'simple'", false}, - {"with spaces", "'with spaces'", false}, - {"with'quote", "'with'\"'\"'quote'", false}, - {"", "''", false}, - {"test-vm-01", "'test-vm-01'", false}, - {"with\ttab", "'with\ttab'", false}, - {"with\nnewline", "'with\nnewline'", false}, - } - - for _, tt := range tests { - t.Run(tt.input, func(t 
*testing.T) { - result, err := shellEscape(tt.input) - if (err != nil) != tt.wantErr { - t.Errorf("shellEscape(%q) error = %v, wantErr %v", tt.input, err, tt.wantErr) - return - } - if result != tt.expected { - t.Errorf("shellEscape(%q) = %q, want %q", tt.input, result, tt.expected) - } - }) - } -} - -func TestShellEscapeValidation(t *testing.T) { - tests := []struct { - name string - input string - wantErr error - }{ - { - name: "null byte", - input: "test\x00value", - wantErr: ErrShellInputNullByte, - }, - { - name: "control character bell", - input: "test\x07value", - wantErr: ErrShellInputControlChar, - }, - { - name: "control character escape", - input: "test\x1bvalue", - wantErr: ErrShellInputControlChar, - }, - { - name: "control character carriage return", - input: "test\rvalue", - wantErr: ErrShellInputControlChar, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := shellEscape(tt.input) - if err == nil { - t.Errorf("shellEscape(%q) expected error, got nil", tt.input) - return - } - if err != tt.wantErr { - t.Errorf("shellEscape(%q) error = %v, want %v", tt.input, err, tt.wantErr) - } - }) - } -} - -func TestShellEscapeMaxLength(t *testing.T) { - // Test input at max length (should succeed) - atMax := make([]byte, MaxShellInputLength) - for i := range atMax { - atMax[i] = 'a' - } - _, err := shellEscape(string(atMax)) - if err != nil { - t.Errorf("shellEscape at max length should succeed, got error: %v", err) - } - - // Test input over max length (should fail) - overMax := make([]byte, MaxShellInputLength+1) - for i := range overMax { - overMax[i] = 'a' - } - _, err = shellEscape(string(overMax)) - if err != ErrShellInputTooLong { - t.Errorf("shellEscape over max length should return ErrShellInputTooLong, got: %v", err) - } -} - -func TestValidateShellInput(t *testing.T) { - tests := []struct { - name string - input string - wantErr error - }{ - {"valid simple", "simple", nil}, - {"valid with spaces", "with spaces", 
nil}, - {"valid with tab", "with\ttab", nil}, - {"valid with newline", "with\nnewline", nil}, - {"invalid null byte", "test\x00value", ErrShellInputNullByte}, - {"invalid bell", "test\x07value", ErrShellInputControlChar}, - {"invalid escape", "test\x1bvalue", ErrShellInputControlChar}, - {"invalid backspace", "test\x08value", ErrShellInputControlChar}, - {"invalid form feed", "test\x0cvalue", ErrShellInputControlChar}, - {"invalid carriage return", "test\rvalue", ErrShellInputControlChar}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := validateShellInput(tt.input) - if err != tt.wantErr { - t.Errorf("validateShellInput(%q) = %v, want %v", tt.input, err, tt.wantErr) - } - }) - } -} - -func TestNewMultiHostDomainManager(t *testing.T) { - manager := NewMultiHostDomainManager(nil, nil) - if manager == nil { - t.Fatal("NewMultiHostDomainManager returned nil") - } - if manager.hosts != nil { - t.Error("Expected nil hosts slice") - } -} - -func TestGetHosts(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - } - manager := NewMultiHostDomainManager(hosts, nil) - - result := manager.GetHosts() - if len(result) != 2 { - t.Errorf("Expected 2 hosts, got %d", len(result)) - } - if result[0].Name != "host1" { - t.Errorf("Expected first host name to be 'host1', got %s", result[0].Name) - } -} - -// TestListDomainsAllHostsUnreachable tests the case when all configured hosts fail. 
-func TestListDomainsAllHostsUnreachable(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - {Name: "host3", Address: "192.168.1.3"}, - } - - mock := newMockSSHRunner() - mock.defaultError = errors.New("connection refused") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - // ListDomains returns an error aggregation, not a top-level error - if err != nil { - t.Fatalf("ListDomains should not return top-level error: %v", err) - } - - if result == nil { - t.Fatal("Expected non-nil result") - } - - // All hosts should have failed - if len(result.HostErrors) != 3 { - t.Errorf("Expected 3 host errors, got %d", len(result.HostErrors)) - } - - // No domains should be returned - if len(result.Domains) != 0 { - t.Errorf("Expected 0 domains, got %d", len(result.Domains)) - } - - // Verify each host error is recorded - hostErrorMap := make(map[string]bool) - for _, he := range result.HostErrors { - hostErrorMap[he.HostName] = true - if he.Error == "" { - t.Errorf("Host error for %s should have error message", he.HostName) - } - } - - for _, h := range hosts { - if !hostErrorMap[h.Name] { - t.Errorf("Expected error for host %s", h.Name) - } - } -} - -// TestListDomainsPartialHostFailure tests when some hosts succeed and some fail. 
-func TestListDomainsPartialHostFailure(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - } - - mock := newMockSSHRunner() - // host1 fails - mock.setHostError("192.168.1.1", errors.New("connection refused")) - // host2 succeeds with VMs - mock.setResponse("virsh list --all --name", "vm1\nvm2\n") - mock.setResponse("virsh dominfo 'vm1'", "UUID: 1234\nState: running\nPersistent: yes\n") - mock.setResponse("virsh dominfo 'vm2'", "UUID: 5678\nState: shut off\nPersistent: yes\n") - mock.setResponse("virsh domblklist 'vm1' --details", "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm1.qcow2\n") - mock.setResponse("virsh domblklist 'vm2' --details", "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm2.qcow2\n") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains should not return top-level error: %v", err) - } - - // One host should have failed - if len(result.HostErrors) != 1 { - t.Errorf("Expected 1 host error, got %d", len(result.HostErrors)) - } - - if result.HostErrors[0].HostName != "host1" { - t.Errorf("Expected host1 to fail, got %s", result.HostErrors[0].HostName) - } - - // VMs from host2 should be returned - if len(result.Domains) != 2 { - t.Errorf("Expected 2 domains, got %d", len(result.Domains)) - } -} - -// TestSSHConnectionTimeout tests that SSH timeouts are handled correctly. 
-func TestSSHConnectionTimeout(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "slow-host", Address: "192.168.1.100"}, - } - - mock := newMockSSHRunner() - mock.delay = 5 * time.Second // Simulate slow response - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - // Use a context with short timeout - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains should not return top-level error: %v", err) - } - - // The host should have timed out - if len(result.HostErrors) != 1 { - t.Errorf("Expected 1 host error due to timeout, got %d", len(result.HostErrors)) - } - - if len(result.HostErrors) > 0 && result.HostErrors[0].HostName != "slow-host" { - t.Errorf("Expected slow-host to fail, got %s", result.HostErrors[0].HostName) - } -} - -// TestFindHostForVMAllHostsUnreachable tests FindHostForVM when all hosts fail. -func TestFindHostForVMAllHostsUnreachable(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - } - - mock := newMockSSHRunner() - mock.defaultError = errors.New("connection timed out") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - _, err := manager.FindHostForVM(ctx, "test-vm") - - if err == nil { - t.Fatal("FindHostForVM should return error when all hosts are unreachable") - } - - // Error should mention the VM name - if !errors.Is(err, nil) && err.Error() == "" { - t.Error("Expected non-empty error message") - } -} - -// TestFindHostForVMNotFoundOnAnyHost tests when VM doesn't exist on any host. 
-func TestFindHostForVMNotFoundOnAnyHost(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - } - - mock := newMockSSHRunner() - // All hosts respond but VM not found (dominfo fails) - mock.defaultError = errors.New("domain not found") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - _, err := manager.FindHostForVM(ctx, "nonexistent-vm") - - if err == nil { - t.Fatal("FindHostForVM should return error when VM not found") - } -} - -// TestConcurrentVMOperationsOnSameHost tests thread safety during concurrent queries. -func TestConcurrentVMOperationsOnSameHost(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - } - - mock := newMockSSHRunner() - mock.setResponse("virsh list --all --name", "vm1\nvm2\nvm3\n") - mock.setResponse("virsh dominfo 'vm1'", "UUID: 1111\nState: running\nPersistent: yes\n") - mock.setResponse("virsh dominfo 'vm2'", "UUID: 2222\nState: running\nPersistent: yes\n") - mock.setResponse("virsh dominfo 'vm3'", "UUID: 3333\nState: running\nPersistent: yes\n") - mock.setResponse("virsh domblklist 'vm1' --details", "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm1.qcow2\n") - mock.setResponse("virsh domblklist 'vm2' --details", "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm2.qcow2\n") - mock.setResponse("virsh domblklist 'vm3' --details", "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm3.qcow2\n") - mock.delay = 10 * time.Millisecond // Small delay to create overlap - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - // Run multiple concurrent ListDomains operations - const concurrency = 10 
- var wg sync.WaitGroup - errors := make(chan error, concurrency) - results := make(chan *MultiHostListResult, concurrency) - - ctx := context.Background() - for i := 0; i < concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - result, err := manager.ListDomains(ctx) - if err != nil { - errors <- err - return - } - results <- result - }() - } - - wg.Wait() - close(errors) - close(results) - - // Check for any errors - for err := range errors { - t.Errorf("Concurrent operation failed: %v", err) - } - - // Verify all results are consistent - var resultCount int - for result := range results { - resultCount++ - if len(result.Domains) != 3 { - t.Errorf("Expected 3 domains, got %d", len(result.Domains)) - } - } - - if resultCount != concurrency { - t.Errorf("Expected %d results, got %d", concurrency, resultCount) - } -} - -// TestConcurrentFindHostForVM tests concurrent FindHostForVM operations. -func TestConcurrentFindHostForVM(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - {Name: "host3", Address: "192.168.1.3"}, - } - - mock := newMockSSHRunner() - // VM exists on host2 only - mock.setHostError("192.168.1.1", errors.New("domain not found")) - mock.setHostError("192.168.1.3", errors.New("domain not found")) - mock.setResponse("virsh dominfo 'target-vm'", "UUID: abc123\nState: running\n") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - // Run multiple concurrent FindHostForVM operations - const concurrency = 5 - var wg sync.WaitGroup - foundHosts := make(chan *config.HostConfig, concurrency) - foundErrors := make(chan error, concurrency) - - ctx := context.Background() - for i := 0; i < concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - host, err := manager.FindHostForVM(ctx, "target-vm") - if err != nil { - foundErrors <- err - return - } - foundHosts <- host - }() - } - - wg.Wait() - close(foundHosts) 
- close(foundErrors) - - // All should find host2 - for host := range foundHosts { - if host.Name != "host2" { - t.Errorf("Expected host2, got %s", host.Name) - } - } - - // Should have no errors - for err := range foundErrors { - t.Errorf("Unexpected error: %v", err) - } -} - -// TestListDomainsEmptyHosts tests behavior with no hosts configured. -func TestListDomainsEmptyHosts(t *testing.T) { - manager := NewMultiHostDomainManager(nil, nil) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains with empty hosts should not error: %v", err) - } - - if result == nil { - t.Fatal("Expected non-nil result") - } - - if len(result.Domains) != 0 { - t.Errorf("Expected 0 domains, got %d", len(result.Domains)) - } - - if len(result.HostErrors) != 0 { - t.Errorf("Expected 0 host errors, got %d", len(result.HostErrors)) - } -} - -// TestFindHostForVMNoHostsConfigured tests FindHostForVM with no hosts. -func TestFindHostForVMNoHostsConfigured(t *testing.T) { - manager := NewMultiHostDomainManager(nil, nil) - - ctx := context.Background() - _, err := manager.FindHostForVM(ctx, "any-vm") - - if err == nil { - t.Fatal("Expected error when no hosts configured") - } - - expectedMsg := "no hosts configured" - if err.Error() != expectedMsg { - t.Errorf("Expected error %q, got %q", expectedMsg, err.Error()) - } -} - -// TestSSHDefaultsApplied tests that SSH defaults are correctly applied. 
-func TestSSHDefaultsApplied(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, // No SSHUser or SSHPort set - } - - mock := newMockSSHRunner() - mock.setResponse("virsh list --all --name", "") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - _, _ = manager.ListDomains(ctx) - - calls := mock.getCalls() - if len(calls) == 0 { - t.Fatal("Expected at least one SSH call") - } - - // Verify defaults were applied - if calls[0].User != DefaultSSHUser { - t.Errorf("Expected default SSH user %q, got %q", DefaultSSHUser, calls[0].User) - } - - if calls[0].Port != DefaultSSHPort { - t.Errorf("Expected default SSH port %d, got %d", DefaultSSHPort, calls[0].Port) - } -} - -// TestSSHCustomPortAndUser tests that custom SSH settings are used. -func TestSSHCustomPortAndUser(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1", SSHUser: "admin", SSHPort: 2222}, - } - - mock := newMockSSHRunner() - mock.setResponse("virsh list --all --name", "") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - _, _ = manager.ListDomains(ctx) - - calls := mock.getCalls() - if len(calls) == 0 { - t.Fatal("Expected at least one SSH call") - } - - if calls[0].User != "admin" { - t.Errorf("Expected SSH user 'admin', got %q", calls[0].User) - } - - if calls[0].Port != 2222 { - t.Errorf("Expected SSH port 2222, got %d", calls[0].Port) - } -} - -// TestListDomainsWithDomainInfoFailure tests graceful handling when dominfo fails for one VM. 
-func TestListDomainsWithDomainInfoFailure(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - } - - mock := &selectiveMockSSHRunner{ - responses: map[string]mockResponse{ - "virsh list --all --name": {output: "vm1\nvm2\nvm3\n"}, - "virsh dominfo 'vm1'": {output: "UUID: 1111\nState: running\nPersistent: yes\n"}, - "virsh dominfo 'vm2'": {err: errors.New("domain info failed")}, - "virsh dominfo 'vm3'": {output: "UUID: 3333\nState: running\nPersistent: yes\n"}, - "virsh domblklist 'vm1' --details": {output: "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm1.qcow2\n"}, - "virsh domblklist 'vm3' --details": {output: "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm3.qcow2\n"}, - }, - } - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains should not return top-level error: %v", err) - } - - // Should get 2 VMs (vm1 and vm3), vm2 failed - if len(result.Domains) != 2 { - t.Errorf("Expected 2 domains (vm2 should be skipped), got %d", len(result.Domains)) - } - - // No host errors since the host itself is reachable - if len(result.HostErrors) != 0 { - t.Errorf("Expected 0 host errors, got %d", len(result.HostErrors)) - } -} - -// TestCustomQueryTimeout tests that per-host QueryTimeout is respected. 
-func TestCustomQueryTimeout(t *testing.T) { - // Host with short custom timeout - hosts := []config.HostConfig{ - {Name: "fast-host", Address: "192.168.1.1", QueryTimeout: 50 * time.Millisecond}, - } - - mock := newMockSSHRunner() - mock.delay = 200 * time.Millisecond // Exceeds the custom timeout - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains should not return top-level error: %v", err) - } - - // The host should have timed out due to custom timeout - if len(result.HostErrors) != 1 { - t.Errorf("Expected 1 host error due to custom timeout, got %d", len(result.HostErrors)) - } -} - -// TestDefaultQueryTimeoutUsedWhenNotSet tests that default timeout is used when QueryTimeout is 0. -func TestDefaultQueryTimeoutUsedWhenNotSet(t *testing.T) { - // Host without custom timeout (uses default) - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, // QueryTimeout = 0, should use default - } - - mock := newMockSSHRunner() - mock.setResponse("virsh list --all --name", "") - // No delay - should complete within default timeout - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains should not return error: %v", err) - } - - // Should succeed with no errors - if len(result.HostErrors) != 0 { - t.Errorf("Expected 0 host errors, got %d", len(result.HostErrors)) - } -} - -// selectiveMockSSHRunner allows command-specific responses. 
-type selectiveMockSSHRunner struct { - mu sync.Mutex - responses map[string]mockResponse -} - -type mockResponse struct { - output string - err error -} - -func (m *selectiveMockSSHRunner) Run(ctx context.Context, address, user string, port int, command string) (string, error) { - m.mu.Lock() - defer m.mu.Unlock() - - if resp, ok := m.responses[command]; ok { - return resp.output, resp.err - } - return "", fmt.Errorf("no mock response for command: %s", command) -} diff --git a/fluid-remote/internal/libvirt/remote.go b/fluid-remote/internal/libvirt/remote.go deleted file mode 100644 index ab6800ee..00000000 --- a/fluid-remote/internal/libvirt/remote.go +++ /dev/null @@ -1,749 +0,0 @@ -package libvirt - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "log/slog" - "os/exec" - "strings" - "time" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/config" -) - -// RemoteVirshManager implements Manager for remote libvirt hosts via SSH. -// It executes virsh and related commands on a remote host. -type RemoteVirshManager struct { - host config.HostConfig - cfg Config - logger *slog.Logger -} - -// NewRemoteVirshManager creates a new RemoteVirshManager for the given host. -func NewRemoteVirshManager(host config.HostConfig, cfg Config, logger *slog.Logger) *RemoteVirshManager { - if cfg.DefaultVCPUs == 0 { - cfg.DefaultVCPUs = 2 - } - if cfg.DefaultMemoryMB == 0 { - cfg.DefaultMemoryMB = 2048 - } - if logger == nil { - logger = slog.Default() - } - return &RemoteVirshManager{ - host: host, - cfg: cfg, - logger: logger, - } -} - -// CloneVM creates a linked-clone VM on the remote host. -func (m *RemoteVirshManager) CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) { - return DomainRef{}, fmt.Errorf("CloneVM not implemented for remote hosts - use CloneFromVM instead") -} - -// CloneFromVM creates a linked-clone VM from an existing VM on the remote host. 
-func (m *RemoteVirshManager) CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) { - if newVMName == "" { - return DomainRef{}, fmt.Errorf("new VM name is required") - } - if sourceVMName == "" { - return DomainRef{}, fmt.Errorf("source VM name is required") - } - - // Validate inputs for shell escaping - escapedSourceVM, err := shellEscape(sourceVMName) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid source VM name: %w", err) - } - escapedNewVM, err := shellEscape(newVMName) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid new VM name: %w", err) - } - - if cpu <= 0 { - cpu = m.cfg.DefaultVCPUs - } - if memoryMB <= 0 { - memoryMB = m.cfg.DefaultMemoryMB - } - if network == "" { - network = m.cfg.DefaultNetwork - } - - m.logger.Info("cloning VM on remote host", - "host", m.host.Name, - "source_vm", sourceVMName, - "new_vm", newVMName, - ) - - // Get source VM's disk path - out, err := m.runSSH(ctx, fmt.Sprintf("virsh domblklist %s --details", escapedSourceVM)) - if err != nil { - return DomainRef{}, fmt.Errorf("lookup source VM %q: %w", sourceVMName, err) - } - - basePath := "" - lines := strings.Split(out, "\n") - for _, line := range lines { - fields := strings.Fields(line) - if len(fields) >= 4 && fields[0] == "file" && fields[1] == "disk" { - basePath = fields[3] - break - } - } - if basePath == "" { - return DomainRef{}, fmt.Errorf("could not find disk path for source VM %q", sourceVMName) - } - - // Validate and escape paths - jobDir := fmt.Sprintf("%s/%s", m.cfg.WorkDir, newVMName) - escapedJobDir, err := shellEscape(jobDir) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid job directory path: %w", err) - } - - // Create job directory on remote host - if _, err := m.runSSH(ctx, fmt.Sprintf("mkdir -p %s", escapedJobDir)); err != nil { - return DomainRef{}, fmt.Errorf("create job dir: %w", err) - } - - // Create overlay disk - overlayPath := 
fmt.Sprintf("%s/disk-overlay.qcow2", jobDir) - escapedBasePath, err := shellEscape(basePath) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid base path: %w", err) - } - escapedOverlayPath, err := shellEscape(overlayPath) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid overlay path: %w", err) - } - if _, err := m.runSSH(ctx, fmt.Sprintf("qemu-img create -f qcow2 -F qcow2 -b %s %s", - escapedBasePath, escapedOverlayPath)); err != nil { - return DomainRef{}, fmt.Errorf("create overlay: %w", err) - } - - // Generate a unique cloud-init ISO for the cloned VM on the remote host - // This ensures the clone gets a new instance-id and DHCP network config - cloudInitISO := fmt.Sprintf("%s/cloud-init.iso", jobDir) - if err := m.buildCloudInitSeedOnRemote(ctx, newVMName, jobDir, cloudInitISO); err != nil { - // Log warning but don't fail - VM might still work if source didn't use cloud-init - m.logger.Warn("failed to build cloud-init seed for clone, continuing without it", - "vm", newVMName, - "error", err, - ) - cloudInitISO = "" // Don't try to attach a non-existent ISO - } - - // Dump source VM XML and modify it - sourceXML, err := m.runSSH(ctx, fmt.Sprintf("virsh dumpxml %s", escapedSourceVM)) - if err != nil { - return DomainRef{}, fmt.Errorf("dumpxml source vm: %w", err) - } - - newXML, err := modifyClonedXMLHelper(sourceXML, newVMName, overlayPath, cloudInitISO, cpu, memoryMB, network) - if err != nil { - return DomainRef{}, fmt.Errorf("modify cloned xml: %w", err) - } - - // Write domain XML to remote host using base64 to avoid shell escaping issues - xmlPath := fmt.Sprintf("%s/domain.xml", jobDir) - escapedXMLPath, err := shellEscape(xmlPath) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid XML path: %w", err) - } - encodedXML := base64.StdEncoding.EncodeToString([]byte(newXML)) - if _, err := m.runSSH(ctx, fmt.Sprintf("echo %s | base64 -d > %s", encodedXML, escapedXMLPath)); err != nil { - return DomainRef{}, fmt.Errorf("write domain 
xml: %w", err) - } - - // Define the domain - if _, err := m.runSSH(ctx, fmt.Sprintf("virsh define %s", escapedXMLPath)); err != nil { - return DomainRef{}, fmt.Errorf("virsh define: %w", err) - } - - // Get UUID - out, err = m.runSSH(ctx, fmt.Sprintf("virsh domuuid %s", escapedNewVM)) - if err != nil { - return DomainRef{Name: newVMName}, nil - } - - return DomainRef{Name: newVMName, UUID: strings.TrimSpace(out)}, nil -} - -// InjectSSHKey injects an SSH public key on the remote host. -func (m *RemoteVirshManager) InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error { - if sandboxName == "" { - return fmt.Errorf("sandboxName is required") - } - if username == "" { - username = "sandbox" - } - if strings.TrimSpace(publicKey) == "" { - return fmt.Errorf("publicKey is required") - } - - jobDir := fmt.Sprintf("%s/%s", m.cfg.WorkDir, sandboxName) - overlay := fmt.Sprintf("%s/disk-overlay.qcow2", jobDir) - - // Validate inputs for shell escaping - escapedOverlay, err := shellEscape(overlay) - if err != nil { - return fmt.Errorf("invalid overlay path: %w", err) - } - escapedUsername, err := shellEscape(username) - if err != nil { - return fmt.Errorf("invalid username: %w", err) - } - escapedPublicKey, err := shellEscape(publicKey) - if err != nil { - return fmt.Errorf("invalid public key: %w", err) - } - - switch strings.ToLower(m.cfg.SSHKeyInjectMethod) { - case "virt-customize": - cmdArgs := fmt.Sprintf("virt-customize -a %s --run-command 'id -u %s >/dev/null 2>&1 || useradd -m -s /bin/bash %s' --ssh-inject '%s:string:%s'", - escapedOverlay, - escapedUsername, - escapedUsername, - escapedUsername, - escapedPublicKey, - ) - if _, err := m.runSSH(ctx, cmdArgs); err != nil { - return fmt.Errorf("virt-customize inject: %w", err) - } - default: - return fmt.Errorf("unsupported SSHKeyInjectMethod for remote: %s", m.cfg.SSHKeyInjectMethod) - } - return nil -} - -// StartVM starts a VM on the remote host. 
-func (m *RemoteVirshManager) StartVM(ctx context.Context, vmName string) error { - if vmName == "" { - return fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return fmt.Errorf("invalid VM name: %w", err) - } - - m.logger.Info("starting VM on remote host", - "host", m.host.Name, - "vm_name", vmName, - ) - - _, err = m.runSSH(ctx, fmt.Sprintf("virsh start %s", escapedName)) - if err != nil { - return fmt.Errorf("virsh start: %w", err) - } - return nil -} - -// StopVM stops a VM on the remote host. -func (m *RemoteVirshManager) StopVM(ctx context.Context, vmName string, force bool) error { - if vmName == "" { - return fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return fmt.Errorf("invalid VM name: %w", err) - } - - cmd := "shutdown" - if force { - cmd = "destroy" - } - - _, err = m.runSSH(ctx, fmt.Sprintf("virsh %s %s", cmd, escapedName)) - return err -} - -// DestroyVM destroys and undefines a VM on the remote host. -func (m *RemoteVirshManager) DestroyVM(ctx context.Context, vmName string) error { - if vmName == "" { - return fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return fmt.Errorf("invalid VM name: %w", err) - } - - // Best-effort destroy if running - _, _ = m.runSSH(ctx, fmt.Sprintf("virsh destroy %s", escapedName)) - - // Undefine - if _, err := m.runSSH(ctx, fmt.Sprintf("virsh undefine %s", escapedName)); err != nil { - // Continue to remove files - _ = err - } - - // Remove workspace - jobDir := fmt.Sprintf("%s/%s", m.cfg.WorkDir, vmName) - escapedJobDir, err := shellEscape(jobDir) - if err != nil { - return fmt.Errorf("invalid job directory path: %w", err) - } - _, _ = m.runSSH(ctx, fmt.Sprintf("rm -rf %s", escapedJobDir)) - - return nil -} - -// CreateSnapshot creates a snapshot on the remote host. 
-func (m *RemoteVirshManager) CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (SnapshotRef, error) { - if vmName == "" || snapshotName == "" { - return SnapshotRef{}, fmt.Errorf("vmName and snapshotName are required") - } - - escapedVMName, err := shellEscape(vmName) - if err != nil { - return SnapshotRef{}, fmt.Errorf("invalid VM name: %w", err) - } - escapedSnapshotName, err := shellEscape(snapshotName) - if err != nil { - return SnapshotRef{}, fmt.Errorf("invalid snapshot name: %w", err) - } - - if external { - jobDir := fmt.Sprintf("%s/%s", m.cfg.WorkDir, vmName) - snapPath := fmt.Sprintf("%s/snap-%s.qcow2", jobDir, snapshotName) - escapedSnapPath, err := shellEscape(snapPath) - if err != nil { - return SnapshotRef{}, fmt.Errorf("invalid snapshot path: %w", err) - } - args := fmt.Sprintf("virsh snapshot-create-as %s %s --disk-only --atomic --no-metadata --diskspec vda,file=%s", - escapedVMName, escapedSnapshotName, escapedSnapPath) - if _, err := m.runSSH(ctx, args); err != nil { - return SnapshotRef{}, fmt.Errorf("external snapshot create: %w", err) - } - return SnapshotRef{Name: snapshotName, Kind: "EXTERNAL", Ref: snapPath}, nil - } - - if _, err := m.runSSH(ctx, fmt.Sprintf("virsh snapshot-create-as %s %s", - escapedVMName, escapedSnapshotName)); err != nil { - return SnapshotRef{}, fmt.Errorf("internal snapshot create: %w", err) - } - return SnapshotRef{Name: snapshotName, Kind: "INTERNAL", Ref: snapshotName}, nil -} - -// DiffSnapshot returns a diff plan for the remote host. 
-func (m *RemoteVirshManager) DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*FSComparePlan, error) { - if vmName == "" || fromSnapshot == "" || toSnapshot == "" { - return nil, fmt.Errorf("vmName, fromSnapshot and toSnapshot are required") - } - - plan := &FSComparePlan{ - VMName: vmName, - FromSnapshot: fromSnapshot, - ToSnapshot: toSnapshot, - Notes: []string{"Remote host snapshot diffing - manual intervention required"}, - } - return plan, nil -} - -// GetIPAddress discovers the IP address of a VM on the remote host. -func (m *RemoteVirshManager) GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - if vmName == "" { - return "", "", fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return "", "", fmt.Errorf("invalid VM name: %w", err) - } - - m.logger.Info("discovering IP on remote host", - "host", m.host.Name, - "vm_name", vmName, - "timeout", timeout, - ) - - deadline := time.Now().Add(timeout) - attempt := 0 - - for { - attempt++ - out, err := m.runSSH(ctx, fmt.Sprintf("virsh domifaddr %s --source lease", escapedName)) - if err == nil { - ip, mac := parseDomIfAddrIPv4WithMACHelper(out) - if ip != "" { - m.logger.Info("IP discovered on remote host", - "host", m.host.Name, - "vm_name", vmName, - "ip", ip, - "mac", mac, - ) - return ip, mac, nil - } - } - - if time.Now().After(deadline) { - break - } - time.Sleep(2 * time.Second) - } - - return "", "", fmt.Errorf("ip address not found within timeout on remote host %s", m.host.Name) -} - -// GetVMState returns the state of a VM on the remote host. 
-func (m *RemoteVirshManager) GetVMState(ctx context.Context, vmName string) (VMState, error) { - if vmName == "" { - return VMStateUnknown, fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return VMStateUnknown, fmt.Errorf("invalid VM name: %w", err) - } - - out, err := m.runSSH(ctx, fmt.Sprintf("virsh domstate %s", escapedName)) - if err != nil { - return VMStateUnknown, fmt.Errorf("get vm state: %w", err) - } - return parseVMStateHelper(out), nil -} - -// ValidateSourceVM performs pre-flight checks on a source VM on the remote host. -func (m *RemoteVirshManager) ValidateSourceVM(ctx context.Context, vmName string) (*VMValidationResult, error) { - if vmName == "" { - return nil, fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return nil, fmt.Errorf("invalid VM name: %w", err) - } - - result := &VMValidationResult{ - Valid: true, - Warnings: []string{}, - Errors: []string{}, - } - - // Check VM state - state, err := m.GetVMState(ctx, vmName) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, fmt.Sprintf("Failed to get VM state: %v", err)) - return result, nil - } - result.State = state - - // Check MAC address using domiflist - out, err := m.runSSH(ctx, fmt.Sprintf("virsh domiflist %s", escapedName)) - if err != nil { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not get network interfaces: %v", err)) - } else { - lines := strings.Split(out, "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if line == "" || strings.HasPrefix(line, "Interface") || strings.HasPrefix(line, "-") { - continue - } - fields := strings.Fields(line) - if len(fields) >= 5 { - mac := fields[4] - if strings.Count(mac, ":") == 5 { - result.MACAddress = mac - result.HasNetwork = true - break - } - } - } - if result.MACAddress == "" { - result.Warnings = append(result.Warnings, - "Could not find MAC address - source VM may 
not have a network interface") - } - } - - // Check IP address if running - switch state { - case VMStateRunning: - out, err = m.runSSH(ctx, fmt.Sprintf("virsh domifaddr %s --source lease", escapedName)) - if err == nil { - ip, mac := parseDomIfAddrIPv4WithMACHelper(out) - if ip != "" { - result.IPAddress = ip - if mac != "" && result.MACAddress == "" { - result.MACAddress = mac - result.HasNetwork = true - } - } else { - result.Warnings = append(result.Warnings, - "Source VM is running but has no IP address assigned") - result.Warnings = append(result.Warnings, - "This may indicate cloud-init or DHCP issues - cloned sandboxes may also fail to get IPs") - } - } - case VMStateShutOff: - result.Warnings = append(result.Warnings, - "Source VM is shut off - cannot verify network configuration (IP/DHCP)") - } - - return result, nil -} - -// CheckHostResources validates that the remote host has sufficient resources. -func (m *RemoteVirshManager) CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) (*ResourceCheckResult, error) { - result := &ResourceCheckResult{ - Valid: true, - RequiredCPUs: requiredCPUs, - RequiredMemoryMB: requiredMemoryMB, - Warnings: []string{}, - Errors: []string{}, - } - - // Check CPUs using virsh nodeinfo - out, err := m.runSSH(ctx, "virsh nodeinfo") - if err == nil { - for _, line := range strings.Split(out, "\n") { - if strings.HasPrefix(line, "CPU(s):") { - fields := strings.Fields(line) - if len(fields) >= 2 { - _, _ = fmt.Sscanf(fields[1], "%d", &result.AvailableCPUs) - } - } - } - if requiredCPUs > result.AvailableCPUs { - result.Valid = false - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient CPUs on %s: need %d but only %d available", - m.host.Name, requiredCPUs, result.AvailableCPUs)) - } - } else { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not check CPUs on %s: %v", m.host.Name, err)) - } - - // Check memory using virsh nodememstats - out, err = m.runSSH(ctx, "virsh 
nodememstats") - if err == nil { - for _, line := range strings.Split(out, "\n") { - fields := strings.Fields(line) - if len(fields) >= 2 { - var val int64 - _, _ = fmt.Sscanf(fields[len(fields)-2], "%d", &val) - switch { - case strings.Contains(fields[0], "total"): - result.TotalMemoryMB = val / 1024 - case strings.Contains(fields[0], "free"): - result.AvailableMemoryMB = val / 1024 - } - } - } - - if result.TotalMemoryMB > 0 { - if int64(requiredMemoryMB) > result.AvailableMemoryMB { - result.Valid = false - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient memory on %s: need %d MB but only %d MB available", - m.host.Name, requiredMemoryMB, result.AvailableMemoryMB)) - } else if float64(requiredMemoryMB) > float64(result.AvailableMemoryMB)*0.8 { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Low memory warning on %s: requesting %d MB of %d MB available", - m.host.Name, requiredMemoryMB, result.AvailableMemoryMB)) - } - } - } else { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not check memory on %s: %v", m.host.Name, err)) - } - - // Check disk space - workDir := m.cfg.WorkDir - if workDir == "" { - workDir = "/var/lib/libvirt/images/sandboxes" - } - escapedWorkDir, err := shellEscape(workDir) - if err == nil { - out, err = m.runSSH(ctx, fmt.Sprintf("df -m %s | tail -1 | awk '{print $4}'", escapedWorkDir)) - if err == nil { - var available int64 - _, _ = fmt.Sscanf(strings.TrimSpace(out), "%d", &available) - result.AvailableDiskMB = available - - if available < 1024 { - result.Valid = false - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient disk space on %s: only %d MB available in %s", - m.host.Name, available, workDir)) - } else if available < 10*1024 { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Low disk space warning on %s: only %d MB available in %s", - m.host.Name, available, workDir)) - } - } else { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not check disk 
space on %s: %v", m.host.Name, err)) - } - } - - return result, nil -} - -// runSSH executes a command on the remote host via SSH. -func (m *RemoteVirshManager) runSSH(ctx context.Context, command string) (string, error) { - sshUser := m.host.SSHUser - if sshUser == "" { - sshUser = "root" - } - sshPort := m.host.SSHPort - if sshPort == 0 { - sshPort = 22 - } - - args := []string{ - "-o", "BatchMode=yes", - "-o", "StrictHostKeyChecking=accept-new", - "-o", "ConnectTimeout=10", - "-p", fmt.Sprintf("%d", sshPort), - fmt.Sprintf("%s@%s", sshUser, m.host.Address), - command, - } - - var stdout, stderr bytes.Buffer - cmd := exec.CommandContext(ctx, "ssh", args...) - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err := cmd.Run() - if err != nil { - errStr := strings.TrimSpace(stderr.String()) - if errStr != "" { - return stdout.String(), fmt.Errorf("%w: %s", err, errStr) - } - return stdout.String(), err - } - - return strings.TrimSpace(stdout.String()), nil -} - -// HostConfig returns the host configuration for this manager. -func (m *RemoteVirshManager) HostConfig() config.HostConfig { - return m.host -} - -// buildCloudInitSeedOnRemote creates a cloud-init ISO on the remote host. -// The key purpose is to provide a NEW instance-id that differs from what's stored -// on the cloned disk. This forces cloud-init to re-run its initialization, -// including network configuration for the clone's new MAC address. 
-func (m *RemoteVirshManager) buildCloudInitSeedOnRemote(ctx context.Context, vmName, jobDir, outISO string) error { - // Build cloud-init user-data with DHCP networking - userData := `#cloud-config -# Cloud-init config for cloned VMs -# This triggers cloud-init to re-run network configuration - -# Ensure networking is configured via DHCP -network: - version: 2 - ethernets: - id0: - match: - driver: virtio* - dhcp4: true -` - - // If SSH CA is configured, add sandbox user and SSH CA trust - if m.cfg.SSHCAPubKey != "" { - userData += fmt.Sprintf(` -# Create sandbox user for managed SSH credentials -users: - - default - - name: sandbox - sudo: ALL=(ALL) NOPASSWD:ALL - shell: /bin/bash - lock_passwd: true - -# Write SSH CA public key -write_files: - - path: /etc/ssh/ssh_ca.pub - content: | - %s - permissions: '0644' - owner: root:root - -# Configure sshd to trust the CA -runcmd: - - | - if [ -s /etc/ssh/ssh_ca.pub ]; then - if ! grep -q "TrustedUserCAKeys" /etc/ssh/sshd_config; then - echo "TrustedUserCAKeys /etc/ssh/ssh_ca.pub" >> /etc/ssh/sshd_config - systemctl restart sshd || systemctl restart ssh || true - fi - fi -`, m.cfg.SSHCAPubKey) - } - - // Use a unique instance-id based on the VM name - metaData := fmt.Sprintf(`instance-id: %s -local-hostname: %s -`, vmName, vmName) - - // Escape paths for shell - escapedOutISO, err := shellEscape(outISO) - if err != nil { - return fmt.Errorf("invalid ISO path: %w", err) - } - - // Write user-data and meta-data to remote host using base64 - userDataB64 := base64.StdEncoding.EncodeToString([]byte(userData)) - metaDataB64 := base64.StdEncoding.EncodeToString([]byte(metaData)) - - userDataPath := fmt.Sprintf("%s/user-data", jobDir) - metaDataPath := fmt.Sprintf("%s/meta-data", jobDir) - escapedUserDataPath, err := shellEscape(userDataPath) - if err != nil { - return fmt.Errorf("invalid user-data path: %w", err) - } - escapedMetaDataPath, err := shellEscape(metaDataPath) - if err != nil { - return fmt.Errorf("invalid 
meta-data path: %w", err) - } - - if _, err := m.runSSH(ctx, fmt.Sprintf("echo %s | base64 -d > %s", userDataB64, escapedUserDataPath)); err != nil { - return fmt.Errorf("write user-data: %w", err) - } - if _, err := m.runSSH(ctx, fmt.Sprintf("echo %s | base64 -d > %s", metaDataB64, escapedMetaDataPath)); err != nil { - return fmt.Errorf("write meta-data: %w", err) - } - - // Try cloud-localds first, then genisoimage, then mkisofs - isoCmd := fmt.Sprintf(` -if command -v cloud-localds >/dev/null 2>&1; then - cloud-localds %s %s %s -elif command -v genisoimage >/dev/null 2>&1; then - genisoimage -output %s -volid cidata -joliet -rock %s %s -elif command -v mkisofs >/dev/null 2>&1; then - mkisofs -output %s -V cidata -J -R %s %s -else - echo "No ISO creation tool found" >&2 - exit 1 -fi -`, escapedOutISO, escapedUserDataPath, escapedMetaDataPath, - escapedOutISO, escapedUserDataPath, escapedMetaDataPath, - escapedOutISO, escapedUserDataPath, escapedMetaDataPath) - - if _, err := m.runSSH(ctx, isoCmd); err != nil { - return fmt.Errorf("create cloud-init ISO: %w", err) - } - - // Verify ISO was created - if _, err := m.runSSH(ctx, fmt.Sprintf("test -f %s", escapedOutISO)); err != nil { - return fmt.Errorf("cloud-init ISO not created at %s", outISO) - } - - m.logger.Info("created cloud-init ISO on remote host", - "host", m.host.Name, - "vm", vmName, - "iso", outISO, - ) - - return nil -} diff --git a/fluid-remote/internal/libvirt/virsh-stub.go b/fluid-remote/internal/libvirt/virsh-stub.go deleted file mode 100755 index 56099e7c..00000000 --- a/fluid-remote/internal/libvirt/virsh-stub.go +++ /dev/null @@ -1,266 +0,0 @@ -//go:build !libvirt -// +build !libvirt - -package libvirt - -import ( - "context" - "errors" - "log/slog" - "os" - "time" -) - -// ErrLibvirtNotAvailable is returned by all stub methods when libvirt support is not compiled in. 
-var ErrLibvirtNotAvailable = errors.New("libvirt support not available: rebuild with -tags libvirt") - -// Manager defines the VM orchestration operations we support against libvirt/KVM via virsh. -type Manager interface { - // CloneVM creates a linked-clone VM from a golden base image and defines a libvirt domain for it. - // cpu and memoryMB are the VM shape. network is the libvirt network name (e.g., "default"). - CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) - - // CloneFromVM creates a linked-clone VM from an existing VM's disk. - // It looks up the source VM by name in libvirt, retrieves its disk path, - // and creates an overlay pointing to that disk as the backing file. - CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) - - // InjectSSHKey injects an SSH public key for a user into the VM disk before boot. - // The mechanism is determined by configuration (e.g., virt-customize or cloud-init seed). - InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error - - // StartVM boots a defined domain. - StartVM(ctx context.Context, vmName string) error - - // StopVM gracefully shuts down a domain, or forces if force is true. - StopVM(ctx context.Context, vmName string, force bool) error - - // DestroyVM undefines the domain and removes its workspace (overlay files, domain XML, seeds). - // If the domain is running, it will be destroyed first. - DestroyVM(ctx context.Context, vmName string) error - - // CreateSnapshot creates a snapshot with the given name. - // If external is true, attempts a disk-only external snapshot. - CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (SnapshotRef, error) - - // DiffSnapshot prepares a plan to compare two snapshots' filesystems. - // The returned plan includes advice or prepared mounts where possible. 
- DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*FSComparePlan, error) - - // GetIPAddress attempts to fetch the VM's primary IP via libvirt leases. - // Returns the IP address and MAC address of the VM's primary interface. - GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (ip string, mac string, err error) - - // GetVMState returns the current state of a VM using virsh domstate. - GetVMState(ctx context.Context, vmName string) (VMState, error) - - // ValidateSourceVM performs pre-flight checks on a source VM before cloning. - // Returns a ValidationResult with warnings and errors about the VM's readiness. - ValidateSourceVM(ctx context.Context, vmName string) (*VMValidationResult, error) - - // CheckHostResources validates that the host has sufficient resources for a new sandbox. - // Returns a ResourceCheckResult with available resources and any warnings. - CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) (*ResourceCheckResult, error) -} - -// VMValidationResult contains the results of validating a source VM. -type VMValidationResult struct { - Valid bool `json:"valid"` - State VMState `json:"state"` - HasNetwork bool `json:"has_network"` - MACAddress string `json:"mac_address,omitempty"` - IPAddress string `json:"ip_address,omitempty"` - Warnings []string `json:"warnings,omitempty"` - Errors []string `json:"errors,omitempty"` -} - -// ResourceCheckResult contains the results of host resource validation. 
-type ResourceCheckResult struct { - Valid bool `json:"valid"` - AvailableMemoryMB int64 `json:"available_memory_mb"` - TotalMemoryMB int64 `json:"total_memory_mb"` - AvailableCPUs int `json:"available_cpus"` - AvailableDiskMB int64 `json:"available_disk_mb"` - RequiredMemoryMB int `json:"required_memory_mb"` - RequiredCPUs int `json:"required_cpus"` - Warnings []string `json:"warnings,omitempty"` - Errors []string `json:"errors,omitempty"` -} - -// VMState represents possible VM states from virsh domstate. -type VMState string - -const ( - VMStateRunning VMState = "running" - VMStatePaused VMState = "paused" - VMStateShutOff VMState = "shut off" - VMStateCrashed VMState = "crashed" - VMStateSuspended VMState = "pmsuspended" - VMStateUnknown VMState = "unknown" -) - -// Config controls how the virsh-based manager interacts with the host. -type Config struct { - LibvirtURI string // e.g., qemu:///system - BaseImageDir string // e.g., /var/lib/libvirt/images/base - WorkDir string // e.g., /var/lib/libvirt/images/jobs - DefaultNetwork string // e.g., default - SSHKeyInjectMethod string // "virt-customize" or "cloud-init" - CloudInitMetaTemplate string // optional meta-data template for cloud-init seed - - // SSH CA public key for managed credentials. - SSHCAPubKey string - - // SSH ProxyJump host for reaching VMs on an isolated network. - SSHProxyJump string - - // Optional explicit paths to binaries; if empty these are looked up in PATH. - VirshPath string - QemuImgPath string - VirtCustomizePath string - QemuNbdPath string - - // Socket VMNet configuration (macOS only) - SocketVMNetWrapper string // e.g., /path/to/qemu-socket-vmnet-wrapper.sh - - // Domain defaults - DefaultVCPUs int - DefaultMemoryMB int -} - -// DomainRef is a minimal reference to a libvirt domain (VM). -type DomainRef struct { - Name string - UUID string -} - -// SnapshotRef references a snapshot created for a domain. 
-type SnapshotRef struct { - Name string - // Kind: "INTERNAL" or "EXTERNAL" - Kind string - // Ref is driver-specific; could be an internal UUID or a file path for external snapshots. - Ref string -} - -// FSComparePlan describes a plan for diffing two snapshots' filesystems. -type FSComparePlan struct { - VMName string - FromSnapshot string - ToSnapshot string - - // Best-effort mount points (if prepared); may be empty strings when not mounted automatically. - FromMount string - ToMount string - - // Devices or files used; informative. - FromRef string - ToRef string - - // Free-form notes with instructions if the manager couldn't mount automatically. - Notes []string -} - -// VirshManager implements Manager using virsh/qemu-img/qemu-nbd/virt-customize and simple domain XML. -// This is a stub implementation that returns errors when libvirt is not available. -type VirshManager struct { - cfg Config - logger *slog.Logger -} - -// ConfigFromEnv returns a Config populated from environment variables. -func ConfigFromEnv() Config { - return Config{ - LibvirtURI: os.Getenv("LIBVIRT_URI"), - BaseImageDir: os.Getenv("BASE_IMAGE_DIR"), - WorkDir: os.Getenv("SANDBOX_WORKDIR"), - DefaultNetwork: os.Getenv("LIBVIRT_NETWORK"), - SSHKeyInjectMethod: os.Getenv("SSH_KEY_INJECT_METHOD"), - } -} - -// NewVirshManager creates a new VirshManager with the provided config. -// Note: This stub implementation will return errors for all operations. -func NewVirshManager(cfg Config, logger *slog.Logger) *VirshManager { - return &VirshManager{cfg: cfg, logger: logger} -} - -// NewFromEnv builds a Config from environment variables and returns a manager. -// Note: This stub implementation will return errors for all operations. -func NewFromEnv() *VirshManager { - cfg := Config{ - DefaultVCPUs: 2, - DefaultMemoryMB: 2048, - } - return NewVirshManager(cfg, nil) -} - -// CloneVM is a stub that returns an error when libvirt is not available. 
-func (m *VirshManager) CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) { - return DomainRef{}, ErrLibvirtNotAvailable -} - -// CloneFromVM is a stub that returns an error when libvirt is not available. -func (m *VirshManager) CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) { - return DomainRef{}, ErrLibvirtNotAvailable -} - -// InjectSSHKey is a stub that returns an error when libvirt is not available. -func (m *VirshManager) InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error { - return ErrLibvirtNotAvailable -} - -// StartVM is a stub that returns an error when libvirt is not available. -func (m *VirshManager) StartVM(ctx context.Context, vmName string) error { - return ErrLibvirtNotAvailable -} - -// StopVM is a stub that returns an error when libvirt is not available. -func (m *VirshManager) StopVM(ctx context.Context, vmName string, force bool) error { - return ErrLibvirtNotAvailable -} - -// DestroyVM is a stub that returns an error when libvirt is not available. -func (m *VirshManager) DestroyVM(ctx context.Context, vmName string) error { - return ErrLibvirtNotAvailable -} - -// CreateSnapshot is a stub that returns an error when libvirt is not available. -func (m *VirshManager) CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (SnapshotRef, error) { - return SnapshotRef{}, ErrLibvirtNotAvailable -} - -// DiffSnapshot is a stub that returns an error when libvirt is not available. -func (m *VirshManager) DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*FSComparePlan, error) { - return nil, ErrLibvirtNotAvailable -} - -// GetIPAddress is a stub that returns an error when libvirt is not available. 
-func (m *VirshManager) GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "", "", ErrLibvirtNotAvailable -} - -// GetVMState is a stub that returns an error when libvirt is not available. -func (m *VirshManager) GetVMState(ctx context.Context, vmName string) (VMState, error) { - return VMStateUnknown, ErrLibvirtNotAvailable -} - -// GetVMMAC is a stub that returns an error when libvirt is not available. -func (m *VirshManager) GetVMMAC(ctx context.Context, vmName string) (string, error) { - return "", ErrLibvirtNotAvailable -} - -// ReleaseDHCPLease is a stub that returns an error when libvirt is not available. -func (m *VirshManager) ReleaseDHCPLease(ctx context.Context, network, mac string) error { - return ErrLibvirtNotAvailable -} - -// ValidateSourceVM is a stub that returns an error when libvirt is not available. -func (m *VirshManager) ValidateSourceVM(ctx context.Context, vmName string) (*VMValidationResult, error) { - return nil, ErrLibvirtNotAvailable -} - -// CheckHostResources is a stub that returns an error when libvirt is not available. -func (m *VirshManager) CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) (*ResourceCheckResult, error) { - return nil, ErrLibvirtNotAvailable -} diff --git a/fluid-remote/internal/libvirt/virsh.go b/fluid-remote/internal/libvirt/virsh.go deleted file mode 100755 index d25f1874..00000000 --- a/fluid-remote/internal/libvirt/virsh.go +++ /dev/null @@ -1,1927 +0,0 @@ -//go:build libvirt -// +build libvirt - -package libvirt - -import ( - "bytes" - "context" - "crypto/rand" - "fmt" - "log" - "log/slog" - "os" - "os/exec" - "path/filepath" - "strings" - "text/template" - "time" -) - -// generateMACAddress generates a random MAC address with the locally administered bit set. -// Uses the 52:54:00 prefix which is commonly used by QEMU/KVM. 
-func generateMACAddress() string { - buf := make([]byte, 3) - _, _ = rand.Read(buf) - return fmt.Sprintf("52:54:00:%02x:%02x:%02x", buf[0], buf[1], buf[2]) -} - -// Manager defines the VM orchestration operations we support against libvirt/KVM via virsh. -type Manager interface { - // CloneVM creates a linked-clone VM from a golden base image and defines a libvirt domain for it. - // cpu and memoryMB are the VM shape. network is the libvirt network name (e.g., "default"). - CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) - - // CloneFromVM creates a linked-clone VM from an existing VM's disk. - // It looks up the source VM by name in libvirt, retrieves its disk path, - // and creates an overlay pointing to that disk as the backing file. - CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) - - // InjectSSHKey injects an SSH public key for a user into the VM disk before boot. - // The mechanism is determined by configuration (e.g., virt-customize or cloud-init seed). - InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error - - // StartVM boots a defined domain. - StartVM(ctx context.Context, vmName string) error - - // StopVM gracefully shuts down a domain, or forces if force is true. - StopVM(ctx context.Context, vmName string, force bool) error - - // DestroyVM undefines the domain and removes its workspace (overlay files, domain XML, seeds). - // If the domain is running, it will be destroyed first. - DestroyVM(ctx context.Context, vmName string) error - - // CreateSnapshot creates a snapshot with the given name. - // If external is true, attempts a disk-only external snapshot. - CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (SnapshotRef, error) - - // DiffSnapshot prepares a plan to compare two snapshots' filesystems. 
- // The returned plan includes advice or prepared mounts where possible. - DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*FSComparePlan, error) - - // GetIPAddress attempts to fetch the VM's primary IP via libvirt leases. - // Returns the IP address and MAC address of the VM's primary interface. - GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (ip string, mac string, err error) - - // GetVMState returns the current state of a VM using virsh domstate. - GetVMState(ctx context.Context, vmName string) (VMState, error) - - // ValidateSourceVM performs pre-flight checks on a source VM before cloning. - // Returns a ValidationResult with warnings and errors about the VM's readiness. - ValidateSourceVM(ctx context.Context, vmName string) (*VMValidationResult, error) - - // CheckHostResources validates that the host has sufficient resources for a new sandbox. - // Returns a ResourceCheckResult with available resources and any warnings. - CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) (*ResourceCheckResult, error) -} - -// VMValidationResult contains the results of validating a source VM. -type VMValidationResult struct { - Valid bool `json:"valid"` - State VMState `json:"state"` - HasNetwork bool `json:"has_network"` - MACAddress string `json:"mac_address,omitempty"` - IPAddress string `json:"ip_address,omitempty"` - Warnings []string `json:"warnings,omitempty"` - Errors []string `json:"errors,omitempty"` -} - -// ResourceCheckResult contains the results of host resource validation. 
-type ResourceCheckResult struct { - Valid bool `json:"valid"` - AvailableMemoryMB int64 `json:"available_memory_mb"` - TotalMemoryMB int64 `json:"total_memory_mb"` - AvailableCPUs int `json:"available_cpus"` - AvailableDiskMB int64 `json:"available_disk_mb"` - RequiredMemoryMB int `json:"required_memory_mb"` - RequiredCPUs int `json:"required_cpus"` - Warnings []string `json:"warnings,omitempty"` - Errors []string `json:"errors,omitempty"` -} - -// VMState represents possible VM states from virsh domstate. -type VMState string - -const ( - VMStateRunning VMState = "running" - VMStatePaused VMState = "paused" - VMStateShutOff VMState = "shut off" - VMStateCrashed VMState = "crashed" - VMStateSuspended VMState = "pmsuspended" - VMStateUnknown VMState = "unknown" -) - -// Config controls how the virsh-based manager interacts with the host. -type Config struct { - LibvirtURI string // e.g., qemu:///system - BaseImageDir string // e.g., /var/lib/libvirt/images/base - WorkDir string // e.g., /var/lib/libvirt/images/jobs - DefaultNetwork string // e.g., default - SSHKeyInjectMethod string // "virt-customize" or "cloud-init" - CloudInitMetaTemplate string // optional meta-data template for cloud-init seed - - // SSH CA public key for managed credentials. - // If set, this will be injected into VMs via cloud-init so they trust - // certificates signed by this CA. - SSHCAPubKey string - - // SSH ProxyJump host for reaching VMs on an isolated network. - // Format: "user@host:port" or just "host" for default user/port. - // If set, SSH commands will use -J flag to proxy through this host. - SSHProxyJump string - - // Optional explicit paths to binaries; if empty these are looked up in PATH. - VirshPath string - QemuImgPath string - VirtCustomizePath string - QemuNbdPath string - - // Socket VMNet configuration (macOS only) - // If DefaultNetwork is "socket_vmnet", this wrapper script is used as the emulator. - // The wrapper should invoke qemu through socket_vmnet_client. 
- SocketVMNetWrapper string // e.g., /path/to/qemu-socket-vmnet-wrapper.sh - - // Domain defaults - DefaultVCPUs int - DefaultMemoryMB int -} - -// DomainRef is a minimal reference to a libvirt domain (VM). -type DomainRef struct { - Name string - UUID string -} - -// SnapshotRef references a snapshot created for a domain. -type SnapshotRef struct { - Name string - // Kind: "INTERNAL" or "EXTERNAL" - Kind string - // Ref is driver-specific; could be an internal UUID or a file path for external snapshots. - Ref string -} - -// FSComparePlan describes a plan for diffing two snapshots' filesystems. -type FSComparePlan struct { - VMName string - FromSnapshot string - ToSnapshot string - - // Best-effort mount points (if prepared); may be empty strings when not mounted automatically. - FromMount string - ToMount string - - // Devices or files used; informative. - FromRef string - ToRef string - - // Free-form notes with instructions if the manager couldn't mount automatically. - Notes []string -} - -// VirshManager implements Manager using virsh/qemu-img/qemu-nbd/virt-customize and simple domain XML. -type VirshManager struct { - cfg Config - logger *slog.Logger -} - -// NewVirshManager creates a new VirshManager with the provided config and optional logger. -// If logger is nil, slog.Default() is used. -func NewVirshManager(cfg Config, logger *slog.Logger) *VirshManager { - // Fill sensible defaults - if cfg.DefaultVCPUs == 0 { - cfg.DefaultVCPUs = 2 - } - if cfg.DefaultMemoryMB == 0 { - cfg.DefaultMemoryMB = 2048 - } - if logger == nil { - logger = slog.Default() - } - return &VirshManager{cfg: cfg, logger: logger} -} - -// NewFromEnv builds a Config from environment variables and returns a manager. 
-// LIBVIRT_URI, BASE_IMAGE_DIR, SANDBOX_WORKDIR, LIBVIRT_NETWORK, SSH_KEY_INJECT_METHOD -func NewFromEnv() *VirshManager { - cfg := Config{ - LibvirtURI: getenvDefault("LIBVIRT_URI", "qemu:///system"), - BaseImageDir: getenvDefault("BASE_IMAGE_DIR", "/var/lib/libvirt/images/base"), - WorkDir: getenvDefault("SANDBOX_WORKDIR", "/var/lib/libvirt/images/jobs"), - DefaultNetwork: getenvDefault("LIBVIRT_NETWORK", "default"), - SSHKeyInjectMethod: getenvDefault("SSH_KEY_INJECT_METHOD", "virt-customize"), - SSHCAPubKey: readSSHCAPubKey(getenvDefault("SSH_CA_PUB_KEY_PATH", "")), - SSHProxyJump: getenvDefault("SSH_PROXY_JUMP", ""), - DefaultVCPUs: intFromEnv("DEFAULT_VCPUS", 2), - DefaultMemoryMB: intFromEnv("DEFAULT_MEMORY_MB", 2048), - } - return NewVirshManager(cfg, nil) -} - -// ConfigFromEnv returns a Config populated from environment variables. -func ConfigFromEnv() Config { - return Config{ - LibvirtURI: getenvDefault("LIBVIRT_URI", "qemu:///system"), - BaseImageDir: getenvDefault("BASE_IMAGE_DIR", "/var/lib/libvirt/images/base"), - WorkDir: getenvDefault("SANDBOX_WORKDIR", "/var/lib/libvirt/images/jobs"), - DefaultNetwork: getenvDefault("LIBVIRT_NETWORK", "default"), - SSHKeyInjectMethod: getenvDefault("SSH_KEY_INJECT_METHOD", "virt-customize"), - SSHCAPubKey: readSSHCAPubKey(getenvDefault("SSH_CA_PUB_KEY_PATH", "")), - SSHProxyJump: getenvDefault("SSH_PROXY_JUMP", ""), - SocketVMNetWrapper: getenvDefault("SOCKET_VMNET_WRAPPER", ""), - DefaultVCPUs: intFromEnv("DEFAULT_VCPUS", 2), - DefaultMemoryMB: intFromEnv("DEFAULT_MEMORY_MB", 2048), - } -} - -// readSSHCAPubKey reads the SSH CA public key from a file path. -// Returns empty string if path is empty or file cannot be read. 
-func readSSHCAPubKey(path string) string { - if path == "" { - return "" - } - data, err := os.ReadFile(path) - if err != nil { - return "" - } - return strings.TrimSpace(string(data)) -} - -func (m *VirshManager) CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) { - if newVMName == "" { - return DomainRef{}, fmt.Errorf("new VM name is required") - } - if baseImage == "" { - return DomainRef{}, fmt.Errorf("base image is required") - } - if cpu <= 0 { - cpu = m.cfg.DefaultVCPUs - } - if memoryMB <= 0 { - memoryMB = m.cfg.DefaultMemoryMB - } - if network == "" { - network = m.cfg.DefaultNetwork - } - - basePath := filepath.Join(m.cfg.BaseImageDir, baseImage) - if _, err := os.Stat(basePath); err != nil { - return DomainRef{}, fmt.Errorf("base image not accessible: %s: %w", basePath, err) - } - - jobDir := filepath.Join(m.cfg.WorkDir, newVMName) - if err := os.MkdirAll(jobDir, 0o755); err != nil { - return DomainRef{}, fmt.Errorf("create job dir: %w", err) - } - - overlayPath := filepath.Join(jobDir, "disk-overlay.qcow2") - qemuImg := m.binPath("qemu-img", m.cfg.QemuImgPath) - if _, err := m.run(ctx, qemuImg, "create", "-f", "qcow2", "-F", "qcow2", "-b", basePath, overlayPath); err != nil { - return DomainRef{}, fmt.Errorf("create overlay: %w", err) - } - - // Create minimal domain XML referencing overlay disk and network. 
- xmlPath := filepath.Join(jobDir, "domain.xml") - xml, err := renderDomainXML(domainXMLParams{ - Name: newVMName, - MemoryMB: memoryMB, - VCPUs: cpu, - DiskPath: overlayPath, - Network: network, - BootOrder: []string{"hd", "cdrom", "network"}, - }) - log.Println("Generated domain XML:", xml) - if err != nil { - return DomainRef{}, fmt.Errorf("render domain xml: %w", err) - } - if err := os.WriteFile(xmlPath, []byte(xml), 0o644); err != nil { - return DomainRef{}, fmt.Errorf("write domain xml: %w", err) - } - - // virsh define - virsh := m.binPath("virsh", m.cfg.VirshPath) - if _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "define", xmlPath); err != nil { - return DomainRef{}, fmt.Errorf("virsh define: %w", err) - } - - // Fetch UUID - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domuuid", newVMName) - if err != nil { - // Best-effort: If domuuid fails, we still return Name. - return DomainRef{Name: newVMName}, nil - } - return DomainRef{Name: newVMName, UUID: strings.TrimSpace(out)}, nil -} - -// CloneFromVM creates a linked-clone VM from an existing VM's disk. -// It looks up the source VM by name, retrieves its disk path, and creates an overlay. 
-func (m *VirshManager) CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) { - if newVMName == "" { - return DomainRef{}, fmt.Errorf("new VM name is required") - } - if sourceVMName == "" { - return DomainRef{}, fmt.Errorf("source VM name is required") - } - if cpu <= 0 { - cpu = m.cfg.DefaultVCPUs - } - if memoryMB <= 0 { - memoryMB = m.cfg.DefaultMemoryMB - } - if network == "" { - network = m.cfg.DefaultNetwork - } - - // Look up the source VM's disk path using virsh domblklist - virsh := m.binPath("virsh", m.cfg.VirshPath) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domblklist", sourceVMName, "--details") - if err != nil { - return DomainRef{}, fmt.Errorf("lookup source VM %q: %w", sourceVMName, err) - } - - // Parse domblklist output to find the disk path - basePath := "" - lines := strings.Split(out, "\n") - for _, line := range lines { - fields := strings.Fields(line) - if len(fields) >= 4 && fields[0] == "file" && fields[1] == "disk" { - basePath = fields[3] - break - } - } - if basePath == "" { - return DomainRef{}, fmt.Errorf("could not find disk path for source VM %q", sourceVMName) - } - - // Verify the disk exists - if _, err := os.Stat(basePath); err != nil { - return DomainRef{}, fmt.Errorf("source VM disk not accessible: %s: %w", basePath, err) - } - - jobDir := filepath.Join(m.cfg.WorkDir, newVMName) - if err := os.MkdirAll(jobDir, 0o755); err != nil { - return DomainRef{}, fmt.Errorf("create job dir: %w", err) - } - - overlayPath := filepath.Join(jobDir, "disk-overlay.qcow2") - qemuImg := m.binPath("qemu-img", m.cfg.QemuImgPath) - if _, err := m.run(ctx, qemuImg, "create", "-f", "qcow2", "-F", "qcow2", "-b", basePath, overlayPath); err != nil { - return DomainRef{}, fmt.Errorf("create overlay: %w", err) - } - - // Generate a unique cloud-init ISO for the cloned VM - // This ensures the clone gets a new instance-id and DHCP network config, - // regardless of how 
the source VM's cloud-init was configured (static IP, MAC, etc.) - cloudInitISO := filepath.Join(jobDir, "cloud-init.iso") - if err := m.buildCloudInitSeedForClone(ctx, newVMName, cloudInitISO); err != nil { - // Log warning but don't fail - VM might still work if source didn't use cloud-init - log.Printf("WARNING: failed to build cloud-init seed for clone %s: %v", newVMName, err) - cloudInitISO = "" // Don't try to attach a non-existent ISO - } - - // Dump the source VM's XML and modify it for the new VM - sourceXML, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "dumpxml", sourceVMName) - if err != nil { - return DomainRef{}, fmt.Errorf("dumpxml source vm: %w", err) - } - - newXML, err := modifyClonedXMLHelper(sourceXML, newVMName, overlayPath, cloudInitISO, cpu, memoryMB, network) - if err != nil { - return DomainRef{}, fmt.Errorf("modify cloned xml: %w", err) - } - - xmlPath := filepath.Join(jobDir, "domain.xml") - if err := os.WriteFile(xmlPath, []byte(newXML), 0o644); err != nil { - return DomainRef{}, fmt.Errorf("write domain xml: %w", err) - } - - // virsh define - if _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "define", xmlPath); err != nil { - return DomainRef{}, fmt.Errorf("virsh define: %w", err) - } - - // Fetch UUID - out, err = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domuuid", newVMName) - if err != nil { - return DomainRef{Name: newVMName}, nil - } - return DomainRef{Name: newVMName, UUID: strings.TrimSpace(out)}, nil -} - -// modifyClonedXML takes the XML from a source domain and adapts it for a new cloned domain. -// It sets a new name, UUID, disk path, MAC address, and cloud-init ISO path. It removes the -//
element from the network interface to prevent PCI slot conflicts. -// If cloudInitISO is provided, any existing CDROM device is updated to use it, ensuring the -// cloned VM gets a unique instance-id and fresh network configuration via cloud-init. -func (m *VirshManager) InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error { - if sandboxName == "" { - return fmt.Errorf("sandboxName is required") - } - if username == "" { - username = defaultGuestUser(sandboxName) - } - if strings.TrimSpace(publicKey) == "" { - return fmt.Errorf("publicKey is required") - } - - jobDir := filepath.Join(m.cfg.WorkDir, sandboxName) - overlay := filepath.Join(jobDir, "disk-overlay.qcow2") - if _, err := os.Stat(overlay); err != nil { - return fmt.Errorf("overlay not found for VM %s: %w", sandboxName, err) - } - - switch strings.ToLower(m.cfg.SSHKeyInjectMethod) { - case "virt-customize": - // Requires libguestfs tools on host. - virtCustomize := m.binPath("virt-customize", m.cfg.VirtCustomizePath) - // Ensure account exists and inject key. This is offline before first boot. - cmdArgs := []string{ - "-a", overlay, - "--run-command", fmt.Sprintf("id -u %s >/dev/null 2>&1 || useradd -m -s /bin/bash %s", shEscape(username), shEscape(username)), - "--ssh-inject", fmt.Sprintf("%s:string:%s", username, publicKey), - } - if _, err := m.run(ctx, virtCustomize, cmdArgs...); err != nil { - return fmt.Errorf("virt-customize inject: %w", err) - } - case "cloud-init": - // Build a NoCloud seed with the provided key and attach as CD-ROM. - seedISO := filepath.Join(jobDir, "seed.iso") - if err := m.buildCloudInitSeed(ctx, sandboxName, username, publicKey, seedISO); err != nil { - return fmt.Errorf("build cloud-init seed: %w", err) - } - // Attach seed ISO to domain XML (adds a CDROM) and redefine the domain. 
- xmlPath := filepath.Join(jobDir, "domain.xml") - if err := m.attachISOToDomainXML(xmlPath, seedISO); err != nil { - return fmt.Errorf("attach seed iso to domain xml: %w", err) - } - virsh := m.binPath("virsh", m.cfg.VirshPath) - if _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "define", xmlPath); err != nil { - return fmt.Errorf("re-define domain with seed: %w", err) - } - default: - return fmt.Errorf("unsupported SSHKeyInjectMethod: %s", m.cfg.SSHKeyInjectMethod) - } - return nil -} - -func (m *VirshManager) StartVM(ctx context.Context, vmName string) error { - if vmName == "" { - return fmt.Errorf("vmName is required") - } - - m.logger.Info("starting VM", - "vm_name", vmName, - "libvirt_uri", m.cfg.LibvirtURI, - ) - - virsh := m.binPath("virsh", m.cfg.VirshPath) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "start", vmName) - if err != nil { - m.logger.Error("failed to start VM", - "vm_name", vmName, - "error", err, - "output", out, - ) - return err - } - - m.logger.Debug("virsh start command completed", - "vm_name", vmName, - "output", out, - ) - - // Verify VM actually started by checking state - state, stateErr := m.GetVMState(ctx, vmName) - if stateErr != nil { - m.logger.Warn("unable to verify VM state after start", - "vm_name", vmName, - "error", stateErr, - ) - } else { - m.logger.Info("VM state after start command", - "vm_name", vmName, - "state", state, - ) - if state != VMStateRunning { - m.logger.Warn("VM not in running state after start command", - "vm_name", vmName, - "actual_state", state, - "expected_state", VMStateRunning, - "hint", "On ARM Macs with Lima, VMs may fail to start due to CPU mode limitations", - ) - } - } - - return nil -} - -// GetVMState returns the current state of a VM using virsh domstate. 
-func (m *VirshManager) GetVMState(ctx context.Context, vmName string) (VMState, error) { - if vmName == "" { - return VMStateUnknown, fmt.Errorf("vmName is required") - } - virsh := m.binPath("virsh", m.cfg.VirshPath) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domstate", vmName) - if err != nil { - return VMStateUnknown, fmt.Errorf("get vm state: %w", err) - } - return parseVMState(out), nil -} - -// parseVMState converts virsh domstate output to VMState. -func parseVMState(output string) VMState { - state := strings.TrimSpace(output) - switch state { - case "running": - return VMStateRunning - case "paused": - return VMStatePaused - case "shut off": - return VMStateShutOff - case "crashed": - return VMStateCrashed - case "pmsuspended": - return VMStateSuspended - default: - return VMStateUnknown - } -} - -// GetVMMAC returns the MAC address of the VM's primary network interface. -// This is useful for DHCP lease management. -func (m *VirshManager) GetVMMAC(ctx context.Context, vmName string) (string, error) { - if vmName == "" { - return "", fmt.Errorf("vmName is required") - } - virsh := m.binPath("virsh", m.cfg.VirshPath) - - // Use domiflist to get interface info (works even if VM is not running) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domiflist", vmName) - if err != nil { - return "", fmt.Errorf("get vm interfaces: %w", err) - } - - // Parse domiflist output: - // Interface Type Source Model MAC - // ------------------------------------------------------- - // - network default virtio 52:54:00:6b:3c:86 - lines := strings.Split(out, "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if line == "" || strings.HasPrefix(line, "Interface") || strings.HasPrefix(line, "-") { - continue - } - fields := strings.Fields(line) - if len(fields) >= 5 { - mac := fields[4] - // Validate MAC format - if strings.Count(mac, ":") == 5 { - return mac, nil - } - } - } - return "", fmt.Errorf("no MAC address found 
for VM %s", vmName) -} - -// ReleaseDHCPLease attempts to release the DHCP lease for a given MAC address. -// This helps prevent IP conflicts when VMs are rapidly created and destroyed. -// It tries multiple methods: -// 1. Remove static DHCP host entry (if any) -// 2. Use dhcp_release utility to release dynamic lease -// 3. Remove from lease file directly as fallback -func (m *VirshManager) ReleaseDHCPLease(ctx context.Context, network, mac string) error { - if network == "" { - network = m.cfg.DefaultNetwork - } - if mac == "" { - return fmt.Errorf("MAC address is required") - } - - virsh := m.binPath("virsh", m.cfg.VirshPath) - - // Try to remove any static DHCP host entry (if exists) - // This is a best-effort operation - it may fail if no static entry exists - hostXML := fmt.Sprintf("", mac) - _, _ = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, - "net-update", network, "delete", "ip-dhcp-host", hostXML, "--live", "--config") - - // Get the bridge interface name for the network (e.g., virbr0) - bridgeName, ip := m.getNetworkBridgeAndLeaseIP(ctx, network, mac) - - if bridgeName != "" && ip != "" { - // Try dhcp_release utility first (cleanest method) - // dhcp_release - if _, err := m.run(ctx, "dhcp_release", bridgeName, ip, mac); err == nil { - m.logger.Info("released DHCP lease via dhcp_release", - "network", network, - "bridge", bridgeName, - "ip", ip, - "mac", mac, - ) - return nil - } - - // Fallback: try to remove from lease file directly - if err := m.removeLeaseFromFile(network, mac); err == nil { - m.logger.Info("removed DHCP lease from lease file", - "network", network, - "mac", mac, - ) - return nil - } - } - - m.logger.Debug("DHCP lease release attempted (may not have fully succeeded)", - "network", network, - "mac", mac, - ) - - return nil -} - -// getNetworkBridgeAndLeaseIP returns the bridge interface name and leased IP for a MAC address. 
-func (m *VirshManager) getNetworkBridgeAndLeaseIP(ctx context.Context, network, mac string) (bridge, ip string) { - virsh := m.binPath("virsh", m.cfg.VirshPath) - - // Get bridge name from network XML - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "net-info", network) - if err == nil { - for _, line := range strings.Split(out, "\n") { - if strings.HasPrefix(line, "Bridge:") { - parts := strings.Fields(line) - if len(parts) >= 2 { - bridge = parts[1] - } - } - } - } - - // Get IP from DHCP leases - out, err = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "net-dhcp-leases", network) - if err == nil { - // Parse output: - // Expiry Time MAC address Protocol IP address Hostname Client ID - // 2024-01-08 12:00:00 52:54:00:6b:3c:86 ipv4 192.168.122.63/24 vm-name - - for _, line := range strings.Split(out, "\n") { - if strings.Contains(line, mac) { - fields := strings.Fields(line) - // Fields: [date, time, mac, protocol, ip/cidr, hostname, clientid] - if len(fields) >= 5 { - ipCIDR := fields[4] - if idx := strings.Index(ipCIDR, "/"); idx > 0 { - ip = ipCIDR[:idx] - } else { - ip = ipCIDR - } - } - } - } - } - - return bridge, ip -} - -// removeLeaseFromFile removes a DHCP lease entry from the dnsmasq lease file. 
-func (m *VirshManager) removeLeaseFromFile(network, mac string) error { - // Lease file is typically at /var/lib/libvirt/dnsmasq/.leases - leaseFile := fmt.Sprintf("/var/lib/libvirt/dnsmasq/%s.leases", network) - - data, err := os.ReadFile(leaseFile) - if err != nil { - return fmt.Errorf("read lease file: %w", err) - } - - // Lease file format: - // Example: 1704672000 52:54:00:6b:3c:86 192.168.122.63 vm-name * - var newLines []string - found := false - for _, line := range strings.Split(string(data), "\n") { - if strings.TrimSpace(line) == "" { - continue - } - if strings.Contains(line, mac) { - found = true - continue // Skip this line (remove the lease) - } - newLines = append(newLines, line) - } - - if !found { - return fmt.Errorf("lease not found for MAC %s", mac) - } - - // Write back the modified lease file - newData := strings.Join(newLines, "\n") - if len(newLines) > 0 { - newData += "\n" - } - if err := os.WriteFile(leaseFile, []byte(newData), 0o644); err != nil { - return fmt.Errorf("write lease file: %w", err) - } - - return nil -} - -func (m *VirshManager) StopVM(ctx context.Context, vmName string, force bool) error { - if vmName == "" { - return fmt.Errorf("vmName is required") - } - virsh := m.binPath("virsh", m.cfg.VirshPath) - if force { - _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "destroy", vmName) - return err - } - _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "shutdown", vmName) - return err -} - -func (m *VirshManager) DestroyVM(ctx context.Context, vmName string) error { - if vmName == "" { - return fmt.Errorf("vmName is required") - } - - // Get MAC address before destroying (for DHCP lease cleanup) - mac, macErr := m.GetVMMAC(ctx, vmName) - if macErr != nil { - m.logger.Debug("could not get MAC address for DHCP cleanup", - "vm_name", vmName, - "error", macErr, - ) - } - - virsh := m.binPath("virsh", m.cfg.VirshPath) - // Best-effort destroy if running - _, _ = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, 
"destroy", vmName) - // Undefine - if _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "undefine", vmName); err != nil { - // continue to remove files even if undefine fails - _ = err - } - - // Release DHCP lease to prevent IP conflicts with future VMs - if mac != "" { - if err := m.ReleaseDHCPLease(ctx, m.cfg.DefaultNetwork, mac); err != nil { - m.logger.Debug("failed to release DHCP lease", - "vm_name", vmName, - "mac", mac, - "error", err, - ) - } else { - m.logger.Info("released DHCP lease", - "vm_name", vmName, - "mac", mac, - ) - } - } - - // Remove workspace - jobDir := filepath.Join(m.cfg.WorkDir, vmName) - if err := os.RemoveAll(jobDir); err != nil { - return fmt.Errorf("cleanup job dir: %w", err) - } - return nil -} - -func (m *VirshManager) CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (SnapshotRef, error) { - if vmName == "" || snapshotName == "" { - return SnapshotRef{}, fmt.Errorf("vmName and snapshotName are required") - } - virsh := m.binPath("virsh", m.cfg.VirshPath) - - if external { - // External disk-only snapshot. - jobDir := filepath.Join(m.cfg.WorkDir, vmName) - snapPath := filepath.Join(jobDir, fmt.Sprintf("snap-%s.qcow2", snapshotName)) - // NOTE: This is a simplified attempt; real-world disk-only snapshots may need - // additional options and disk target identification. - args := []string{ - "--connect", m.cfg.LibvirtURI, "snapshot-create-as", vmName, snapshotName, - "--disk-only", "--atomic", "--no-metadata", - "--diskspec", fmt.Sprintf("vda,file=%s", snapPath), - } - if _, err := m.run(ctx, virsh, args...); err != nil { - return SnapshotRef{}, fmt.Errorf("external snapshot create: %w", err) - } - return SnapshotRef{Name: snapshotName, Kind: "EXTERNAL", Ref: snapPath}, nil - } - - // Internal snapshot (managed by libvirt/qemu). 
- if _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "snapshot-create-as", vmName, snapshotName); err != nil { - return SnapshotRef{}, fmt.Errorf("internal snapshot create: %w", err) - } - return SnapshotRef{Name: snapshotName, Kind: "INTERNAL", Ref: snapshotName}, nil -} - -func (m *VirshManager) DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*FSComparePlan, error) { - if vmName == "" || fromSnapshot == "" || toSnapshot == "" { - return nil, fmt.Errorf("vmName, fromSnapshot and toSnapshot are required") - } - - // Implementation shell: - // Strategy options: - // 1) For internal snapshots: use qemu-nbd with snapshot selection to mount and diff trees. - // 2) For external snapshots: mount the two qcow2 snapshot files via qemu-nbd. - // - // Because snapshot storage varies, we return advisory plan data and notes. - plan := &FSComparePlan{ - VMName: vmName, - FromSnapshot: fromSnapshot, - ToSnapshot: toSnapshot, - Notes: []string{}, - } - - // Attempt to detect external snapshot files in job dir. - jobDir := filepath.Join(m.cfg.WorkDir, vmName) - fromPath := filepath.Join(jobDir, fmt.Sprintf("snap-%s.qcow2", fromSnapshot)) - toPath := filepath.Join(jobDir, fmt.Sprintf("snap-%s.qcow2", toSnapshot)) - if fileExists(fromPath) && fileExists(toPath) { - plan.FromRef = fromPath - plan.ToRef = toPath - plan.Notes = append(plan.Notes, - "External snapshots detected. You can mount them with qemu-nbd and diff the trees.", - fmt.Sprintf("sudo modprobe nbd max_part=16 && sudo qemu-nbd --connect=/dev/nbd0 %s", shEscape(fromPath)), - fmt.Sprintf("sudo qemu-nbd --connect=/dev/nbd1 %s", shEscape(toPath)), - "sudo mount /dev/nbd0p1 /mnt/from && sudo mount /dev/nbd1p1 /mnt/to", - "Then run: sudo diff -ruN /mnt/from /mnt/to or use rsync --dry-run to list changes.", - "Be sure to umount and disconnect nbd after.", - ) - return plan, nil - } - - // Fallback: internal snapshots guidance. 
- plan.Notes = append(plan.Notes, - "Internal snapshots assumed. Use qemu-nbd with -s to select snapshot, then mount and diff.", - "For example: qemu-nbd may support --snapshot= (varies by version) or use qemu-img to create temporary exports.", - "Alternatively, boot the VM into each snapshot separately and export filesystem states.", - ) - return plan, nil -} - -func (m *VirshManager) GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - if vmName == "" { - return "", "", fmt.Errorf("vmName is required") - } - - m.logger.Info("discovering IP address", - "vm_name", vmName, - "timeout", timeout, - "network", m.cfg.DefaultNetwork, - ) - - // First check VM state - if not running, IP discovery will definitely fail - state, stateErr := m.GetVMState(ctx, vmName) - if stateErr == nil && state != VMStateRunning { - m.logger.Warn("attempting IP discovery on non-running VM", - "vm_name", vmName, - "state", state, - "hint", "VM must be in 'running' state to have an IP address", - ) - } - - // For socket_vmnet, use ARP-based discovery - if m.cfg.DefaultNetwork == "socket_vmnet" { - return m.getIPAddressViaARP(ctx, vmName, timeout) - } - - // For regular libvirt networks, use lease-based discovery - return m.getIPAddressViaLease(ctx, vmName, timeout) -} - -// getIPAddressViaLease discovers IP using libvirt DHCP lease information. -// This works for libvirt-managed networks (default, NAT, etc.) 
-func (m *VirshManager) getIPAddressViaLease(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - virsh := m.binPath("virsh", m.cfg.VirshPath) - deadline := time.Now().Add(timeout) - startTime := time.Now() - attempt := 0 - for { - attempt++ - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domifaddr", vmName, "--source", "lease") - if err == nil { - ip, mac := parseDomIfAddrIPv4WithMAC(out) - if ip != "" { - m.logger.Info("IP address discovered via lease", - "vm_name", vmName, - "ip_address", ip, - "mac_address", mac, - "attempts", attempt, - "elapsed", time.Since(startTime), - ) - return ip, mac, nil - } - } - - // Log progress every 10 attempts (20 seconds) - if attempt%10 == 0 { - m.logger.Debug("IP discovery in progress (lease)", - "vm_name", vmName, - "attempts", attempt, - "elapsed", time.Since(startTime), - "domifaddr_output", out, - ) - } - - if time.Now().After(deadline) { - break - } - time.Sleep(2 * time.Second) - } - - // Final state check for better error message - finalState, _ := m.GetVMState(ctx, vmName) - m.logger.Error("IP address discovery failed (lease)", - "vm_name", vmName, - "timeout", timeout, - "attempts", attempt, - "final_vm_state", finalState, - ) - - return "", "", fmt.Errorf("ip address not found within timeout (VM state: %s)", finalState) -} - -// getIPAddressViaARP discovers IP using ARP table lookup. -// This is used for socket_vmnet on macOS where libvirt doesn't manage DHCP. 
-func (m *VirshManager) getIPAddressViaARP(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - // First, get the VM's MAC address from the domain XML - mac, err := m.getVMMAC(ctx, vmName) - if err != nil { - m.logger.Error("failed to get VM MAC address for ARP lookup", - "vm_name", vmName, - "error", err, - ) - return "", "", fmt.Errorf("failed to get VM MAC address: %w", err) - } - - m.logger.Info("starting ARP-based IP discovery", - "vm_name", vmName, - "mac_address", mac, - "timeout", timeout, - ) - - deadline := time.Now().Add(timeout) - startTime := time.Now() - attempt := 0 - for { - attempt++ - ip, err := lookupIPByMAC(mac) - if err == nil && ip != "" { - m.logger.Info("IP address discovered via ARP", - "vm_name", vmName, - "ip_address", ip, - "mac_address", mac, - "attempts", attempt, - "elapsed", time.Since(startTime), - ) - return ip, mac, nil - } - - // Log progress every 10 attempts (20 seconds) - if attempt%10 == 0 { - m.logger.Debug("IP discovery in progress (ARP)", - "vm_name", vmName, - "mac_address", mac, - "attempts", attempt, - "elapsed", time.Since(startTime), - ) - } - - if time.Now().After(deadline) { - break - } - time.Sleep(2 * time.Second) - } - - // Final state check for better error message - finalState, _ := m.GetVMState(ctx, vmName) - m.logger.Error("IP address discovery failed (ARP)", - "vm_name", vmName, - "mac_address", mac, - "timeout", timeout, - "attempts", attempt, - "final_vm_state", finalState, - ) - - return "", "", fmt.Errorf("ip address not found in ARP table within timeout (VM state: %s, MAC: %s)", finalState, mac) -} - -// --- Helpers --- - -func (m *VirshManager) binPath(defaultName, override string) string { - if override != "" { - return override - } - return defaultName -} - -func (m *VirshManager) run(ctx context.Context, bin string, args ...string) (string, error) { - var stdout, stderr bytes.Buffer - // Provide a default timeout if the context has none. 
- if _, ok := ctx.Deadline(); !ok { - ctx2, cancel := context.WithTimeout(ctx, 120*time.Second) - defer cancel() - ctx = ctx2 - } - cmd := exec.CommandContext(ctx, bin, args...) - cmd.Stdout = &stdout - cmd.Stderr = &stderr - // Pass LIBVIRT_DEFAULT_URI for convenience when set. - env := os.Environ() - if m.cfg.LibvirtURI != "" { - env = append(env, "LIBVIRT_DEFAULT_URI="+m.cfg.LibvirtURI) - } - cmd.Env = env - - err := cmd.Run() - outStr := strings.TrimSpace(stdout.String()) - if err != nil { - errStr := strings.TrimSpace(stderr.String()) - if errStr != "" { - return outStr, fmt.Errorf("%s %s failed: %w: %s", bin, strings.Join(args, " "), err, errStr) - } - return outStr, fmt.Errorf("%s %s failed: %w", bin, strings.Join(args, " "), err) - } - return outStr, nil -} - -func getenvDefault(k, def string) string { - v := os.Getenv(k) - if v == "" { - return def - } - return v -} - -func intFromEnv(k string, def int) int { - v := os.Getenv(k) - if v == "" { - return def - } - var parsed int - _, err := fmt.Sscanf(v, "%d", &parsed) - if err != nil { - return def - } - return parsed -} - -func fileExists(p string) bool { - st, err := os.Stat(p) - return err == nil && !st.IsDir() -} - -func shEscape(s string) string { - // naive escape for use inside run-command; rely on controlled inputs. - s = strings.ReplaceAll(s, `'`, `'\'\'`) - return s -} - -func defaultGuestUser(vmName string) string { - // Heuristic default depending on distro naming conventions. - // Adjust as needed by calling code. 
- if strings.Contains(strings.ToLower(vmName), "ubuntu") { - return "ubuntu" - } - if strings.Contains(strings.ToLower(vmName), "centos") || strings.Contains(strings.ToLower(vmName), "rhel") { - return "centos" - } - return "cloud-user" -} - -func parseDomIfAddrIPv4WithMAC(s string) (ip string, mac string) { - // virsh domifaddr output example: - // Name MAC address Protocol Address - // ---------------------------------------------------------------------------- - // vnet0 52:54:00:6b:3c:86 ipv4 192.168.122.63/24 - lines := strings.Split(s, "\n") - for _, l := range lines { - l = strings.TrimSpace(l) - if l == "" || strings.HasPrefix(l, "Name") || strings.HasPrefix(l, "-") { - continue - } - parts := strings.Fields(l) - if len(parts) >= 4 && parts[2] == "ipv4" { - mac = parts[1] - addr := parts[3] - if i := strings.IndexByte(addr, '/'); i > 0 { - ip = addr[:i] - } else { - ip = addr - } - return ip, mac - } - } - return "", "" -} - -// getVMMAC extracts the MAC address from a VM's domain XML. -// For socket_vmnet VMs, the MAC is in the qemu:commandline section. -// For regular VMs, it's in the interface element. -func (m *VirshManager) getVMMAC(ctx context.Context, vmName string) (string, error) { - virsh := m.binPath("virsh", m.cfg.VirshPath) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "dumpxml", vmName) - if err != nil { - return "", fmt.Errorf("failed to get domain XML: %w", err) - } - - // Try to find MAC in qemu:commandline (socket_vmnet) - // Look for: - if strings.Contains(out, "qemu:commandline") { - lines := strings.Split(out, "\n") - for _, line := range lines { - if strings.Contains(line, "virtio-net-pci") && strings.Contains(line, "mac=") { - // Extract MAC from value="...mac=52:54:00:xx:xx:xx..." 
- start := strings.Index(line, "mac=") - if start != -1 { - start += 4 // skip "mac=" - end := start + 17 // MAC address is 17 chars (xx:xx:xx:xx:xx:xx) - if end <= len(line) { - mac := line[start:end] - // Validate it looks like a MAC - if strings.Count(mac, ":") == 5 { - return mac, nil - } - } - } - } - } - } - - // Try to find MAC in regular interface element - // Look for: - lines := strings.Split(out, "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if strings.HasPrefix(line, " 00) - normalizedArpMAC := normalizeMAC(arpMAC) - - if normalizedArpMAC == mac { - // Extract IP from (x.x.x.x) - for _, p := range parts { - if strings.HasPrefix(p, "(") && strings.HasSuffix(p, ")") { - ip := p[1 : len(p)-1] - // Validate it looks like an IP - if strings.Count(ip, ".") == 3 { - return ip, nil - } - } - } - } - } - - return "", fmt.Errorf("MAC %s not found in ARP table", mac) -} - -// normalizeMAC normalizes a MAC address by ensuring each octet has two digits. -// e.g., "52:54:0:ab:cd:ef" -> "52:54:00:ab:cd:ef" -func normalizeMAC(mac string) string { - parts := strings.Split(mac, ":") - if len(parts) != 6 { - return mac - } - for i, p := range parts { - if len(p) == 1 { - parts[i] = "0" + p - } - } - return strings.Join(parts, ":") -} - -// --- Domain XML rendering --- - -type domainXMLParams struct { - Name string - MemoryMB int - VCPUs int - DiskPath string - CloudInitISO string // Optional path to cloud-init ISO for networking config - Network string // "default", "user", "socket_vmnet", or custom network name - SocketVMNetPath string // Path to socket_vmnet socket (used when Network="socket_vmnet") - Emulator string // Optional custom emulator path (e.g., wrapper script for socket_vmnet) - BootOrder []string - Arch string // e.g., "x86_64" or "aarch64" - Machine string // e.g., "pc-q35-6.2" or "virt" - DomainType string // e.g., "kvm" or "qemu" - MACAddress string // Optional MAC address for the network interface -} - -func renderDomainXML(p 
domainXMLParams) (string, error) { - // Set defaults if not provided - if p.Arch == "" { - p.Arch = "x86_64" - } - if p.Machine == "" { - if p.Arch == "aarch64" { - p.Machine = "virt" - } else { - p.Machine = "pc-q35-6.2" - } - } - if p.DomainType == "" { - p.DomainType = "kvm" - } - // Generate MAC address if not provided and using socket_vmnet - if p.MACAddress == "" { - p.MACAddress = generateMACAddress() - } - // Default socket_vmnet path - if p.Network == "socket_vmnet" && p.SocketVMNetPath == "" { - p.SocketVMNetPath = "/opt/homebrew/var/run/socket_vmnet" - } - - // A minimal domain XML; adjust virtio model as needed by your environment. - // Use conditional sections for architecture-specific elements. - // For socket_vmnet, we need the qemu namespace for commandline passthrough. - const tpl = ` - - {{ .Name }} - {{ .MemoryMB }} - {{ .VCPUs }} -{{- if eq .Arch "aarch64" }} - - hvm - - - -{{- else }} - - hvm - - - -{{- end }} - - -{{- if eq .Arch "aarch64" }} - -{{- else }} - - -{{- end }} - -{{- if and (eq .Arch "aarch64") (eq .DomainType "qemu") }} - - cortex-a72 - -{{- else }} - -{{- end }} - -{{- if .Emulator }} - {{ .Emulator }} -{{- end }} - - - - - -{{- if .CloudInitISO }} - - - - - - - -{{- end }} - -{{- if eq .Arch "aarch64" }} - -{{- end }} -{{- if eq .Network "socket_vmnet" }} - -{{- else if or (eq .Network "user") (eq .Network "") }} - - - -{{- else }} - - - - -{{- end }} - - -{{- if ne .Arch "aarch64" }} - -{{- end }} - - /dev/urandom - - -{{- if eq .Network "socket_vmnet" }} - - - - - - -{{- end }} - -` - var b bytes.Buffer - t := template.Must(template.New("domain").Parse(tpl)) - if err := t.Execute(&b, p); err != nil { - return "", err - } - return b.String(), nil -} - -// attachISOToDomainXML is a simple XML string replacement to add a CD-ROM pointing to seed ISO. -// For a production system, consider parsing XML and building a proper DOM. 
-func (m *VirshManager) attachISOToDomainXML(xmlPath, isoPath string) error { - data, err := os.ReadFile(xmlPath) - if err != nil { - return err - } - xml := string(data) - needle := "" - cdrom := fmt.Sprintf(` - - - - - - `, isoPath) - if strings.Contains(xml, cdrom) { - // already attached - return nil - } - xml = strings.Replace(xml, needle, cdrom+"\n "+needle, 1) - return os.WriteFile(xmlPath, []byte(xml), 0o644) -} - -// buildCloudInitSeed creates a NoCloud seed ISO with a single user and SSH key. -// Requires cloud-localds (cloud-image-utils) on the host if implemented via external tool. -// This implementation writes user-data/meta-data and attempts to use genisoimage or mkisofs. -func (m *VirshManager) buildCloudInitSeed(ctx context.Context, vmName, username, publicKey, outISO string) error { - jobDir := filepath.Dir(outISO) - userData := fmt.Sprintf(`#cloud-config -users: - - name: %s - sudo: ALL=(ALL) NOPASSWD:ALL - groups: users, admin, sudo - shell: /bin/bash - ssh_authorized_keys: - - %s -`, username, publicKey) - - metaData := fmt.Sprintf(`instance-id: %s -local-hostname: %s -`, vmName, vmName) - - userDataPath := filepath.Join(jobDir, "user-data") - metaDataPath := filepath.Join(jobDir, "meta-data") - if err := os.WriteFile(userDataPath, []byte(userData), 0o644); err != nil { - return fmt.Errorf("write user-data: %w", err) - } - if err := os.WriteFile(metaDataPath, []byte(metaData), 0o644); err != nil { - return fmt.Errorf("write meta-data: %w", err) - } - - // Try cloud-localds if available - if hasBin("cloud-localds") { - if _, err := m.run(ctx, "cloud-localds", outISO, userDataPath, metaDataPath); err == nil { - return nil - } - } - - // Fallback to genisoimage/mkisofs - if hasBin("genisoimage") { - // genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data - _, err := m.run(ctx, "genisoimage", "-output", outISO, "-volid", "cidata", "-joliet", "-rock", userDataPath, metaDataPath) - return err - } - if hasBin("mkisofs") { - _, 
err := m.run(ctx, "mkisofs", "-output", outISO, "-V", "cidata", "-J", "-R", userDataPath, metaDataPath) - return err - } - - return fmt.Errorf("cloud-init seed build tools not found: need cloud-localds or genisoimage/mkisofs") -} - -// buildCloudInitSeedForClone creates a cloud-init ISO for a cloned VM. -// The key purpose is to provide a NEW instance-id that differs from what's stored -// on the cloned disk. This forces cloud-init to re-run its initialization, -// including network configuration for the clone's new MAC address. -// -// If SSHCAPubKey is configured, this function also: -// - Creates a 'sandbox' user with sudo access for managed SSH credentials -// - Injects the SSH CA public key and configures sshd to trust it -func (m *VirshManager) buildCloudInitSeedForClone(ctx context.Context, vmName, outISO string) error { - jobDir := filepath.Dir(outISO) - - // Build cloud-init user-data - var userDataBuilder strings.Builder - userDataBuilder.WriteString(`#cloud-config -# Cloud-init config for cloned VMs -# This triggers cloud-init to re-run network configuration - -# Ensure networking is configured via DHCP -network: - version: 2 - ethernets: - id0: - match: - driver: virtio* - dhcp4: true -`) - - // If SSH CA is configured, add sandbox user and SSH CA trust - if m.cfg.SSHCAPubKey != "" { - userDataBuilder.WriteString(` -# Create sandbox user for managed SSH credentials -users: - - default - - name: sandbox - sudo: ALL=(ALL) NOPASSWD:ALL - shell: /bin/bash - lock_passwd: true - -# Write SSH CA public key -write_files: - - path: /etc/ssh/ssh_ca.pub - content: | - `) - userDataBuilder.WriteString(m.cfg.SSHCAPubKey) - userDataBuilder.WriteString(` - permissions: '0644' - owner: root:root - -# Configure sshd to trust the CA -runcmd: - - | - if [ -s /etc/ssh/ssh_ca.pub ]; then - if ! 
grep -q "TrustedUserCAKeys" /etc/ssh/sshd_config; then - echo "TrustedUserCAKeys /etc/ssh/ssh_ca.pub" >> /etc/ssh/sshd_config - systemctl restart sshd || systemctl restart ssh || true - fi - fi -`) - } - - userData := userDataBuilder.String() - - // Use a unique instance-id based on the VM name - // This is the critical part: cloud-init checks if instance-id has changed - // If it has, cloud-init re-runs initialization including network setup - metaData := fmt.Sprintf(`instance-id: %s -local-hostname: %s -`, vmName, vmName) - - userDataPath := filepath.Join(jobDir, "user-data") - metaDataPath := filepath.Join(jobDir, "meta-data") - if err := os.WriteFile(userDataPath, []byte(userData), 0o644); err != nil { - return fmt.Errorf("write user-data: %w", err) - } - if err := os.WriteFile(metaDataPath, []byte(metaData), 0o644); err != nil { - return fmt.Errorf("write meta-data: %w", err) - } - - // Try cloud-localds if available - if hasBin("cloud-localds") { - if _, err := m.run(ctx, "cloud-localds", outISO, userDataPath, metaDataPath); err == nil { - // Verify the ISO was actually created - if _, statErr := os.Stat(outISO); statErr == nil { - return nil - } else { - log.Printf("WARNING: cloud-localds succeeded but ISO not found at %s: %v", outISO, statErr) - } - } else { - log.Printf("WARNING: cloud-localds failed: %v, trying fallback tools", err) - } - } - - // Fallback to genisoimage/mkisofs - if hasBin("genisoimage") { - _, err := m.run(ctx, "genisoimage", "-output", outISO, "-volid", "cidata", "-joliet", "-rock", userDataPath, metaDataPath) - if err != nil { - log.Printf("WARNING: genisoimage failed: %v", err) - return err - } - // Verify the ISO was actually created - if _, statErr := os.Stat(outISO); statErr != nil { - return fmt.Errorf("genisoimage succeeded but ISO not found at %s: %w", outISO, statErr) - } - return nil - } - if hasBin("mkisofs") { - _, err := m.run(ctx, "mkisofs", "-output", outISO, "-V", "cidata", "-J", "-R", userDataPath, metaDataPath) - if 
err != nil { - log.Printf("WARNING: mkisofs failed: %v", err) - return err - } - // Verify the ISO was actually created - if _, statErr := os.Stat(outISO); statErr != nil { - return fmt.Errorf("mkisofs succeeded but ISO not found at %s: %w", outISO, statErr) - } - return nil - } - - return fmt.Errorf("cloud-init seed build tools not found: need cloud-localds or genisoimage/mkisofs") -} - -func hasBin(name string) bool { - _, err := exec.LookPath(name) - return err == nil -} - -// ValidateSourceVM performs pre-flight checks on a source VM before cloning. -func (m *VirshManager) ValidateSourceVM(ctx context.Context, vmName string) (*VMValidationResult, error) { - if vmName == "" { - return nil, fmt.Errorf("vmName is required") - } - - result := &VMValidationResult{ - Valid: true, - Warnings: []string{}, - Errors: []string{}, - } - - // Check VM state - state, err := m.GetVMState(ctx, vmName) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, fmt.Sprintf("Failed to get VM state: %v", err)) - return result, nil - } - result.State = state - - // Check if VM has a network interface with MAC address - mac, macErr := m.GetVMMAC(ctx, vmName) - if macErr != nil { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not get MAC address from VM definition: %v", macErr)) - result.Warnings = append(result.Warnings, - "The source VM may not have a network interface configured properly") - } else { - result.MACAddress = mac - result.HasNetwork = true - } - - // Check if VM has an IP address (only if running) - switch state { - case VMStateRunning: - virsh := m.binPath("virsh", m.cfg.VirshPath) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domifaddr", vmName, "--source", "lease") - if err == nil { - ip, mac := parseDomIfAddrIPv4WithMAC(out) - if ip != "" { - result.IPAddress = ip - if mac != "" && result.MACAddress == "" { - result.MACAddress = mac - result.HasNetwork = true - } - } else { - result.Warnings = 
append(result.Warnings, - "Source VM is running but has no IP address assigned") - result.Warnings = append(result.Warnings, - "This may indicate cloud-init or DHCP issues - cloned sandboxes may also fail to get IPs") - } - } - - // Check network interface statistics if we have a MAC - if result.MACAddress != "" { - stats, statsErr := m.getVMNetworkStats(ctx, vmName) - if statsErr == nil { - if stats.txPackets == 0 && stats.rxPackets == 0 { - result.Warnings = append(result.Warnings, - "Source VM network interface shows zero TX/RX packets - network may not be functioning") - } else if stats.txPackets == 0 { - result.Warnings = append(result.Warnings, - "Source VM network interface shows zero TX packets - VM may not be sending network traffic") - } - } - } - case VMStateShutOff: - // VM is shut off - this is fine for cloning, but warn that we can't verify network - result.Warnings = append(result.Warnings, - "Source VM is shut off - cannot verify network configuration (IP/DHCP)") - result.Warnings = append(result.Warnings, - "Consider starting the source VM to verify it can obtain an IP before cloning") - default: - // VM is in an unexpected state - result.Warnings = append(result.Warnings, - fmt.Sprintf("Source VM is in state '%s' - expected 'running' or 'shut off'", state)) - } - - // Check if VM has cloud-init CDROM (helpful for diagnostics) - hasCloudInit := m.vmHasCloudInitCDROM(ctx, vmName) - - if !hasCloudInit { - result.Warnings = append(result.Warnings, - "Source VM does not appear to have a cloud-init CDROM attached") - result.Warnings = append(result.Warnings, - "Cloned sandboxes will still get their own cloud-init ISO for network config") - } - - return result, nil -} - -// vmNetworkStats holds network interface statistics -type vmNetworkStats struct { - rxBytes int64 - rxPackets int64 - txBytes int64 - txPackets int64 -} - -// getVMNetworkStats returns network interface statistics for a VM -func (m *VirshManager) getVMNetworkStats(ctx context.Context, 
vmName string) (*vmNetworkStats, error) { - virsh := m.binPath("virsh", m.cfg.VirshPath) - - // Get interface name first - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domiflist", vmName) - if err != nil { - return nil, fmt.Errorf("get interface list: %w", err) - } - - // Parse domiflist to get interface name - var ifaceName string - lines := strings.Split(out, "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if line == "" || strings.HasPrefix(line, "Interface") || strings.HasPrefix(line, "-") { - continue - } - fields := strings.Fields(line) - if len(fields) >= 1 && fields[0] != "-" { - ifaceName = fields[0] - break - } - } - - if ifaceName == "" || ifaceName == "-" { - // Interface name is "-" for some network types, try to get stats anyway - return nil, fmt.Errorf("no named interface found") - } - - // Get interface stats - out, err = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domifstat", vmName, ifaceName) - if err != nil { - return nil, fmt.Errorf("get interface stats: %w", err) - } - - stats := &vmNetworkStats{} - for _, line := range strings.Split(out, "\n") { - fields := strings.Fields(line) - if len(fields) >= 2 { - var val int64 - _, _ = fmt.Sscanf(fields[1], "%d", &val) - switch { - case strings.Contains(fields[0], "rx_bytes"): - stats.rxBytes = val - case strings.Contains(fields[0], "rx_packets"): - stats.rxPackets = val - case strings.Contains(fields[0], "tx_bytes"): - stats.txBytes = val - case strings.Contains(fields[0], "tx_packets"): - stats.txPackets = val - } - } - } - - return stats, nil -} - -// vmHasCloudInitCDROM checks if a VM has a cloud-init CDROM attached -func (m *VirshManager) vmHasCloudInitCDROM(ctx context.Context, vmName string) bool { - virsh := m.binPath("virsh", m.cfg.VirshPath) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domblklist", vmName, "--details") - if err != nil { - return false - } - - // Look for cdrom device with cloud-init related path - for _, line := 
range strings.Split(out, "\n") { - fields := strings.Fields(line) - if len(fields) >= 4 && fields[1] == "cdrom" { - path := strings.ToLower(fields[3]) - if strings.Contains(path, "cloud") || strings.Contains(path, "seed") || - strings.Contains(path, "cidata") || strings.Contains(path, "init") { - return true - } - } - } - return false -} - -// CheckHostResources validates that the host has sufficient resources for a new sandbox. -func (m *VirshManager) CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) (*ResourceCheckResult, error) { - result := &ResourceCheckResult{ - Valid: true, - RequiredCPUs: requiredCPUs, - RequiredMemoryMB: requiredMemoryMB, - Warnings: []string{}, - Errors: []string{}, - } - - // Check available resources using virsh nodeinfo - info, err := m.getHostInfo(ctx) - if err != nil { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not check host resources: %v", err)) - } else { - result.TotalMemoryMB = info.totalMB - result.AvailableMemoryMB = info.availableMB - result.AvailableCPUs = info.cpus - - // Check if we have enough memory - if int64(requiredMemoryMB) > info.availableMB { - result.Valid = false - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient memory: need %d MB but only %d MB available", - requiredMemoryMB, info.availableMB)) - } else if float64(requiredMemoryMB) > float64(info.availableMB)*0.8 { - // Warn if using more than 80% of available memory - result.Warnings = append(result.Warnings, - fmt.Sprintf("Low memory warning: requesting %d MB of %d MB available (%.1f%%)", - requiredMemoryMB, info.availableMB, - float64(requiredMemoryMB)/float64(info.availableMB)*100)) - } - - // Check if we have enough CPUs - if requiredCPUs > info.cpus { - result.Valid = false - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient CPUs: need %d but only %d available", - requiredCPUs, info.cpus)) - } - } - - // Check available disk space in work directory - diskInfo, err := 
m.getWorkDirDiskSpace(ctx) - if err != nil { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not check disk space: %v", err)) - } else { - result.AvailableDiskMB = diskInfo.availableMB - - // Warn if disk space is low (less than 10GB) - if diskInfo.availableMB < 10*1024 { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Low disk space warning: only %d MB available in work directory (%s)", - diskInfo.availableMB, m.cfg.WorkDir)) - } - - // Error if disk space is critically low (less than 1GB) - if diskInfo.availableMB < 1024 { - result.Valid = false - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient disk space: only %d MB available in work directory (%s)", - diskInfo.availableMB, m.cfg.WorkDir)) - } - } - - return result, nil -} - -// hostInfo holds host resource information -type hostInfo struct { - cpus int - totalMB int64 - availableMB int64 -} - -// getHostInfo returns available and total resources on the host -func (m *VirshManager) getHostInfo(ctx context.Context) (*hostInfo, error) { - virsh := m.binPath("virsh", m.cfg.VirshPath) - info := &hostInfo{} - - // Get CPU info from nodeinfo - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "nodeinfo") - if err == nil { - for _, line := range strings.Split(out, "\n") { - if strings.HasPrefix(line, "CPU(s):") { - fields := strings.Fields(line) - if len(fields) >= 2 { - _, _ = fmt.Sscanf(fields[1], "%d", &info.cpus) - } - } - if strings.HasPrefix(line, "Memory size:") { - fields := strings.Fields(line) - if len(fields) >= 3 { - var val int64 - _, _ = fmt.Sscanf(fields[2], "%d", &val) - info.totalMB = val / 1024 - info.availableMB = info.totalMB / 2 // Fallback estimate - } - } - } - } - - // Try virsh nodememstats for more accurate available memory - out, err = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "nodememstats") - if err == nil { - for _, line := range strings.Split(out, "\n") { - fields := strings.Fields(line) - if len(fields) >= 2 { - var val 
int64 - _, _ = fmt.Sscanf(fields[len(fields)-2], "%d", &val) - if strings.Contains(fields[0], "free") { - info.availableMB = val / 1024 - } - } - } - } - - return info, nil -} - -type diskSpaceInfo struct { - availableMB int64 -} - -func (m *VirshManager) getWorkDirDiskSpace(ctx context.Context) (*diskSpaceInfo, error) { - workDir := m.cfg.WorkDir - if workDir == "" { - workDir = "/var/lib/libvirt/images/sandboxes" - } - - // Use df command to get disk space - var stdout bytes.Buffer - cmd := exec.CommandContext(ctx, "df", "-m", workDir) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("df command failed: %w", err) - } - - // Parse df output (second line, 4th column for available) - lines := strings.Split(stdout.String(), "\n") - if len(lines) >= 2 { - fields := strings.Fields(lines[1]) - if len(fields) >= 4 { - var available int64 - _, _ = fmt.Sscanf(fields[3], "%d", &available) - return &diskSpaceInfo{ - availableMB: available, - }, nil - } - } - - return nil, fmt.Errorf("could not parse df output") -} diff --git a/fluid-remote/internal/libvirt/virsh_test.go b/fluid-remote/internal/libvirt/virsh_test.go deleted file mode 100755 index fe025981..00000000 --- a/fluid-remote/internal/libvirt/virsh_test.go +++ /dev/null @@ -1,784 +0,0 @@ -//go:build libvirt -// +build libvirt - -package libvirt - -import ( - "strings" - "testing" -) - -func TestRenderDomainXML_CPUMode(t *testing.T) { - tests := []struct { - name string - params domainXMLParams - expectedCPUMode string - }{ - { - name: "x86_64 with kvm uses host-passthrough", - params: domainXMLParams{ - Name: "test-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "default", - Arch: "x86_64", - Machine: "pc-q35-6.2", - DomainType: "kvm", - }, - expectedCPUMode: ``, - }, - { - name: "x86_64 with qemu uses host-passthrough", - params: domainXMLParams{ - Name: "test-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: 
"/var/lib/libvirt/images/test.qcow2", - Network: "default", - Arch: "x86_64", - Machine: "pc-q35-6.2", - DomainType: "qemu", - }, - expectedCPUMode: ``, - }, - { - name: "aarch64 with kvm uses host-passthrough", - params: domainXMLParams{ - Name: "test-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "default", - Arch: "aarch64", - Machine: "virt", - DomainType: "kvm", - }, - expectedCPUMode: ``, - }, - { - name: "aarch64 with qemu uses custom cortex-a72 model", - params: domainXMLParams{ - Name: "test-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "default", - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", - }, - expectedCPUMode: ``, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - xml, err := renderDomainXML(tt.params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - if !strings.Contains(xml, tt.expectedCPUMode) { - t.Errorf("renderDomainXML() expected CPU mode %q not found in XML:\n%s", tt.expectedCPUMode, xml) - } - }) - } -} - -func TestRenderDomainXML_BasicStructure(t *testing.T) { - params := domainXMLParams{ - Name: "test-sandbox", - MemoryMB: 2048, - VCPUs: 4, - DiskPath: "/var/lib/libvirt/images/test-sandbox.qcow2", - Network: "default", - Arch: "x86_64", - Machine: "pc-q35-6.2", - DomainType: "kvm", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - expectedElements := []string{ - ``, - `test-sandbox`, - `2048`, - `4`, - `hvm`, - ``, - ``, - } - - for _, expected := range expectedElements { - if !strings.Contains(xml, expected) { - t.Errorf("renderDomainXML() expected element %q not found in XML:\n%s", expected, xml) - } - } -} - -func TestRenderDomainXML_Aarch64Features(t *testing.T) { - params := domainXMLParams{ - Name: "test-arm-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: 
"default", - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // aarch64-specific elements - expectedElements := []string{ - ``, - ``, - ``, - ``, - `cortex-a72`, - } - - for _, expected := range expectedElements { - if !strings.Contains(xml, expected) { - t.Errorf("renderDomainXML() expected aarch64 element %q not found in XML:\n%s", expected, xml) - } - } - - // x86_64-specific elements should NOT be present - unexpectedElements := []string{ - ``, - ``, - ``, - } - - for _, unexpected := range unexpectedElements { - if strings.Contains(xml, unexpected) { - t.Errorf("renderDomainXML() unexpected x86_64 element %q found in aarch64 XML:\n%s", unexpected, xml) - } - } -} - -func TestRenderDomainXML_X86Features(t *testing.T) { - params := domainXMLParams{ - Name: "test-x86-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "default", - Arch: "x86_64", - Machine: "pc-q35-6.2", - DomainType: "kvm", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // x86_64-specific elements - expectedElements := []string{ - ``, - ``, - ``, - } - - for _, expected := range expectedElements { - if !strings.Contains(xml, expected) { - t.Errorf("renderDomainXML() expected x86_64 element %q not found in XML:\n%s", expected, xml) - } - } - - // aarch64-specific elements should NOT be present - unexpectedElements := []string{ - ``, - ``, - ``, - } - - for _, unexpected := range unexpectedElements { - if strings.Contains(xml, unexpected) { - t.Errorf("renderDomainXML() unexpected aarch64 element %q found in x86_64 XML:\n%s", unexpected, xml) - } - } -} - -func TestRenderDomainXML_Defaults(t *testing.T) { - // Test that defaults are applied when fields are empty - params := domainXMLParams{ - Name: "test-defaults", - MemoryMB: 512, - VCPUs: 1, - DiskPath: 
"/var/lib/libvirt/images/test.qcow2", - Network: "default", - // Arch, Machine, and DomainType are empty - should use defaults - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // Should default to x86_64, pc-q35-6.2, kvm - expectedDefaults := []string{ - ``, - `hvm`, - ``, - } - - for _, expected := range expectedDefaults { - if !strings.Contains(xml, expected) { - t.Errorf("renderDomainXML() expected default element %q not found in XML:\n%s", expected, xml) - } - } -} - -func TestRenderDomainXML_WithCloudInitISO(t *testing.T) { - // Test that cloud-init ISO is properly included in domain XML - params := domainXMLParams{ - Name: "test-cloud-init", - MemoryMB: 2048, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/jobs/test-cloud-init/disk-overlay.qcow2", - CloudInitISO: "/var/lib/libvirt/images/jobs/test-cloud-init/cloud-init.iso", - Network: "default", - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // Cloud-init ISO elements should be present - expectedElements := []string{ - ``, - ``, - ``, - ``, - ``, - } - - for _, expected := range expectedElements { - if !strings.Contains(xml, expected) { - t.Errorf("renderDomainXML() expected cloud-init element %q not found in XML:\n%s", expected, xml) - } - } -} - -func TestRenderDomainXML_WithoutCloudInitISO(t *testing.T) { - // Test that no cloud-init CDROM is included when CloudInitISO is empty - params := domainXMLParams{ - Name: "test-no-cloud-init", - MemoryMB: 2048, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/jobs/test/disk-overlay.qcow2", - Network: "default", - Arch: "x86_64", - Machine: "pc-q35-6.2", - DomainType: "kvm", - // CloudInitISO is empty - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // Cloud-init ISO elements should NOT be present - 
unexpectedElements := []string{ - `device="cdrom"`, - ``, - } - - for _, unexpected := range unexpectedElements { - if strings.Contains(xml, unexpected) { - t.Errorf("renderDomainXML() unexpected cloud-init element %q found in XML when CloudInitISO is empty:\n%s", unexpected, xml) - } - } - - // Main disk should still be present - if !strings.Contains(xml, ``) { - t.Error("renderDomainXML() main disk not found in XML") - } -} - -func TestCloudInitSeedForClone_UniqueInstanceID(t *testing.T) { - // This test verifies the concept that each clone should get a unique instance-id - // The actual buildCloudInitSeedForClone function creates files, so we test the - // expected behavior through the domain XML params - - vmNames := []string{"sbx-abc123", "sbx-def456", "sbx-ghi789"} - - for _, vmName := range vmNames { - params := domainXMLParams{ - Name: vmName, - MemoryMB: 1024, - VCPUs: 1, - DiskPath: "/var/lib/libvirt/images/jobs/" + vmName + "/disk-overlay.qcow2", - CloudInitISO: "/var/lib/libvirt/images/jobs/" + vmName + "/cloud-init.iso", - Network: "default", - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() for %s error = %v", vmName, err) - } - - // Each sandbox should have its own cloud-init ISO path - expectedISOPath := "/var/lib/libvirt/images/jobs/" + vmName + "/cloud-init.iso" - if !strings.Contains(xml, expectedISOPath) { - t.Errorf("renderDomainXML() for %s expected ISO path %q not found in XML", vmName, expectedISOPath) - } - } -} - -func TestRenderDomainXML_UserModeNetworking(t *testing.T) { - tests := []struct { - name string - network string - wantUser bool // true if we expect user-mode networking - }{ - { - name: "user network value", - network: "user", - wantUser: true, - }, - { - name: "empty network value", - network: "", - wantUser: true, - }, - { - name: "default network value", - network: "default", - wantUser: false, - }, - { - name: "custom 
network value", - network: "br0", - wantUser: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - params := domainXMLParams{ - Name: "test-vm", - MemoryMB: 2048, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: tt.network, - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - hasUserInterface := strings.Contains(xml, ``) - hasNetworkInterface := strings.Contains(xml, ``) - - if tt.wantUser { - if !hasUserInterface { - t.Errorf("expected user-mode networking but got network interface in XML:\n%s", xml) - } - if hasNetworkInterface { - t.Errorf("expected user-mode networking but found network interface in XML:\n%s", xml) - } - } else { - if hasUserInterface { - t.Errorf("expected network interface but got user-mode networking in XML:\n%s", xml) - } - if !hasNetworkInterface { - t.Errorf("expected network interface but not found in XML:\n%s", xml) - } - // Also verify the network name is correct - expectedSource := `` - if !strings.Contains(xml, expectedSource) { - t.Errorf("expected network source %q not found in XML:\n%s", expectedSource, xml) - } - } - }) - } -} - -func TestRenderDomainXML_SocketVMNet(t *testing.T) { - params := domainXMLParams{ - Name: "test-socket-vmnet", - MemoryMB: 2048, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "socket_vmnet", - SocketVMNetPath: "/opt/homebrew/var/run/socket_vmnet", - Emulator: "/path/to/qemu-wrapper.sh", - MACAddress: "52:54:00:ab:cd:ef", - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // Should have qemu namespace - if !strings.Contains(xml, `xmlns:qemu="http://libvirt.org/schemas/domain/qemu/1.0"`) { - t.Error("expected qemu namespace for socket_vmnet") - } - - // Should have custom 
emulator - if !strings.Contains(xml, `/path/to/qemu-wrapper.sh`) { - t.Errorf("expected custom emulator in XML:\n%s", xml) - } - - // Should have qemu:commandline with socket networking - if !strings.Contains(xml, ``) { - t.Error("expected qemu:commandline for socket_vmnet") - } - - // Should have socket,fd=3 netdev - if !strings.Contains(xml, `socket,id=vnet,fd=3`) { - t.Errorf("expected socket,fd=3 netdev in XML:\n%s", xml) - } - - // Should have MAC address - if !strings.Contains(xml, `mac=52:54:00:ab:cd:ef`) { - t.Errorf("expected MAC address in XML:\n%s", xml) - } - - // Should NOT have standard interface element - if strings.Contains(xml, ``) || strings.Contains(xml, ``) { - t.Errorf("unexpected standard interface in socket_vmnet XML:\n%s", xml) - } -} - -func TestNormalizeMAC(t *testing.T) { - tests := []struct { - name string - input string - expected string - }{ - { - name: "already normalized", - input: "52:54:00:ab:cd:ef", - expected: "52:54:00:ab:cd:ef", - }, - { - name: "shortened octets", - input: "52:54:0:ab:cd:ef", - expected: "52:54:00:ab:cd:ef", - }, - { - name: "multiple shortened octets", - input: "52:54:0:a:c:e", - expected: "52:54:00:0a:0c:0e", - }, - { - name: "invalid format", - input: "not-a-mac", - expected: "not-a-mac", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := normalizeMAC(tt.input) - if result != tt.expected { - t.Errorf("normalizeMAC(%q) = %q, want %q", tt.input, result, tt.expected) - } - }) - } -} - -func TestGenerateMACAddress(t *testing.T) { - mac := generateMACAddress() - - // Should start with QEMU prefix - if !strings.HasPrefix(mac, "52:54:00:") { - t.Errorf("generateMACAddress() = %q, want prefix '52:54:00:'", mac) - } - - // Should be valid format (17 chars: xx:xx:xx:xx:xx:xx) - if len(mac) != 17 { - t.Errorf("generateMACAddress() = %q, want 17 chars", mac) - } - - // Should have 5 colons - if strings.Count(mac, ":") != 5 { - t.Errorf("generateMACAddress() = %q, want 5 colons", 
mac) - } - - // Generate another one - should be different (random) - mac2 := generateMACAddress() - if mac == mac2 { - t.Errorf("generateMACAddress() returned same MAC twice: %q", mac) - } -} - -func TestParseVMState(t *testing.T) { - tests := []struct { - name string - output string - expected VMState - }{ - { - name: "running state", - output: "running\n", - expected: VMStateRunning, - }, - { - name: "running state without newline", - output: "running", - expected: VMStateRunning, - }, - { - name: "shut off state", - output: "shut off\n", - expected: VMStateShutOff, - }, - { - name: "paused state", - output: "paused\n", - expected: VMStatePaused, - }, - { - name: "crashed state", - output: "crashed\n", - expected: VMStateCrashed, - }, - { - name: "pmsuspended state", - output: "pmsuspended\n", - expected: VMStateSuspended, - }, - { - name: "unknown state", - output: "some-unknown-state\n", - expected: VMStateUnknown, - }, - { - name: "empty string", - output: "", - expected: VMStateUnknown, - }, - { - name: "whitespace only", - output: " \n", - expected: VMStateUnknown, - }, - { - name: "running with extra whitespace", - output: " running \n", - expected: VMStateRunning, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := parseVMState(tt.output) - if result != tt.expected { - t.Errorf("parseVMState(%q) = %v, want %v", tt.output, result, tt.expected) - } - }) - } -} - -func TestVMState_StringValues(t *testing.T) { - // Verify that VMState constants have the expected string values - tests := []struct { - state VMState - expected string - }{ - {VMStateRunning, "running"}, - {VMStateShutOff, "shut off"}, - {VMStatePaused, "paused"}, - {VMStateCrashed, "crashed"}, - {VMStateSuspended, "pmsuspended"}, - {VMStateUnknown, "unknown"}, - } - - for _, tt := range tests { - t.Run(string(tt.state), func(t *testing.T) { - if string(tt.state) != tt.expected { - t.Errorf("VMState constant %v has value %q, want %q", tt.state, 
string(tt.state), tt.expected) - } - }) - } -} - -func TestModifyClonedXML_UpdatesCloudInitISO(t *testing.T) { - // Test that modifyClonedXML updates existing CDROM device to use new cloud-init ISO - sourceXML := ` - test-vm - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - - - - - -
- - -` - - newXML, err := modifyClonedXMLHelper(sourceXML, "sbx-clone123", "/var/lib/libvirt/images/jobs/sbx-clone123/disk-overlay.qcow2", "/var/lib/libvirt/images/jobs/sbx-clone123/cloud-init.iso", 0, 0, "") - if err != nil { - t.Fatalf("modifyClonedXMLHelper() error = %v", err) - } - - // Should have updated name - if !strings.Contains(newXML, "sbx-clone123") { - t.Error("modifyClonedXMLHelper() did not update VM name") - } - - // Should have updated disk path - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-clone123/disk-overlay.qcow2") { - t.Error("modifyClonedXMLHelper() did not update disk path") - } - - // CRITICAL: Should have updated cloud-init ISO path (not the old /tmp/test-vm-seed.img) - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-clone123/cloud-init.iso") { - t.Errorf("modifyClonedXMLHelper() did not update cloud-init ISO path in XML:\n%s", newXML) - } - - // Should NOT contain the old cloud-init ISO path - if strings.Contains(newXML, "/tmp/test-vm-seed.img") { - t.Errorf("modifyClonedXMLHelper() still contains old cloud-init ISO path in XML:\n%s", newXML) - } - - // UUID should be removed - if strings.Contains(newXML, "12345678-1234-1234-1234-123456789012") { - t.Error("modifyClonedXMLHelper() did not remove UUID") - } - - // MAC address should be different from source - if strings.Contains(newXML, "52:54:00:11:22:33") { - t.Error("modifyClonedXMLHelper() did not generate new MAC address") - } -} - -func TestModifyClonedXML_AddsCloudInitCDROM(t *testing.T) { - // Test that modifyClonedXMLHelper adds CDROM device when source VM has none - sourceXML := ` - test-vm-no-cdrom - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - -` - - newXML, err := modifyClonedXMLHelper(sourceXML, "sbx-new", "/var/lib/libvirt/images/jobs/sbx-new/disk.qcow2", "/var/lib/libvirt/images/jobs/sbx-new/cloud-init.iso", 0, 0, "") - if err != nil { - t.Fatalf("modifyClonedXMLHelper() error = %v", err) - } - - // 
Should have added CDROM device with cloud-init ISO - if !strings.Contains(newXML, `device="cdrom"`) { - t.Errorf("modifyClonedXMLHelper() did not add CDROM device in XML:\n%s", newXML) - } - - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-new/cloud-init.iso") { - t.Errorf("modifyClonedXMLHelper() did not add cloud-init ISO path in XML:\n%s", newXML) - } - - // Should have added SCSI controller for the CDROM - if !strings.Contains(newXML, `type="scsi"`) { - t.Errorf("modifyClonedXMLHelper() did not add SCSI controller in XML:\n%s", newXML) - } -} - -func TestModifyClonedXML_NoCloudInitISO(t *testing.T) { - // Test that modifyClonedXMLHelper works without cloud-init ISO (empty string) - sourceXML := ` - test-vm - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - - - - - - -` - - // Empty cloudInitISO - should not modify CDROM - newXML, err := modifyClonedXMLHelper(sourceXML, "sbx-no-cloud", "/var/lib/libvirt/images/jobs/sbx-no-cloud/disk.qcow2", "", 0, 0, "") - if err != nil { - t.Fatalf("modifyClonedXMLHelper() error = %v", err) - } - - // Old CDROM path should still be there (unchanged) - if !strings.Contains(newXML, "/tmp/old-seed.img") { - t.Errorf("modifyClonedXMLHelper() modified CDROM when cloudInitISO was empty:\n%s", newXML) - } - - // Name and disk should still be updated - if !strings.Contains(newXML, "sbx-no-cloud") { - t.Error("modifyClonedXMLHelper() did not update VM name") - } - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-no-cloud/disk.qcow2") { - t.Error("modifyClonedXMLHelper() did not update disk path") - } -} diff --git a/fluid-remote/internal/podman/image.go b/fluid-remote/internal/podman/image.go deleted file mode 100755 index e5408703..00000000 --- a/fluid-remote/internal/podman/image.go +++ /dev/null @@ -1,307 +0,0 @@ -package podman - -import ( - "bytes" - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - 
"github.com/aspectrr/fluid.sh/fluid-remote/internal/workflow" -) - -// ImageBuilder handles Podman image building operations. -type ImageBuilder struct { - // podmanPath is the path to the podman binary. - podmanPath string -} - -// ImageBuilderConfig configures the image builder. -type ImageBuilderConfig struct { - // PodmanPath is the path to the podman binary. - // If empty, "podman" is looked up in PATH. - PodmanPath string -} - -// NewImageBuilder creates a new ImageBuilder with the given configuration. -func NewImageBuilder(cfg ImageBuilderConfig) *ImageBuilder { - podmanPath := cfg.PodmanPath - if podmanPath == "" { - podmanPath = "podman" - } - return &ImageBuilder{ - podmanPath: podmanPath, - } -} - -// ImageResult contains the result of building an image. -type ImageResult struct { - // ImageID is the full image ID. - ImageID string - - // ImageTag is the human-readable image tag (e.g., "vmclone/node-c:20251215T183000Z"). - ImageTag string - - // Cleanup is a function to remove the image. - Cleanup workflow.CleanupFunc -} - -// BuildImage builds a Podman image from a root filesystem archive. -// The image is tagged as vmclone/:. 
-func (b *ImageBuilder) BuildImage(ctx context.Context, archivePath string, vmName string, workDir string) (*ImageResult, error) { - // Generate image tag with timestamp - timestamp := time.Now().UTC().Format("20060102T150405Z") - imageTag := fmt.Sprintf("vmclone/%s:%s", vmName, timestamp) - - // Create a temporary directory for the build context - buildDir := filepath.Join(workDir, "build") - if err := os.MkdirAll(buildDir, 0o755); err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageBuildImage, - workflow.ErrImageBuildFailed, - fmt.Sprintf("failed to create build directory: %v", err), - ) - } - defer func() { _ = os.RemoveAll(buildDir) }() - - // Copy or link the archive to the build context - archiveBaseName := filepath.Base(archivePath) - buildArchivePath := filepath.Join(buildDir, archiveBaseName) - - // Create a hard link if possible, otherwise copy - if err := os.Link(archivePath, buildArchivePath); err != nil { - // Fall back to copy if hard link fails (e.g., cross-filesystem) - if err := copyFile(archivePath, buildArchivePath); err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageBuildImage, - workflow.ErrImageBuildFailed, - fmt.Sprintf("failed to copy archive to build context: %v", err), - ) - } - } - - // Generate Containerfile - containerfile := generateContainerfile(archiveBaseName) - containerfilePath := filepath.Join(buildDir, "Containerfile") - if err := os.WriteFile(containerfilePath, []byte(containerfile), 0o644); err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageBuildImage, - workflow.ErrImageBuildFailed, - fmt.Sprintf("failed to write Containerfile: %v", err), - ) - } - - // Build the image - imageID, err := b.buildImage(ctx, buildDir, imageTag) - if err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageBuildImage, - workflow.ErrImageBuildFailed, - fmt.Sprintf("podman build failed: %v", err), - ) - } - - return &ImageResult{ - ImageID: imageID, - ImageTag: imageTag, - Cleanup: 
func() error { - return b.RemoveImage(context.Background(), imageTag) - }, - }, nil -} - -// generateContainerfile creates a Containerfile for importing the root filesystem. -func generateContainerfile(archiveName string) string { - // Use scratch as base - we're importing a complete root filesystem - // ADD automatically extracts tar archives - return fmt.Sprintf(`# Auto-generated Containerfile for VM clone -FROM scratch - -# Add the root filesystem archive -ADD %s / - -# Set container environment marker -ENV container=podman - -# Default to shell - can be overridden at runtime -CMD ["/bin/sh"] -`, archiveName) -} - -// buildImage executes podman build and returns the image ID. -func (b *ImageBuilder) buildImage(ctx context.Context, buildDir string, imageTag string) (string, error) { - args := []string{ - "build", - "--tag", imageTag, - "--file", "Containerfile", - "--format", "oci", - "--quiet", - buildDir, - } - - cmd := exec.CommandContext(ctx, b.podmanPath, args...) - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return "", fmt.Errorf("build failed: %w: %s", err, stderr.String()) - } - - // podman build --quiet outputs just the image ID - imageID := strings.TrimSpace(stdout.String()) - if imageID == "" { - // If --quiet didn't give us an ID, inspect the image - return b.getImageID(ctx, imageTag) - } - - return imageID, nil -} - -// getImageID retrieves the full image ID for a given tag. -func (b *ImageBuilder) getImageID(ctx context.Context, imageTag string) (string, error) { - args := []string{ - "inspect", - "--format", "{{.Id}}", - imageTag, - } - - cmd := exec.CommandContext(ctx, b.podmanPath, args...) 
- var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return "", fmt.Errorf("inspect failed: %w: %s", err, stderr.String()) - } - - return strings.TrimSpace(stdout.String()), nil -} - -// RemoveImage removes a Podman image by tag or ID. -func (b *ImageBuilder) RemoveImage(ctx context.Context, imageRef string) error { - args := []string{ - "rmi", - "--force", - imageRef, - } - - cmd := exec.CommandContext(ctx, b.podmanPath, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("image removal failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// ImageExists checks if an image with the given tag exists. -func (b *ImageBuilder) ImageExists(ctx context.Context, imageTag string) (bool, error) { - args := []string{ - "image", - "exists", - imageTag, - } - - cmd := exec.CommandContext(ctx, b.podmanPath, args...) - err := cmd.Run() - - if err == nil { - return true, nil - } - - // Exit code 1 means image doesn't exist - if exitErr, ok := err.(*exec.ExitError); ok { - if exitErr.ExitCode() == 1 { - return false, nil - } - } - - return false, fmt.Errorf("failed to check image existence: %w", err) -} - -// ListImages lists images matching a filter pattern. -func (b *ImageBuilder) ListImages(ctx context.Context, filter string) ([]string, error) { - args := []string{ - "images", - "--format", "{{.Repository}}:{{.Tag}}", - } - - if filter != "" { - args = append(args, "--filter", fmt.Sprintf("reference=%s", filter)) - } - - cmd := exec.CommandContext(ctx, b.podmanPath, args...) 
- var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("list images failed: %w: %s", err, stderr.String()) - } - - output := strings.TrimSpace(stdout.String()) - if output == "" { - return []string{}, nil - } - - return strings.Split(output, "\n"), nil -} - -// GetImageInfo retrieves information about an image. -type ImageInfo struct { - ID string - Tag string - Created time.Time - Size int64 -} - -// InspectImage retrieves detailed information about an image. -func (b *ImageBuilder) InspectImage(ctx context.Context, imageRef string) (*ImageInfo, error) { - args := []string{ - "inspect", - "--format", "{{.Id}}|{{.Created}}|{{.Size}}", - imageRef, - } - - cmd := exec.CommandContext(ctx, b.podmanPath, args...) - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("inspect failed: %w: %s", err, stderr.String()) - } - - parts := strings.Split(strings.TrimSpace(stdout.String()), "|") - if len(parts) < 3 { - return nil, fmt.Errorf("unexpected inspect output format") - } - - created, _ := time.Parse(time.RFC3339, parts[1]) - var size int64 - if _, err := fmt.Sscanf(parts[2], "%d", &size); err != nil { - return nil, fmt.Errorf("failed to parse size: %w", err) - } - - return &ImageInfo{ - ID: parts[0], - Tag: imageRef, - Created: created, - Size: size, - }, nil -} - -// copyFile copies a file from src to dst. 
-func copyFile(src, dst string) error { - data, err := os.ReadFile(src) - if err != nil { - return err - } - return os.WriteFile(dst, data, 0o644) -} diff --git a/fluid-remote/internal/podman/run.go b/fluid-remote/internal/podman/run.go deleted file mode 100755 index 5eb9c7b7..00000000 --- a/fluid-remote/internal/podman/run.go +++ /dev/null @@ -1,459 +0,0 @@ -package podman - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "os/exec" - "strings" - "time" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/workflow" -) - -// ContainerRunner handles Podman container creation and execution. -type ContainerRunner struct { - // podmanPath is the path to the podman binary. - podmanPath string -} - -// ContainerRunnerConfig configures the container runner. -type ContainerRunnerConfig struct { - // PodmanPath is the path to the podman binary. - // If empty, "podman" is looked up in PATH. - PodmanPath string -} - -// NewContainerRunner creates a new ContainerRunner with the given configuration. -func NewContainerRunner(cfg ContainerRunnerConfig) *ContainerRunner { - podmanPath := cfg.PodmanPath - if podmanPath == "" { - podmanPath = "podman" - } - return &ContainerRunner{ - podmanPath: podmanPath, - } -} - -// ContainerResult contains the result of creating a container. -type ContainerResult struct { - // ContainerID is the full container ID. - ContainerID string - - // ShortID is the short (12 character) container ID. - ShortID string - - // ContainerName is the deterministic container name. - ContainerName string - - // Cleanup is a function to stop and remove the container. - Cleanup workflow.CleanupFunc -} - -// ResourceLimits specifies resource constraints for the container. -type ResourceLimits struct { - // CPUQuota is the CPU quota in microseconds per period (100000 = 1 CPU). - // 0 means no limit. - CPUQuota int64 - - // MemoryLimit is the memory limit in bytes. - // 0 means no limit. 
- MemoryLimit int64 - - // MemorySwap is the memory+swap limit in bytes. - // -1 means unlimited swap, 0 means same as MemoryLimit. - MemorySwap int64 - - // PidsLimit is the maximum number of PIDs. - // 0 means no limit. - PidsLimit int64 -} - -// DefaultResourceLimits returns sensible default resource limits. -func DefaultResourceLimits() ResourceLimits { - return ResourceLimits{ - CPUQuota: 200000, // 2 CPUs - MemoryLimit: 2 * 1024 * 1024 * 1024, // 2 GB - MemorySwap: -1, // Unlimited swap - PidsLimit: 1024, // 1024 processes - } -} - -// RunContainer creates and starts a container from the given image. -// The container uses a deterministic name based on the VM name. -func (r *ContainerRunner) RunContainer(ctx context.Context, imageTag string, vmName string, limits ResourceLimits) (*ContainerResult, error) { - // Generate deterministic container name - containerName := generateContainerName(vmName) - - // Check if container already exists - exists, err := r.ContainerExists(ctx, containerName) - if err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageRunContainer, - workflow.ErrContainerCreateFailed, - fmt.Sprintf("failed to check container existence: %v", err), - ) - } - - // If container exists, remove it for idempotency - if exists { - if err := r.RemoveContainer(ctx, containerName, true); err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageRunContainer, - workflow.ErrContainerCreateFailed, - fmt.Sprintf("failed to remove existing container: %v", err), - ) - } - } - - // Build container create arguments - args := []string{ - "run", - "--detach", - "--name", containerName, - "--hostname", vmName, - "--env", "container=podman", - "--tty", - "--interactive", - } - - // Apply resource limits - if limits.CPUQuota > 0 { - args = append(args, "--cpu-quota", fmt.Sprintf("%d", limits.CPUQuota)) - } - if limits.MemoryLimit > 0 { - args = append(args, "--memory", fmt.Sprintf("%d", limits.MemoryLimit)) - } - if limits.MemorySwap != 0 { 
- args = append(args, "--memory-swap", fmt.Sprintf("%d", limits.MemorySwap)) - } - if limits.PidsLimit > 0 { - args = append(args, "--pids-limit", fmt.Sprintf("%d", limits.PidsLimit)) - } - - // Add the image and default command - args = append(args, imageTag, "/bin/sh") - - // Create and start the container - cmd := exec.CommandContext(ctx, r.podmanPath, args...) - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageRunContainer, - workflow.ErrContainerCreateFailed, - fmt.Sprintf("podman run failed: %v: %s", err, stderr.String()), - ) - } - - // Get the container ID - containerID := strings.TrimSpace(stdout.String()) - if containerID == "" { - // If we didn't get an ID from stdout, inspect the container - containerID, err = r.getContainerID(ctx, containerName) - if err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageRunContainer, - workflow.ErrContainerCreateFailed, - fmt.Sprintf("failed to get container ID: %v", err), - ) - } - } - - // Generate short ID (first 12 characters) - shortID := containerID - if len(shortID) > 12 { - shortID = shortID[:12] - } - - return &ContainerResult{ - ContainerID: containerID, - ShortID: shortID, - ContainerName: containerName, - Cleanup: func() error { - return r.RemoveContainer(context.Background(), containerName, true) - }, - }, nil -} - -// generateContainerName creates a deterministic container name from the VM name. -func generateContainerName(vmName string) string { - // Use a prefix to identify VM clone containers - timestamp := time.Now().UTC().Format("20060102T150405Z") - return fmt.Sprintf("vmclone-%s-%s", vmName, timestamp) -} - -// getContainerID retrieves the container ID for a given name. 
-func (r *ContainerRunner) getContainerID(ctx context.Context, containerName string) (string, error) { - args := []string{ - "inspect", - "--format", "{{.Id}}", - containerName, - } - - cmd := exec.CommandContext(ctx, r.podmanPath, args...) - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return "", fmt.Errorf("inspect failed: %w: %s", err, stderr.String()) - } - - return strings.TrimSpace(stdout.String()), nil -} - -// ContainerExists checks if a container with the given name exists. -func (r *ContainerRunner) ContainerExists(ctx context.Context, containerName string) (bool, error) { - args := []string{ - "container", - "exists", - containerName, - } - - cmd := exec.CommandContext(ctx, r.podmanPath, args...) - err := cmd.Run() - - if err == nil { - return true, nil - } - - // Exit code 1 means container doesn't exist - if exitErr, ok := err.(*exec.ExitError); ok { - if exitErr.ExitCode() == 1 { - return false, nil - } - } - - return false, fmt.Errorf("failed to check container existence: %w", err) -} - -// RemoveContainer stops and removes a container. -func (r *ContainerRunner) RemoveContainer(ctx context.Context, containerRef string, force bool) error { - args := []string{"rm"} - - if force { - args = append(args, "--force") - } - - args = append(args, containerRef) - - cmd := exec.CommandContext(ctx, r.podmanPath, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("container removal failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// StopContainer stops a running container. -func (r *ContainerRunner) StopContainer(ctx context.Context, containerRef string, timeout int) error { - args := []string{ - "stop", - "--time", fmt.Sprintf("%d", timeout), - containerRef, - } - - cmd := exec.CommandContext(ctx, r.podmanPath, args...) 
- var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("container stop failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// StartContainer starts a stopped container. -func (r *ContainerRunner) StartContainer(ctx context.Context, containerRef string) error { - args := []string{"start", containerRef} - - cmd := exec.CommandContext(ctx, r.podmanPath, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("container start failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// ContainerInfo contains detailed information about a container. -type ContainerInfo struct { - ID string - Name string - Image string - Status string - Running bool - Created time.Time -} - -// InspectContainer retrieves detailed information about a container. -func (r *ContainerRunner) InspectContainer(ctx context.Context, containerRef string) (*ContainerInfo, error) { - args := []string{ - "inspect", - "--format", "json", - containerRef, - } - - cmd := exec.CommandContext(ctx, r.podmanPath, args...) 
- var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("inspect failed: %w: %s", err, stderr.String()) - } - - // Parse JSON output - var containers []struct { - ID string `json:"Id"` - Name string `json:"Name"` - Image string `json:"Image"` - Created string `json:"Created"` - State struct { - Status string `json:"Status"` - Running bool `json:"Running"` - } `json:"State"` - } - - if err := json.Unmarshal(stdout.Bytes(), &containers); err != nil { - return nil, fmt.Errorf("failed to parse inspect output: %w", err) - } - - if len(containers) == 0 { - return nil, fmt.Errorf("no container found") - } - - c := containers[0] - created, _ := time.Parse(time.RFC3339Nano, c.Created) - - return &ContainerInfo{ - ID: c.ID, - Name: c.Name, - Image: c.Image, - Status: c.State.Status, - Running: c.State.Running, - Created: created, - }, nil -} - -// ExecInContainer executes a command inside a running container. -func (r *ContainerRunner) ExecInContainer(ctx context.Context, containerRef string, command []string) (string, string, int, error) { - args := []string{"exec", containerRef} - args = append(args, command...) - - cmd := exec.CommandContext(ctx, r.podmanPath, args...) - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err := cmd.Run() - - exitCode := 0 - if err != nil { - if exitErr, ok := err.(*exec.ExitError); ok { - exitCode = exitErr.ExitCode() - } else { - return "", "", -1, fmt.Errorf("exec failed: %w", err) - } - } - - return stdout.String(), stderr.String(), exitCode, nil -} - -// ListContainers lists containers matching a filter pattern. 
-func (r *ContainerRunner) ListContainers(ctx context.Context, all bool, filter string) ([]ContainerInfo, error) { - args := []string{ - "ps", - "--format", "json", - } - - if all { - args = append(args, "--all") - } - - if filter != "" { - args = append(args, "--filter", filter) - } - - cmd := exec.CommandContext(ctx, r.podmanPath, args...) - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("list containers failed: %w: %s", err, stderr.String()) - } - - // Parse JSON output - var containers []struct { - ID string `json:"Id"` - Names []string `json:"Names"` - Image string `json:"Image"` - Created int64 `json:"Created"` - State string `json:"State"` - } - - output := stdout.Bytes() - if len(output) == 0 { - return []ContainerInfo{}, nil - } - - if err := json.Unmarshal(output, &containers); err != nil { - return nil, fmt.Errorf("failed to parse list output: %w", err) - } - - result := make([]ContainerInfo, len(containers)) - for i, c := range containers { - name := "" - if len(c.Names) > 0 { - name = c.Names[0] - } - result[i] = ContainerInfo{ - ID: c.ID, - Name: name, - Image: c.Image, - Status: c.State, - Running: c.State == "running", - Created: time.Unix(c.Created, 0), - } - } - - return result, nil -} - -// WaitForContainer waits for a container to be in a ready state. 
-func (r *ContainerRunner) WaitForContainer(ctx context.Context, containerRef string, timeout time.Duration) error { - deadline := time.Now().Add(timeout) - - for time.Now().Before(deadline) { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - info, err := r.InspectContainer(ctx, containerRef) - if err != nil { - time.Sleep(100 * time.Millisecond) - continue - } - - if info.Running { - return nil - } - - time.Sleep(100 * time.Millisecond) - } - - return fmt.Errorf("timeout waiting for container to be ready") -} diff --git a/fluid-remote/internal/rest/access.go b/fluid-remote/internal/rest/access.go deleted file mode 100755 index 472fb9cc..00000000 --- a/fluid-remote/internal/rest/access.go +++ /dev/null @@ -1,655 +0,0 @@ -package rest - -import ( - "encoding/json" - "net/http" - "strconv" - "time" - - "github.com/go-chi/chi/v5" - - serverError "github.com/aspectrr/fluid.sh/fluid-remote/internal/error" - serverJSON "github.com/aspectrr/fluid.sh/fluid-remote/internal/json" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/sshca" -) - -// AccessHandler handles SSH access API requests. -type AccessHandler struct { - accessSvc *sshca.AccessService -} - -// NewAccessHandler creates a new access handler. -func NewAccessHandler(accessSvc *sshca.AccessService) *AccessHandler { - return &AccessHandler{ - accessSvc: accessSvc, - } -} - -// RegisterRoutes registers the access routes on the given router. 
-func (h *AccessHandler) RegisterRoutes(r chi.Router) { - r.Route("/access", func(r chi.Router) { - // Request access to a sandbox - r.Post("/request", h.handleRequestAccess) - - // Get CA public key for VM configuration - r.Get("/ca-pubkey", h.handleGetCAPublicKey) - - // Certificate operations - r.Route("/certificate/{certID}", func(r chi.Router) { - r.Get("/", h.handleGetCertificate) - r.Delete("/", h.handleRevokeCertificate) - }) - - // List certificates - r.Get("/certificates", h.handleListCertificates) - - // Session operations - r.Post("/session/start", h.handleRecordSessionStart) - r.Post("/session/end", h.handleRecordSessionEnd) - - // List active sessions - r.Get("/sessions", h.handleListSessions) - }) -} - -// Request/Response types - -// requestAccessRequest is the request body for requesting sandbox access. -type requestAccessRequest struct { - // SandboxID is the target sandbox. - SandboxID string `json:"sandbox_id"` - - // UserID identifies the requesting user. - UserID string `json:"user_id"` - - // PublicKey is the user's SSH public key in OpenSSH format. - PublicKey string `json:"public_key"` - - // TTLMinutes is the requested access duration (1-10 minutes). - TTLMinutes int `json:"ttl_minutes,omitempty"` -} - -// requestAccessResponse is the response for a successful access request. -type requestAccessResponse struct { - // CertificateID is the ID of the issued certificate. - CertificateID string `json:"certificate_id"` - - // Certificate is the SSH certificate content (save as key-cert.pub). - Certificate string `json:"certificate"` - - // VMIPAddress is the IP address of the sandbox VM. - VMIPAddress string `json:"vm_ip_address"` - - // SSHPort is the SSH port (usually 22). - SSHPort int `json:"ssh_port"` - - // Username is the SSH username to use. - Username string `json:"username"` - - // ValidUntil is when the certificate expires (RFC3339). - ValidUntil string `json:"valid_until"` - - // TTLSeconds is the remaining validity in seconds. 
- TTLSeconds int `json:"ttl_seconds"` - - // ConnectCommand is an example SSH command for connecting. - ConnectCommand string `json:"connect_command"` - - // Instructions provides usage instructions. - Instructions string `json:"instructions"` -} - -// caPublicKeyResponse is the response for getting the CA public key. -type caPublicKeyResponse struct { - // PublicKey is the CA public key in OpenSSH format. - PublicKey string `json:"public_key"` - - // Usage explains how to use this key. - Usage string `json:"usage"` -} - -// certificateResponse is the response for certificate queries. -type certificateResponse struct { - ID string `json:"id"` - SandboxID string `json:"sandbox_id"` - UserID string `json:"user_id"` - VMID string `json:"vm_id"` - Identity string `json:"identity"` - SerialNumber uint64 `json:"serial_number"` - Principals []string `json:"principals"` - ValidAfter string `json:"valid_after"` - ValidBefore string `json:"valid_before"` - Status string `json:"status"` - IssuedAt string `json:"issued_at"` - IsExpired bool `json:"is_expired"` - TTLSeconds int `json:"ttl_seconds,omitempty"` -} - -// listCertificatesResponse is the response for listing certificates. -type listCertificatesResponse struct { - Certificates []certificateResponse `json:"certificates"` - Total int `json:"total"` -} - -// revokeCertificateRequest is the request body for revoking a certificate. -type revokeCertificateRequest struct { - Reason string `json:"reason"` -} - -// sessionStartRequest is the request body for recording session start. -type sessionStartRequest struct { - CertificateID string `json:"certificate_id"` - SourceIP string `json:"source_ip,omitempty"` -} - -// sessionStartResponse is the response for session start. -type sessionStartResponse struct { - SessionID string `json:"session_id"` -} - -// sessionEndRequest is the request body for recording session end. 
-type sessionEndRequest struct { - SessionID string `json:"session_id"` - Reason string `json:"reason,omitempty"` -} - -// sessionEndResponse is the response for ending a session. -type sessionEndResponse struct { - Message string `json:"message"` - SessionID string `json:"session_id"` -} - -// revokeCertificateResponse is the response for revoking a certificate. -type revokeCertificateResponse struct { - Message string `json:"message"` - ID string `json:"id"` -} - -// sessionResponse is the response for session queries. -type sessionResponse struct { - ID string `json:"id"` - CertificateID string `json:"certificate_id"` - SandboxID string `json:"sandbox_id"` - UserID string `json:"user_id"` - VMID string `json:"vm_id"` - VMIPAddress string `json:"vm_ip_address"` - SourceIP string `json:"source_ip,omitempty"` - Status string `json:"status"` - StartedAt string `json:"started_at"` - EndedAt string `json:"ended_at,omitempty"` - DurationSeconds int `json:"duration_seconds,omitempty"` -} - -// listSessionsResponse is the response for listing sessions. -type listSessionsResponse struct { - Sessions []sessionResponse `json:"sessions"` - Total int `json:"total"` -} - -// accessErrorResponse is a helper for error responses. -type accessErrorResponse struct { - Error string `json:"error"` - Code int `json:"code"` - Details string `json:"details,omitempty"` -} - -// writeError writes an error response. 
-func writeError(w http.ResponseWriter, status int, message, details string) { - _ = serverJSON.RespondJSON(w, status, accessErrorResponse{ - Error: message, - Code: status, - Details: details, - }) -} - -// Handlers - -// handleRequestAccess handles POST /v1/access/request -// @Summary Request SSH access to a sandbox -// @Description Issues a short-lived SSH certificate for accessing a sandbox via tmux -// @Tags Access -// @Accept json -// @Produce json -// @Param request body requestAccessRequest true "Access request" -// @Success 200 {object} requestAccessResponse -// @Failure 400 {object} accessErrorResponse -// @Failure 404 {object} accessErrorResponse -// @Failure 500 {object} accessErrorResponse -// @Id requestAccess -// @Router /v1/access/request [post] -func (h *AccessHandler) handleRequestAccess(w http.ResponseWriter, r *http.Request) { - var req requestAccessRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - writeError(w, http.StatusBadRequest, "invalid request body", err.Error()) - return - } - - // Validate required fields - if req.SandboxID == "" { - writeError(w, http.StatusBadRequest, "sandbox_id is required", "") - return - } - if req.UserID == "" { - writeError(w, http.StatusBadRequest, "user_id is required", "") - return - } - if req.PublicKey == "" { - writeError(w, http.StatusBadRequest, "public_key is required", "") - return - } - - // Get source IP from request - sourceIP := r.RemoteAddr - if xff := r.Header.Get("X-Forwarded-For"); xff != "" { - sourceIP = xff - } - - // Build access request - accessReq := &sshca.AccessRequest{ - SandboxID: req.SandboxID, - UserID: req.UserID, - PublicKey: req.PublicKey, - TTLMinutes: req.TTLMinutes, - SourceIP: sourceIP, - RequestTime: time.Now(), - } - - // Request access - resp, err := h.accessSvc.RequestAccess(r.Context(), accessReq) - if err != nil { - writeError(w, http.StatusInternalServerError, "failed to issue certificate", err.Error()) - return - } - - // Build response with 
instructions - instructions := `To connect to the sandbox: -1. Save your private key to a file (e.g., sandbox_key) -2. Save the certificate to sandbox_key-cert.pub -3. Run: chmod 600 sandbox_key -4. Connect using the command provided in connect_command` - - _ = serverJSON.RespondJSON(w, http.StatusOK, requestAccessResponse{ - CertificateID: resp.CertificateID, - Certificate: resp.Certificate, - VMIPAddress: resp.VMIPAddress, - SSHPort: resp.SSHPort, - Username: resp.Username, - ValidUntil: resp.ValidUntil.Format(time.RFC3339), - TTLSeconds: resp.TTLSeconds, - ConnectCommand: resp.ConnectCommand, - Instructions: instructions, - }) -} - -// handleGetCAPublicKey handles GET /v1/access/ca-pubkey -// @Summary Get the SSH CA public key -// @Description Returns the CA public key that should be trusted by VMs -// @Tags Access -// @Produce json -// @Success 200 {object} caPublicKeyResponse -// @Failure 500 {object} accessErrorResponse -// @Id getCAPublicKey -// @Router /v1/access/ca-pubkey [get] -func (h *AccessHandler) handleGetCAPublicKey(w http.ResponseWriter, r *http.Request) { - pubKey, err := h.accessSvc.GetCAPublicKey() - if err != nil { - writeError(w, http.StatusInternalServerError, "failed to get CA public key", err.Error()) - return - } - - usage := `This CA public key should be installed in VM images at /etc/ssh/ssh_ca.pub -and referenced in sshd_config with: TrustedUserCAKeys /etc/ssh/ssh_ca.pub` - - _ = serverJSON.RespondJSON(w, http.StatusOK, caPublicKeyResponse{ - PublicKey: pubKey, - Usage: usage, - }) -} - -// handleGetCertificate handles GET /v1/access/certificate/{certID} -// @Summary Get certificate details -// @Description Returns details about an issued certificate -// @Tags Access -// @Produce json -// @Param certID path string true "Certificate ID" -// @Success 200 {object} certificateResponse -// @Failure 404 {object} accessErrorResponse -// @Failure 500 {object} accessErrorResponse -// @Id getCertificate -// @Router /v1/access/certificate/{certID} 
[get] -func (h *AccessHandler) handleGetCertificate(w http.ResponseWriter, r *http.Request) { - certID := chi.URLParam(r, "certID") - if certID == "" { - writeError(w, http.StatusBadRequest, "certificate ID is required", "") - return - } - - cert, err := h.accessSvc.GetCertificate(r.Context(), certID) - if err != nil { - writeError(w, http.StatusNotFound, "certificate not found", err.Error()) - return - } - - ttlSeconds := 0 - if !cert.IsExpired() { - ttlSeconds = int(cert.TimeToExpiry().Seconds()) - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, certificateResponse{ - ID: cert.ID, - SandboxID: cert.SandboxID, - UserID: cert.UserID, - VMID: cert.VMID, - Identity: cert.Identity, - SerialNumber: cert.SerialNumber, - Principals: cert.Principals, - ValidAfter: cert.ValidAfter.Format(time.RFC3339), - ValidBefore: cert.ValidBefore.Format(time.RFC3339), - Status: string(cert.Status), - IssuedAt: cert.IssuedAt.Format(time.RFC3339), - IsExpired: cert.IsExpired(), - TTLSeconds: ttlSeconds, - }) -} - -// handleRevokeCertificate handles DELETE /v1/access/certificate/{certID} -// @Summary Revoke a certificate -// @Description Immediately revokes a certificate, terminating any active sessions -// @Tags Access -// @Accept json -// @Produce json -// @Param certID path string true "Certificate ID" -// @Param request body revokeCertificateRequest false "Revocation reason" -// @Success 200 {object} revokeCertificateResponse -// @Failure 400 {object} accessErrorResponse -// @Failure 404 {object} accessErrorResponse -// @Failure 500 {object} accessErrorResponse -// @Id revokeCertificate -// @Router /v1/access/certificate/{certID} [delete] -func (h *AccessHandler) handleRevokeCertificate(w http.ResponseWriter, r *http.Request) { - certID := chi.URLParam(r, "certID") - if certID == "" { - writeError(w, http.StatusBadRequest, "certificate ID is required", "") - return - } - - var req revokeCertificateRequest - if r.Body != nil && r.ContentLength > 0 { - if err := 
json.NewDecoder(r.Body).Decode(&req); err != nil { - // Non-fatal, use empty reason - req.Reason = "revoked via API" - } - } - if req.Reason == "" { - req.Reason = "revoked via API" - } - - if err := h.accessSvc.RevokeAccess(r.Context(), certID, req.Reason); err != nil { - if err == sshca.ErrCertAlreadyRevoked { - writeError(w, http.StatusBadRequest, "certificate already revoked", "") - return - } - writeError(w, http.StatusInternalServerError, "failed to revoke certificate", err.Error()) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, revokeCertificateResponse{ - Message: "certificate revoked successfully", - ID: certID, - }) -} - -// handleListCertificates handles GET /v1/access/certificates -// @Summary List certificates -// @Description Lists issued certificates with optional filtering -// @Tags Access -// @Produce json -// @Param sandbox_id query string false "Filter by sandbox ID" -// @Param user_id query string false "Filter by user ID" -// @Param status query string false "Filter by status (ACTIVE, EXPIRED, REVOKED)" -// @Param active_only query bool false "Only show active, non-expired certificates" -// @Param limit query int false "Maximum results to return" -// @Param offset query int false "Offset for pagination" -// @Success 200 {object} listCertificatesResponse -// @Failure 500 {object} accessErrorResponse -// @Id listCertificates -// @Router /v1/access/certificates [get] -func (h *AccessHandler) handleListCertificates(w http.ResponseWriter, r *http.Request) { - // Build filter - filter := sshca.CertificateFilter{} - - if sandboxID := r.URL.Query().Get("sandbox_id"); sandboxID != "" { - filter.SandboxID = &sandboxID - } - if userID := r.URL.Query().Get("user_id"); userID != "" { - filter.UserID = &userID - } - if status := r.URL.Query().Get("status"); status != "" { - s := sshca.CertStatus(status) - filter.Status = &s - } - if activeOnly := r.URL.Query().Get("active_only"); activeOnly == "true" { - filter.ActiveOnly = true - } - - // 
Build options - opts := &sshca.ListOptions{} - if limit := r.URL.Query().Get("limit"); limit != "" { - if l, err := strconv.Atoi(limit); err == nil { - opts.Limit = l - } - } - if offset := r.URL.Query().Get("offset"); offset != "" { - if o, err := strconv.Atoi(offset); err == nil { - opts.Offset = o - } - } - - certs, err := h.accessSvc.ListCertificates(r.Context(), filter, opts) - if err != nil { - writeError(w, http.StatusInternalServerError, "failed to list certificates", err.Error()) - return - } - - // Convert to response format - responses := make([]certificateResponse, len(certs)) - for i, cert := range certs { - ttlSeconds := 0 - if !cert.IsExpired() { - ttlSeconds = int(cert.TimeToExpiry().Seconds()) - } - responses[i] = certificateResponse{ - ID: cert.ID, - SandboxID: cert.SandboxID, - UserID: cert.UserID, - VMID: cert.VMID, - Identity: cert.Identity, - SerialNumber: cert.SerialNumber, - Principals: cert.Principals, - ValidAfter: cert.ValidAfter.Format(time.RFC3339), - ValidBefore: cert.ValidBefore.Format(time.RFC3339), - Status: string(cert.Status), - IssuedAt: cert.IssuedAt.Format(time.RFC3339), - IsExpired: cert.IsExpired(), - TTLSeconds: ttlSeconds, - } - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, listCertificatesResponse{ - Certificates: responses, - Total: len(responses), - }) -} - -// handleRecordSessionStart handles POST /v1/access/session/start -// @Summary Record session start -// @Description Records the start of an SSH session (called by VM or auth service) -// @Tags Access -// @Accept json -// @Produce json -// @Param request body sessionStartRequest true "Session start request" -// @Success 200 {object} sessionStartResponse -// @Failure 400 {object} accessErrorResponse -// @Failure 500 {object} accessErrorResponse -// @Id recordSessionStart -// @Router /v1/access/session/start [post] -func (h *AccessHandler) handleRecordSessionStart(w http.ResponseWriter, r *http.Request) { - var req sessionStartRequest - if err := 
json.NewDecoder(r.Body).Decode(&req); err != nil { - writeError(w, http.StatusBadRequest, "invalid request body", err.Error()) - return - } - - if req.CertificateID == "" { - writeError(w, http.StatusBadRequest, "certificate_id is required", "") - return - } - - sourceIP := req.SourceIP - if sourceIP == "" { - sourceIP = r.RemoteAddr - } - - sessionID, err := h.accessSvc.RecordSessionStart(r.Context(), req.CertificateID, sourceIP) - if err != nil { - writeError(w, http.StatusInternalServerError, "failed to record session start", err.Error()) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, sessionStartResponse{ - SessionID: sessionID, - }) -} - -// handleRecordSessionEnd handles POST /v1/access/session/end -// @Summary Record session end -// @Description Records the end of an SSH session -// @Tags Access -// @Accept json -// @Produce json -// @Param request body sessionEndRequest true "Session end request" -// @Success 200 {object} sessionEndResponse -// @Failure 400 {object} accessErrorResponse -// @Failure 500 {object} accessErrorResponse -// @Id recordSessionEnd -// @Router /v1/access/session/end [post] -func (h *AccessHandler) handleRecordSessionEnd(w http.ResponseWriter, r *http.Request) { - var req sessionEndRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - writeError(w, http.StatusBadRequest, "invalid request body", err.Error()) - return - } - - if req.SessionID == "" { - writeError(w, http.StatusBadRequest, "session_id is required", "") - return - } - - reason := req.Reason - if reason == "" { - reason = "session ended normally" - } - - if err := h.accessSvc.RecordSessionEnd(r.Context(), req.SessionID, reason); err != nil { - writeError(w, http.StatusInternalServerError, "failed to record session end", err.Error()) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, sessionEndResponse{ - Message: "session ended successfully", - SessionID: req.SessionID, - }) -} - -// handleListSessions handles GET 
/v1/access/sessions -// @Summary List sessions -// @Description Lists access sessions with optional filtering -// @Tags Access -// @Produce json -// @Param sandbox_id query string false "Filter by sandbox ID" -// @Param certificate_id query string false "Filter by certificate ID" -// @Param user_id query string false "Filter by user ID" -// @Param active_only query bool false "Only show active sessions" -// @Param limit query int false "Maximum results to return" -// @Param offset query int false "Offset for pagination" -// @Success 200 {object} listSessionsResponse -// @Failure 500 {object} accessErrorResponse -// @Id listSessions -// @Router /v1/access/sessions [get] -func (h *AccessHandler) handleListSessions(w http.ResponseWriter, r *http.Request) { - // Build filter - filter := sshca.SessionFilter{} - - if sandboxID := r.URL.Query().Get("sandbox_id"); sandboxID != "" { - filter.SandboxID = &sandboxID - } - if certID := r.URL.Query().Get("certificate_id"); certID != "" { - filter.CertificateID = &certID - } - if userID := r.URL.Query().Get("user_id"); userID != "" { - filter.UserID = &userID - } - if activeOnly := r.URL.Query().Get("active_only"); activeOnly == "true" { - filter.ActiveOnly = true - } - - // Build options - opts := &sshca.ListOptions{} - if limit := r.URL.Query().Get("limit"); limit != "" { - if l, err := strconv.Atoi(limit); err == nil { - opts.Limit = l - } - } - if offset := r.URL.Query().Get("offset"); offset != "" { - if o, err := strconv.Atoi(offset); err == nil { - opts.Offset = o - } - } - - var sessions []*sshca.AccessSession - var err error - - if filter.SandboxID != nil { - sessions, err = h.accessSvc.GetActiveSessionsForSandbox(r.Context(), *filter.SandboxID) - } else { - // Return empty list if no sandbox_id filter (would need a more general method) - sessions = []*sshca.AccessSession{} - } - if err != nil { - writeError(w, http.StatusInternalServerError, "failed to list sessions", err.Error()) - return - } - - // Convert to 
response format - responses := make([]sessionResponse, len(sessions)) - for i, session := range sessions { - resp := sessionResponse{ - ID: session.ID, - CertificateID: session.CertificateID, - SandboxID: session.SandboxID, - UserID: session.UserID, - VMID: session.VMID, - VMIPAddress: session.VMIPAddress, - SourceIP: session.SourceIP, - Status: string(session.Status), - StartedAt: session.StartedAt.Format(time.RFC3339), - } - if session.EndedAt != nil { - resp.EndedAt = session.EndedAt.Format(time.RFC3339) - } - if session.DurationSeconds != nil { - resp.DurationSeconds = *session.DurationSeconds - } else if session.EndedAt != nil { - resp.DurationSeconds = int(session.EndedAt.Sub(session.StartedAt).Seconds()) - } - responses[i] = resp - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, listSessionsResponse{ - Sessions: responses, - Total: len(responses), - }) -} - -// Ensure serverError is used to avoid unused import error -var _ = serverError.ErrorResponse{} diff --git a/fluid-remote/internal/rest/server.go b/fluid-remote/internal/rest/server.go deleted file mode 100755 index 9bea5c9e..00000000 --- a/fluid-remote/internal/rest/server.go +++ /dev/null @@ -1,1120 +0,0 @@ -package rest - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "strings" - "time" - - "github.com/MarceloPetrucio/go-scalar-api-reference" - "github.com/go-chi/chi/v5" - "github.com/go-chi/chi/v5/middleware" - "github.com/gorilla/websocket" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/ansible" - serverError "github.com/aspectrr/fluid.sh/fluid-remote/internal/error" - serverJSON "github.com/aspectrr/fluid.sh/fluid-remote/internal/json" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/libvirt" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/store" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/telemetry" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/vm" -) - -// Server wires the HTTP layer to application services. 
-type Server struct { - Router chi.Router - vmSvc *vm.Service - domainMgr *libvirt.DomainManager - multiHostMgr *libvirt.MultiHostDomainManager - ansibleHandler *ansible.Handler - playbookHandler *ansible.PlaybookHandler - telemetry telemetry.Service -} - -// NewServer constructs a REST server with routes registered. -func NewServer(vmSvc *vm.Service, domainMgr *libvirt.DomainManager, ansibleRunner *ansible.Runner, tele telemetry.Service) *Server { - return NewServerWithPlaybooks(vmSvc, domainMgr, ansibleRunner, nil, tele) -} - -// NewServerWithPlaybooks constructs a REST server with playbook management support. -func NewServerWithPlaybooks(vmSvc *vm.Service, domainMgr *libvirt.DomainManager, ansibleRunner *ansible.Runner, playbookSvc *ansible.PlaybookService, tele telemetry.Service) *Server { - return NewServerWithMultiHost(vmSvc, domainMgr, nil, ansibleRunner, playbookSvc, tele) -} - -// NewServerWithMultiHost constructs a REST server with multi-host VM listing support. -func NewServerWithMultiHost(vmSvc *vm.Service, domainMgr *libvirt.DomainManager, multiHostMgr *libvirt.MultiHostDomainManager, ansibleRunner *ansible.Runner, playbookSvc *ansible.PlaybookService, tele telemetry.Service) *Server { - router := chi.NewRouter() - - router.Use(middleware.RequestID) - router.Use(middleware.Logger) - router.Use(middleware.Recoverer) - - // Telemetry middleware - router.Use(func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if tele != nil { - start := time.Now() - ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor) - - next.ServeHTTP(ww, r) - - tele.Track("api_request", map[string]interface{}{ - "method": r.Method, - "path": r.URL.Path, - "status": ww.Status(), - "duration_ms": time.Since(start).Milliseconds(), - "user_agent": r.UserAgent(), - }) - } else { - next.ServeHTTP(w, r) - } - }) - }) - - var ansibleHandler *ansible.Handler - if ansibleRunner != nil { - ansibleHandler = 
ansible.NewHandler(ansibleRunner) - } - - var playbookHandler *ansible.PlaybookHandler - if playbookSvc != nil { - playbookHandler = ansible.NewPlaybookHandler(playbookSvc) - } - - s := &Server{ - Router: router, - vmSvc: vmSvc, - domainMgr: domainMgr, - multiHostMgr: multiHostMgr, - ansibleHandler: ansibleHandler, - playbookHandler: playbookHandler, - telemetry: tele, - } - s.routes() - return s -} - -// StartHTTP runs the HTTP server on the given address. -func (s *Server) StartHTTP(addr string) error { - srv := &http.Server{ - Addr: addr, - Handler: s.Router, - ReadHeaderTimeout: 10 * time.Second, - } - return srv.ListenAndServe() -} - -func (s *Server) routes() { - r := s.Router - - // @Summary API reference - // @Description Returns HTML API reference documentation - // @Accept json - // @Produce html - // @Success 200 {string} string - // @Router /docs [get] - r.Get("/docs", func(w http.ResponseWriter, r *http.Request) { - htmlContent, err := scalar.ApiReferenceHTML(&scalar.Options{ - // SpecURL: "https://generator3.swagger.io/openapi.json",// allow external URL or local path file - SpecURL: "./docs/openapi.yaml", - CustomOptions: scalar.CustomOptions{ - PageTitle: "Virsh Sandbox API", - }, - DarkMode: true, - }) - if err != nil { - fmt.Printf("%v", err) - } - - _, _ = fmt.Fprintln(w, htmlContent) - }) - - // API v1 routes - r.Route("/v1", func(r chi.Router) { - r.Get("/health", s.handleHealth) - r.Get("/vms", s.handleListVMs) - - // Sandbox lifecycle - r.Route("/sandboxes", func(r chi.Router) { - r.Get("/", s.handleListSandboxes) - r.Post("/", s.handleCreateSandbox) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", s.handleGetSandbox) - r.Get("/commands", s.handleListSandboxCommands) - r.Get("/stream", s.handleSandboxStream) - r.Get("/ip", s.handleDiscoverIP) - - r.Post("/sshkey", s.handleInjectSSHKey) - r.Post("/start", s.handleStartSandbox) - r.Post("/run", s.handleRunCommand) - r.Post("/snapshot", s.handleCreateSnapshot) - r.Post("/diff", 
s.handleDiffSnapshots) - - r.Post("/generate/{tool}", s.handleGenerate) // tool ∈ {ansible, puppet} - r.Post("/publish", s.handlePublish) - - r.Delete("/", s.handleDestroySandbox) - }) - }) - - // Ansible job management - if s.ansibleHandler != nil { - if s.playbookHandler != nil { - s.ansibleHandler.RegisterRoutesWithPlaybooks(r, s.playbookHandler) - } else { - s.ansibleHandler.RegisterRoutes(r) - } - } else if s.playbookHandler != nil { - // Register playbook routes directly if no ansible handler - r.Route("/ansible", func(r chi.Router) { - s.playbookHandler.RegisterPlaybookRoutes(r) - }) - } - }) -} - -// --- Request/Response DTOs --- - -type createSandboxRequest struct { - SourceVMName string `json:"source_vm_name"` // required; name of existing VM in libvirt to clone from - AgentID string `json:"agent_id"` // required - CPU int `json:"cpu,omitempty"` // optional; default from service config if <=0 - MemoryMB int `json:"memory_mb,omitempty"` // optional; default from service config if <=0 - TTLSeconds *int `json:"ttl_seconds,omitempty"` // optional; TTL for auto garbage collection - AutoStart bool `json:"auto_start,omitempty"` // optional; if true, start the VM immediately after creation - WaitForIP bool `json:"wait_for_ip,omitempty"` // optional; if true and auto_start, wait for IP discovery -} - -type createSandboxResponse struct { - Sandbox *store.Sandbox `json:"sandbox"` - IPAddress string `json:"ip_address,omitempty"` // populated when auto_start and wait_for_ip are true -} - -type injectSSHKeyRequest struct { - PublicKey string `json:"public_key"` // required - Username string `json:"username,omitempty"` // required (explicit); typical: "ubuntu" or "centos" -} - -type startSandboxRequest struct { - WaitForIP bool `json:"wait_for_ip"` // optional; default false -} - -type startSandboxResponse struct { - IPAddress string `json:"ip_address,omitempty"` -} - -type discoverIPResponse struct { - IPAddress string `json:"ip_address"` -} - -type runCommandRequest 
struct { - Username string `json:"user,omitempty"` // optional; defaults to "sandbox" when using managed credentials - PrivateKeyPath string `json:"private_key_path,omitempty"` // optional; if empty, uses managed credentials (requires SSH CA) - Command string `json:"command"` // required - TimeoutSec int `json:"timeout_sec,omitempty"` // optional; default from service config - Env map[string]string `json:"env,omitempty"` // optional -} - -type runCommandResponse struct { - Command *store.Command `json:"command"` -} - -type snapshotRequest struct { - Name string `json:"name"` // required - External bool `json:"external,omitempty"` // optional; default false (internal snapshot) -} - -type snapshotResponse struct { - Snapshot *store.Snapshot `json:"snapshot"` -} - -type diffRequest struct { - FromSnapshot string `json:"from_snapshot"` // required - ToSnapshot string `json:"to_snapshot"` // required -} - -type diffResponse struct { - Diff *store.Diff `json:"diff"` -} - -type generateResponse struct { - Message string `json:"message"` - Note string `json:"note,omitempty"` -} - -type publishRequest struct { - JobID string `json:"job_id"` // required - Message string `json:"message,omitempty"` // optional commit/PR message - Reviewers []string `json:"reviewers,omitempty"` // optional -} - -type publishResponse struct { - Message string `json:"message"` - Note string `json:"note,omitempty"` -} - -type ErrorResponse struct { - Error string `json:"error"` - Code int `json:"code"` - Details string `json:"details,omitempty"` -} - -type vmInfo struct { - Name string `json:"name"` - UUID string `json:"uuid"` - State string `json:"state"` - Persistent bool `json:"persistent"` - DiskPath string `json:"disk_path,omitempty"` - HostName string `json:"host_name,omitempty"` // Host display name (multi-host mode) - HostAddress string `json:"host_address,omitempty"` // Host IP/hostname (multi-host mode) -} - -type hostError struct { - HostName string `json:"host_name"` - HostAddress 
string `json:"host_address"` - Error string `json:"error"` -} - -type listVMsResponse struct { - VMs []vmInfo `json:"vms"` - HostErrors []hostError `json:"host_errors,omitempty"` // Errors from unreachable hosts (multi-host mode) -} - -type sandboxInfo struct { - ID string `json:"id"` - JobID string `json:"job_id"` - AgentID string `json:"agent_id"` - SandboxName string `json:"sandbox_name"` - BaseImage string `json:"base_image"` - Network string `json:"network"` - IPAddress *string `json:"ip_address,omitempty"` - State string `json:"state"` - TTLSeconds *int `json:"ttl_seconds,omitempty"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` -} - -type listSandboxesResponse struct { - Sandboxes []sandboxInfo `json:"sandboxes"` - Total int `json:"total"` -} - -type healthResponse struct { - Status string `json:"status"` -} - -// --- Handlers --- - -// handleHealth returns service health status. -// @Summary Health check -// @Description Returns service health status -// @Tags Health -// @Accept json -// @Produce json -// @Success 200 {object} healthResponse -// @Id getHealth -// @Router /v1/health [get] -func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) { - _ = serverJSON.RespondJSON(w, http.StatusOK, healthResponse{Status: "ok"}) -} - -// @Summary Create a new sandbox -// @Description Creates a new virtual machine sandbox by cloning from an existing VM. When multi-host is configured, automatically routes to the host containing the source VM. 
-// @Tags Sandbox -// @Accept json -// @Produce json -// @Param request body createSandboxRequest true "Sandbox creation parameters" -// @Success 201 {object} createSandboxResponse -// @Failure 400 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Id createSandbox -// @Router /v1/sandboxes [post] -func (s *Server) handleCreateSandbox(w http.ResponseWriter, r *http.Request) { - var req createSandboxRequest - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - if req.SourceVMName == "" || req.AgentID == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("source_vm_name and agent_id are required")) - return - } - - // If multi-host is configured, find which host has the source VM - if s.multiHostMgr != nil { - host, err := s.multiHostMgr.FindHostForVM(r.Context(), req.SourceVMName) - if err == nil && host != nil { - // Source VM found on a remote host - create sandbox there - sb, ip, createErr := s.vmSvc.CreateSandboxOnHost(r.Context(), host, req.SourceVMName, req.AgentID, req.CPU, req.MemoryMB, req.TTLSeconds, req.AutoStart, req.WaitForIP) - if createErr != nil { - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("create sandbox on host %s: %w", host.Name, createErr)) - return - } - _ = serverJSON.RespondJSON(w, http.StatusCreated, createSandboxResponse{Sandbox: sb, IPAddress: ip}) - return - } - // If not found on any remote host, fall through to local creation - } - - // Create sandbox locally (single-host mode or source VM not on remote hosts) - sb, ip, err := s.vmSvc.CreateSandbox(r.Context(), req.SourceVMName, req.AgentID, req.CPU, req.MemoryMB, req.TTLSeconds, req.AutoStart, req.WaitForIP) - if err != nil { - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("create sandbox: %w", err)) - return - } - _ = serverJSON.RespondJSON(w, http.StatusCreated, createSandboxResponse{Sandbox: sb, IPAddress: 
ip}) -} - -// @Summary Inject SSH key into sandbox -// @Description Injects a public SSH key for a user in the sandbox -// @Tags Sandbox -// @Accept json -// @Produce json -// @Param id path string true "Sandbox ID" -// @Param request body injectSSHKeyRequest true "SSH key injection parameters" -// @Success 204 -// @Failure 400 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Id injectSshKey -// @Router /v1/sandboxes/{id}/sshkey [post] -func (s *Server) handleInjectSSHKey(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - var req injectSSHKeyRequest - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - if req.PublicKey == "" || req.Username == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("public_key and username are required")) - return - } - - if err := s.vmSvc.InjectSSHKey(r.Context(), id, req.Username, req.PublicKey); err != nil { - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("inject ssh key: %w", err)) - return - } - w.WriteHeader(http.StatusNoContent) -} - -// @Summary Start sandbox -// @Description Starts the virtual machine sandbox -// @Tags Sandbox -// @Accept json -// @Produce json -// @Param id path string true "Sandbox ID" -// @Param request body startSandboxRequest false "Start parameters" -// @Success 200 {object} startSandboxResponse -// @Failure 400 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Id startSandbox -// @Router /v1/sandboxes/{id}/start [post] -func (s *Server) handleStartSandbox(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - - var req startSandboxRequest - // tolerate empty body; default WaitForIP=false - if r.ContentLength > 0 { - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - } - - ip, err := 
s.vmSvc.StartSandbox(r.Context(), id, req.WaitForIP) - if err != nil { - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("start sandbox: %w", err)) - return - } - _ = serverJSON.RespondJSON(w, http.StatusOK, startSandboxResponse{IPAddress: ip}) -} - -// @Summary Discover sandbox IP -// @Description Discovers and returns the IP address for a running sandbox. Use this for async workflows where wait_for_ip was false during start. -// @Tags Sandbox -// @Produce json -// @Param id path string true "Sandbox ID" -// @Success 200 {object} discoverIPResponse -// @Failure 400 {object} ErrorResponse -// @Failure 404 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Id discoverSandboxIP -// @Router /v1/sandboxes/{id}/ip [get] -func (s *Server) handleDiscoverIP(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - if id == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("sandbox id is required")) - return - } - - ip, err := s.vmSvc.DiscoverIP(r.Context(), id) - if err != nil { - if errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found: %s", id)) - return - } - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("discover ip: %w", err)) - return - } - _ = serverJSON.RespondJSON(w, http.StatusOK, discoverIPResponse{IPAddress: ip}) -} - -// @Summary Run command in sandbox -// @Description Executes a command inside the sandbox via SSH. If private_key_path is omitted and SSH CA is configured, managed credentials will be used automatically. 
-// @Tags Sandbox -// @Accept json -// @Produce json -// @Param id path string true "Sandbox ID" -// @Param request body runCommandRequest true "Command execution parameters" -// @Success 200 {object} runCommandResponse -// @Failure 400 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Id runSandboxCommand -// @Router /v1/sandboxes/{id}/run [post] -func (s *Server) handleRunCommand(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - var req runCommandRequest - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - // Command is always required - if req.Command == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("command is required")) - return - } - // Username defaults to "sandbox" when using managed credentials (handled in service layer) - // PrivateKeyPath is optional - if empty, service will use managed credentials - timeout := time.Duration(req.TimeoutSec) * time.Second - cmd, err := s.vmSvc.RunCommand(r.Context(), id, req.Username, req.PrivateKeyPath, req.Command, timeout, req.Env) - if err != nil { - // If we have a command result (with stderr/stdout), return it even on error. - // This allows callers to see SSH error messages in stderr. 
- if cmd != nil { - _ = serverJSON.RespondJSON(w, http.StatusOK, runCommandResponse{Command: cmd}) - return - } - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("run command: %w", err)) - return - } - _ = serverJSON.RespondJSON(w, http.StatusOK, runCommandResponse{Command: cmd}) -} - -// @Summary Create snapshot -// @Description Creates a snapshot of the sandbox -// @Tags Sandbox -// @Accept json -// @Produce json -// @Param id path string true "Sandbox ID" -// @Param request body snapshotRequest true "Snapshot parameters" -// @Success 201 {object} snapshotResponse -// @Failure 400 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Id createSnapshot -// @Router /v1/sandboxes/{id}/snapshot [post] -func (s *Server) handleCreateSnapshot(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - var req snapshotRequest - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - if req.Name == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("name is required")) - return - } - snap, err := s.vmSvc.CreateSnapshot(r.Context(), id, req.Name, req.External) - if err != nil { - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("create snapshot: %w", err)) - return - } - _ = serverJSON.RespondJSON(w, http.StatusCreated, snapshotResponse{Snapshot: snap}) -} - -// @Summary Diff snapshots -// @Description Computes differences between two snapshots -// @Tags Sandbox -// @Accept json -// @Produce json -// @Param id path string true "Sandbox ID" -// @Param request body diffRequest true "Diff parameters" -// @Success 200 {object} diffResponse -// @Failure 400 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Id diffSnapshots -// @Router /v1/sandboxes/{id}/diff [post] -func (s *Server) handleDiffSnapshots(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - var req 
diffRequest - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - if req.FromSnapshot == "" || req.ToSnapshot == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("from_snapshot and to_snapshot are required")) - return - } - d, err := s.vmSvc.DiffSnapshots(r.Context(), id, req.FromSnapshot, req.ToSnapshot) - if err != nil { - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("diff snapshots: %w", err)) - return - } - _ = serverJSON.RespondJSON(w, http.StatusOK, diffResponse{Diff: d}) -} - -// @Summary Generate configuration -// @Description Generates Ansible or Puppet configuration from sandbox changes -// @Tags Sandbox -// @Accept json -// @Produce json -// @Param id path string true "Sandbox ID" -// @Param tool path string true "Tool type (ansible or puppet)" -// @Success 501 {object} generateResponse -// @Failure 400 {object} ErrorResponse -// @Id generateConfiguration -// @Router /v1/sandboxes/{id}/generate/{tool} [post] -func (s *Server) handleGenerate(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - tool := chi.URLParam(r, "tool") - if id == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("sandbox id is required")) - return - } - switch tool { - case "ansible", "puppet": - // Stub: these will be implemented when ansible/puppet generators are wired. 
- _ = serverJSON.RespondJSON(w, http.StatusNotImplemented, generateResponse{ - Message: "generation not implemented yet", - Note: "tool=" + tool + " for sandbox " + id, - }) - default: - serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("unsupported tool %q; expected 'ansible' or 'puppet'", tool)) - } -} - -// @Summary Publish changes -// @Description Publishes sandbox changes to GitOps repository -// @Tags Sandbox -// @Accept json -// @Produce json -// @Param id path string true "Sandbox ID" -// @Param request body publishRequest true "Publish parameters" -// @Success 501 {object} publishResponse -// @Failure 400 {object} ErrorResponse -// @Id publishChanges -// @Router /v1/sandboxes/{id}/publish [post] -func (s *Server) handlePublish(w http.ResponseWriter, r *http.Request) { - var req publishRequest - if err := serverJSON.DecodeJSON(r.Context(), r, &req); err != nil { - serverError.RespondError(w, http.StatusBadRequest, err) - return - } - if req.JobID == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("job_id is required")) - return - } - // Stub: implement when GitOps publisher is wired. - _ = serverJSON.RespondJSON(w, http.StatusNotImplemented, publishResponse{ - Message: "publish not implemented yet", - Note: "job_id=" + req.JobID, - }) -} - -// @Summary List all host VMs -// @Description Returns a list of host virtual machines from libvirt (excludes sandboxes). When multi-host is configured, aggregates VMs from all hosts. 
-// @Tags VMs -// @Accept json -// @Produce json -// @Success 200 {object} listVMsResponse -// @Failure 500 {object} ErrorResponse -// @Id listVirtualMachines -// @Router /v1/vms [get] -func (s *Server) handleListVMs(w http.ResponseWriter, r *http.Request) { - // Use multi-host manager if configured - if s.multiHostMgr != nil { - result, err := s.multiHostMgr.ListDomains(r.Context()) - if err != nil { - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("list vms (multi-host): %w", err)) - return - } - - vms := make([]vmInfo, 0, len(result.Domains)) - for _, d := range result.Domains { - // Skip sandboxes (names starting with "sbx-") - if strings.HasPrefix(d.Name, "sbx-") { - continue - } - vms = append(vms, vmInfo{ - Name: d.Name, - UUID: d.UUID, - State: d.State.String(), - Persistent: d.Persistent, - DiskPath: d.DiskPath, - HostName: d.HostName, - HostAddress: d.HostAddress, - }) - } - - var hostErrors []hostError - for _, he := range result.HostErrors { - hostErrors = append(hostErrors, hostError{ - HostName: he.HostName, - HostAddress: he.HostAddress, - Error: he.Error, - }) - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, listVMsResponse{VMs: vms, HostErrors: hostErrors}) - return - } - - // Fall back to single-host mode - domains, err := s.domainMgr.ListDomains(r.Context()) - if err != nil { - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("list vms: %w", err)) - return - } - - vms := make([]vmInfo, 0, len(domains)) - for _, d := range domains { - // Skip sandboxes (names starting with "sbx-") - if strings.HasPrefix(d.Name, "sbx-") { - continue - } - vms = append(vms, vmInfo{ - Name: d.Name, - UUID: d.UUID, - State: d.State.String(), - Persistent: d.Persistent, - DiskPath: d.DiskPath, - }) - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, listVMsResponse{VMs: vms}) -} - -// @Summary List sandboxes -// @Description Lists all sandboxes with optional filtering by agent_id, job_id, base_image, state, or vm_name 
-// @Tags Sandbox -// @Accept json -// @Produce json -// @Param agent_id query string false "Filter by agent ID" -// @Param job_id query string false "Filter by job ID" -// @Param base_image query string false "Filter by base image" -// @Param state query string false "Filter by state (CREATED, STARTING, RUNNING, STOPPED, DESTROYED, ERROR)" -// @Param vm_name query string false "Filter by VM name" -// @Param limit query int false "Max results to return" -// @Param offset query int false "Number of results to skip" -// @Success 200 {object} listSandboxesResponse -// @Failure 500 {object} ErrorResponse -// @Id listSandboxes -// @Router /v1/sandboxes [get] -func (s *Server) handleListSandboxes(w http.ResponseWriter, r *http.Request) { - // Build filter from query params - filter := store.SandboxFilter{} - - if agentID := r.URL.Query().Get("agent_id"); agentID != "" { - filter.AgentID = &agentID - } - if jobID := r.URL.Query().Get("job_id"); jobID != "" { - filter.JobID = &jobID - } - if baseImage := r.URL.Query().Get("base_image"); baseImage != "" { - filter.BaseImage = &baseImage - } - if stateStr := r.URL.Query().Get("state"); stateStr != "" { - state := store.SandboxState(stateStr) - filter.State = &state - } - if vmName := r.URL.Query().Get("vm_name"); vmName != "" { - filter.VMName = &vmName - } - - // Build list options from query params - var opts *store.ListOptions - limitStr := r.URL.Query().Get("limit") - offsetStr := r.URL.Query().Get("offset") - if limitStr != "" || offsetStr != "" { - opts = &store.ListOptions{} - if limitStr != "" { - if _, err := fmt.Sscanf(limitStr, "%d", &opts.Limit); err != nil { - serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("invalid limit: %w", err)) - return - } - } - if offsetStr != "" { - if _, err := fmt.Sscanf(offsetStr, "%d", &opts.Offset); err != nil { - serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("invalid offset: %w", err)) - return - } - } - } - - sandboxes, err := 
s.vmSvc.GetSandboxes(r.Context(), filter, opts) - if err != nil { - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("list sandboxes: %w", err)) - return - } - - result := make([]sandboxInfo, 0, len(sandboxes)) - for _, sb := range sandboxes { - result = append(result, sandboxInfo{ - ID: sb.ID, - JobID: sb.JobID, - AgentID: sb.AgentID, - SandboxName: sb.SandboxName, - BaseImage: sb.BaseImage, - Network: sb.Network, - IPAddress: sb.IPAddress, - State: string(sb.State), - TTLSeconds: sb.TTLSeconds, - CreatedAt: sb.CreatedAt.Format("2006-01-02T15:04:05Z07:00"), - UpdatedAt: sb.UpdatedAt.Format("2006-01-02T15:04:05Z07:00"), - }) - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, listSandboxesResponse{ - Sandboxes: result, - Total: len(result), - }) -} - -type destroySandboxResponse struct { - State store.SandboxState `json:"state"` - BaseImage string `json:"base_image"` - SandboxName string `json:"sandbox_name"` - TTLSeconds *int `json:"ttl_seconds,omitempty"` -} - -// @Summary Destroy sandbox -// @Description Destroys the sandbox and cleans up resources -// @Tags Sandbox -// @Accept json -// @Produce json -// @Param id path string true "Sandbox ID" -// @Success 200 {object} destroySandboxResponse -// @Failure 400 {object} ErrorResponse -// @Failure 404 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Id destroySandbox -// @Router /v1/sandboxes/{id} [delete] -func (s *Server) handleDestroySandbox(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - if id == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("sandbox id is required")) - return - } - sb, err := s.vmSvc.DestroySandbox(r.Context(), id) - if err != nil { - if errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found: %s", id)) - return - } - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("destroy sandbox: %w", err)) - return - } - _ = 
serverJSON.RespondJSON(w, http.StatusOK, destroySandboxResponse{ - State: sb.State, - BaseImage: sb.BaseImage, - SandboxName: sb.SandboxName, - TTLSeconds: sb.TTLSeconds, - }) -} - -// --- Get Single Sandbox DTOs --- - -type getSandboxResponse struct { - Sandbox *store.Sandbox `json:"sandbox"` - Commands []*store.Command `json:"commands,omitempty"` -} - -// @Summary Get sandbox details -// @Description Returns detailed information about a specific sandbox including recent commands -// @Tags Sandbox -// @Accept json -// @Produce json -// @Param id path string true "Sandbox ID" -// @Param include_commands query bool false "Include command history" -// @Success 200 {object} getSandboxResponse -// @Failure 400 {object} ErrorResponse -// @Failure 404 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Id getSandbox -// @Router /v1/sandboxes/{id} [get] -func (s *Server) handleGetSandbox(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - if id == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("sandbox id is required")) - return - } - - sb, err := s.vmSvc.GetSandbox(r.Context(), id) - if err != nil { - if errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found: %s", id)) - return - } - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("get sandbox: %w", err)) - return - } - - resp := getSandboxResponse{Sandbox: sb} - - // Optionally include commands - if r.URL.Query().Get("include_commands") == "true" { - cmds, err := s.vmSvc.GetSandboxCommands(r.Context(), id, nil) - if err != nil && !errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("get commands: %w", err)) - return - } - resp.Commands = cmds - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, resp) -} - -// --- List Sandbox Commands DTOs --- - -type listSandboxCommandsResponse struct { - Commands []*store.Command 
`json:"commands"` - Total int `json:"total"` -} - -// @Summary List sandbox commands -// @Description Returns all commands executed in the sandbox -// @Tags Sandbox -// @Accept json -// @Produce json -// @Param id path string true "Sandbox ID" -// @Param limit query int false "Max results to return" -// @Param offset query int false "Number of results to skip" -// @Success 200 {object} listSandboxCommandsResponse -// @Failure 400 {object} ErrorResponse -// @Failure 404 {object} ErrorResponse -// @Failure 500 {object} ErrorResponse -// @Id listSandboxCommands -// @Router /v1/sandboxes/{id}/commands [get] -func (s *Server) handleListSandboxCommands(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - if id == "" { - serverError.RespondError(w, http.StatusBadRequest, errors.New("sandbox id is required")) - return - } - - // Build list options from query params - var opts *store.ListOptions - limitStr := r.URL.Query().Get("limit") - offsetStr := r.URL.Query().Get("offset") - if limitStr != "" || offsetStr != "" { - opts = &store.ListOptions{} - if limitStr != "" { - if _, err := fmt.Sscanf(limitStr, "%d", &opts.Limit); err != nil { - serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("invalid limit: %w", err)) - return - } - } - if offsetStr != "" { - if _, err := fmt.Sscanf(offsetStr, "%d", &opts.Offset); err != nil { - serverError.RespondError(w, http.StatusBadRequest, fmt.Errorf("invalid offset: %w", err)) - return - } - } - } - - cmds, err := s.vmSvc.GetSandboxCommands(r.Context(), id, opts) - if err != nil { - if errors.Is(err, store.ErrNotFound) { - serverError.RespondError(w, http.StatusNotFound, fmt.Errorf("sandbox not found: %s", id)) - return - } - serverError.RespondError(w, http.StatusInternalServerError, fmt.Errorf("list commands: %w", err)) - return - } - - _ = serverJSON.RespondJSON(w, http.StatusOK, listSandboxCommandsResponse{ - Commands: cmds, - Total: len(cmds), - }) -} - -// --- Sandbox Stream WebSocket --- - -// 
StreamEvent represents a realtime event from the sandbox. -type StreamEvent struct { - Type string `json:"type"` // "command_start", "command_output", "command_end", "file_change", "heartbeat" - Timestamp string `json:"timestamp"` // RFC3339 timestamp - Data json.RawMessage `json:"data,omitempty"` // Event-specific payload - SandboxID string `json:"sandbox_id,omitempty"` // Sandbox ID for context -} - -// CommandStartEvent is sent when a command begins execution. -type CommandStartEvent struct { - CommandID string `json:"command_id"` - Command string `json:"command"` - WorkDir string `json:"work_dir,omitempty"` -} - -// CommandOutputEvent is sent for streaming command output. -type CommandOutputEvent struct { - CommandID string `json:"command_id"` - Output string `json:"output"` - IsStderr bool `json:"is_stderr"` -} - -// CommandEndEvent is sent when a command completes. -type CommandEndEvent struct { - CommandID string `json:"command_id"` - ExitCode int `json:"exit_code"` - Duration string `json:"duration"` -} - -// FileChangeEvent is sent when files are modified. 
-type FileChangeEvent struct { - Path string `json:"path"` - Operation string `json:"operation"` // "created", "modified", "deleted" -} - -var upgrader = websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, - CheckOrigin: func(r *http.Request) bool { - return true // Allow all origins; tighten in production - }, -} - -// @Summary Stream sandbox activity -// @Description Connects via WebSocket to stream realtime sandbox activity (commands, file changes) -// @Tags Sandbox -// @Param id path string true "Sandbox ID" -// @Success 101 {string} string "Switching Protocols - WebSocket connection established" -// @Failure 400 {string} string "Invalid sandbox ID" -// @Failure 404 {string} string "Sandbox not found" -// @Id streamSandboxActivity -// @Router /v1/sandboxes/{id}/stream [get] -func (s *Server) handleSandboxStream(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - if id == "" { - http.Error(w, "sandbox id is required", http.StatusBadRequest) - return - } - - // Verify sandbox exists - sb, err := s.vmSvc.GetSandbox(r.Context(), id) - if err != nil { - if errors.Is(err, store.ErrNotFound) { - http.Error(w, "sandbox not found", http.StatusNotFound) - return - } - http.Error(w, "internal error", http.StatusInternalServerError) - return - } - - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - return - } - defer func() { _ = conn.Close() }() - - // Set a reasonable deadline - if err := conn.SetWriteDeadline(time.Now().Add(10 * time.Minute)); err != nil { - return - } - - // Send initial sandbox state - initialData, _ := json.Marshal(map[string]interface{}{ - "sandbox_id": sb.ID, - "sandbox_name": sb.SandboxName, - "state": sb.State, - "ip_address": sb.IPAddress, - }) - initialEvent := StreamEvent{ - Type: "connected", - Timestamp: time.Now().UTC().Format(time.RFC3339), - Data: initialData, - SandboxID: sb.ID, - } - if err := conn.WriteJSON(initialEvent); err != nil { - return - } - - // Send existing commands - cmds, _ 
:= s.vmSvc.GetSandboxCommands(r.Context(), id, &store.ListOptions{Limit: 50}) - for _, cmd := range cmds { - cmdData, _ := json.Marshal(map[string]interface{}{ - "command_id": cmd.ID, - "command": cmd.Command, - "stdout": cmd.Stdout, - "stderr": cmd.Stderr, - "exit_code": cmd.ExitCode, - "started_at": cmd.StartedAt.Format(time.RFC3339), - "ended_at": cmd.EndedAt.Format(time.RFC3339), - }) - cmdEvent := StreamEvent{ - Type: "command_history", - Timestamp: cmd.EndedAt.Format(time.RFC3339), - Data: cmdData, - SandboxID: sb.ID, - } - if err := conn.WriteJSON(cmdEvent); err != nil { - return - } - } - - // Keep connection alive with heartbeats and poll for new commands - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - - lastCommandCount := len(cmds) - - for { - select { - case <-r.Context().Done(): - return - case <-ticker.C: - // Refresh deadline - if err := conn.SetWriteDeadline(time.Now().Add(10 * time.Minute)); err != nil { - return - } - - // Check for new commands - newCmds, _ := s.vmSvc.GetSandboxCommands(r.Context(), id, &store.ListOptions{Limit: 50}) - if len(newCmds) > lastCommandCount { - // Send new commands - for i := lastCommandCount; i < len(newCmds); i++ { - cmd := newCmds[i] - cmdData, _ := json.Marshal(map[string]interface{}{ - "command_id": cmd.ID, - "command": cmd.Command, - "stdout": cmd.Stdout, - "stderr": cmd.Stderr, - "exit_code": cmd.ExitCode, - "started_at": cmd.StartedAt.Format(time.RFC3339), - "ended_at": cmd.EndedAt.Format(time.RFC3339), - }) - cmdEvent := StreamEvent{ - Type: "command_new", - Timestamp: cmd.EndedAt.Format(time.RFC3339), - Data: cmdData, - SandboxID: sb.ID, - } - if err := conn.WriteJSON(cmdEvent); err != nil { - return - } - } - lastCommandCount = len(newCmds) - } - - // Send heartbeat - heartbeat := StreamEvent{ - Type: "heartbeat", - Timestamp: time.Now().UTC().Format(time.RFC3339), - SandboxID: sb.ID, - } - if err := conn.WriteJSON(heartbeat); err != nil { - return - } - } - } -} diff --git 
a/fluid-remote/internal/rest/server_test.go b/fluid-remote/internal/rest/server_test.go deleted file mode 100755 index ca1830a2..00000000 --- a/fluid-remote/internal/rest/server_test.go +++ /dev/null @@ -1,535 +0,0 @@ -package rest - -import ( - "context" - "encoding/json" - "errors" - "net/http" - "net/http/httptest" - "testing" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/store" -) - -// mockVMService implements the minimal interface needed for testing -type mockVMService struct { - destroySandboxFn func(ctx context.Context, sandboxID string) (*store.Sandbox, error) -} - -func (m *mockVMService) DestroySandbox(ctx context.Context, sandboxID string) (*store.Sandbox, error) { - if m.destroySandboxFn != nil { - return m.destroySandboxFn(ctx, sandboxID) - } - return &store.Sandbox{ - State: store.SandboxStateDestroyed, - BaseImage: "test-image.qcow2", - SandboxName: "test-sandbox", - }, nil -} - -func TestDestroySandbox_NotFound(t *testing.T) { - // Create a mock service that returns ErrNotFound - mockSvc := &mockVMService{ - destroySandboxFn: func(ctx context.Context, sandboxID string) (*store.Sandbox, error) { - return nil, store.ErrNotFound - }, - } - - // Create a request - req := httptest.NewRequest(http.MethodDelete, "/v1/sandboxes/nonexistent-id", nil) - rec := httptest.NewRecorder() - - // We need to test the error handling logic directly - // Simulate what the handler does - id := "nonexistent-id" - sb, err := mockSvc.DestroySandbox(req.Context(), id) - - if err != nil { - if errors.Is(err, store.ErrNotFound) { - rec.WriteHeader(http.StatusNotFound) - } else { - rec.WriteHeader(http.StatusInternalServerError) - } - } else { - rec.WriteHeader(http.StatusOK) - } - - // Verify the response - if rec.Code != http.StatusNotFound { - t.Errorf("expected status %d, got %d", http.StatusNotFound, rec.Code) - } - - // sb should be nil for not found - if sb != nil { - t.Error("expected sandbox to be nil for not found error") - } -} - -func 
TestDestroySandbox_Success(t *testing.T) { - ttl := 3600 - // Create a mock service that returns success with sandbox info - mockSvc := &mockVMService{ - destroySandboxFn: func(ctx context.Context, sandboxID string) (*store.Sandbox, error) { - return &store.Sandbox{ - State: store.SandboxStateDestroyed, - BaseImage: "ubuntu-22.04.qcow2", - SandboxName: "sandbox-test-123", - TTLSeconds: &ttl, - }, nil - }, - } - - // Create a request - req := httptest.NewRequest(http.MethodDelete, "/v1/sandboxes/test-sandbox-id", nil) - rec := httptest.NewRecorder() - - // Simulate what the handler does - id := "test-sandbox-id" - sb, err := mockSvc.DestroySandbox(req.Context(), id) - - if err != nil { - if errors.Is(err, store.ErrNotFound) { - rec.WriteHeader(http.StatusNotFound) - } else { - rec.WriteHeader(http.StatusInternalServerError) - } - } else { - rec.WriteHeader(http.StatusOK) - } - - // Verify the response - if rec.Code != http.StatusOK { - t.Errorf("expected status %d, got %d", http.StatusOK, rec.Code) - } - - // Verify sandbox info is returned - if sb == nil { - t.Fatal("expected sandbox to be returned") - } - if sb.State != store.SandboxStateDestroyed { - t.Errorf("expected state %s, got %s", store.SandboxStateDestroyed, sb.State) - } - if sb.BaseImage != "ubuntu-22.04.qcow2" { - t.Errorf("expected base_image %s, got %s", "ubuntu-22.04.qcow2", sb.BaseImage) - } - if sb.SandboxName != "sandbox-test-123" { - t.Errorf("expected sandbox_name %s, got %s", "sandbox-test-123", sb.SandboxName) - } - if sb.TTLSeconds == nil || *sb.TTLSeconds != 3600 { - t.Errorf("expected ttl_seconds %d, got %v", 3600, sb.TTLSeconds) - } -} - -func TestDestroySandbox_InternalError(t *testing.T) { - // Create a mock service that returns an internal error - mockSvc := &mockVMService{ - destroySandboxFn: func(ctx context.Context, sandboxID string) (*store.Sandbox, error) { - return nil, errors.New("some internal error") - }, - } - - // Create a request - req := 
httptest.NewRequest(http.MethodDelete, "/v1/sandboxes/test-sandbox-id", nil) - rec := httptest.NewRecorder() - - // Simulate what the handler does - id := "test-sandbox-id" - _, err := mockSvc.DestroySandbox(req.Context(), id) - - if err != nil { - if errors.Is(err, store.ErrNotFound) { - rec.WriteHeader(http.StatusNotFound) - } else { - rec.WriteHeader(http.StatusInternalServerError) - } - } else { - rec.WriteHeader(http.StatusOK) - } - - // Verify the response - if rec.Code != http.StatusInternalServerError { - t.Errorf("expected status %d, got %d", http.StatusInternalServerError, rec.Code) - } -} - -func TestErrorResponseJSON(t *testing.T) { - // Test that error response can be properly marshaled - errResp := ErrorResponse{ - Error: "sandbox not found: test-id", - Code: 404, - } - - data, err := json.Marshal(errResp) - if err != nil { - t.Fatalf("failed to marshal error response: %v", err) - } - - var decoded ErrorResponse - if err := json.Unmarshal(data, &decoded); err != nil { - t.Fatalf("failed to unmarshal error response: %v", err) - } - - if decoded.Error != errResp.Error { - t.Errorf("expected error %q, got %q", errResp.Error, decoded.Error) - } - - if decoded.Code != errResp.Code { - t.Errorf("expected code %d, got %d", errResp.Code, decoded.Code) - } -} - -func TestDestroySandboxResponseJSON(t *testing.T) { - ttl := 7200 - // Test that destroy sandbox response can be properly marshaled - resp := destroySandboxResponse{ - State: store.SandboxStateDestroyed, - BaseImage: "centos-9.qcow2", - SandboxName: "my-sandbox", - TTLSeconds: &ttl, - } - - data, err := json.Marshal(resp) - if err != nil { - t.Fatalf("failed to marshal destroy sandbox response: %v", err) - } - - var decoded destroySandboxResponse - if err := json.Unmarshal(data, &decoded); err != nil { - t.Fatalf("failed to unmarshal destroy sandbox response: %v", err) - } - - if decoded.State != resp.State { - t.Errorf("expected state %q, got %q", resp.State, decoded.State) - } - - if 
decoded.BaseImage != resp.BaseImage { - t.Errorf("expected base_image %q, got %q", resp.BaseImage, decoded.BaseImage) - } - - if decoded.SandboxName != resp.SandboxName { - t.Errorf("expected sandbox_name %q, got %q", resp.SandboxName, decoded.SandboxName) - } - - if decoded.TTLSeconds == nil || *decoded.TTLSeconds != *resp.TTLSeconds { - t.Errorf("expected ttl_seconds %d, got %v", *resp.TTLSeconds, decoded.TTLSeconds) - } -} - -func TestDestroySandboxResponseJSON_NoTTL(t *testing.T) { - // Test that destroy sandbox response without TTL omits the field - resp := destroySandboxResponse{ - State: store.SandboxStateDestroyed, - BaseImage: "ubuntu-22.04.qcow2", - SandboxName: "ephemeral-sandbox", - TTLSeconds: nil, - } - - data, err := json.Marshal(resp) - if err != nil { - t.Fatalf("failed to marshal destroy sandbox response: %v", err) - } - - // Verify TTL is omitted from JSON - var rawMap map[string]interface{} - if err := json.Unmarshal(data, &rawMap); err != nil { - t.Fatalf("failed to unmarshal to map: %v", err) - } - - if _, exists := rawMap["ttl_seconds"]; exists { - t.Error("expected ttl_seconds to be omitted when nil") - } -} - -func TestCreateSandboxRequestJSON(t *testing.T) { - ttl := 3600 - // Test that createSandboxRequest can be properly marshaled/unmarshaled with new fields - req := createSandboxRequest{ - SourceVMName: "test-vm", - AgentID: "agent-123", - CPU: 4, - MemoryMB: 4096, - TTLSeconds: &ttl, - AutoStart: true, - WaitForIP: true, - } - - data, err := json.Marshal(req) - if err != nil { - t.Fatalf("failed to marshal create sandbox request: %v", err) - } - - var decoded createSandboxRequest - if err := json.Unmarshal(data, &decoded); err != nil { - t.Fatalf("failed to unmarshal create sandbox request: %v", err) - } - - if decoded.SourceVMName != req.SourceVMName { - t.Errorf("expected source_vm_name %q, got %q", req.SourceVMName, decoded.SourceVMName) - } - if decoded.AgentID != req.AgentID { - t.Errorf("expected agent_id %q, got %q", 
req.AgentID, decoded.AgentID) - } - if decoded.TTLSeconds == nil || *decoded.TTLSeconds != *req.TTLSeconds { - t.Errorf("expected ttl_seconds %d, got %v", *req.TTLSeconds, decoded.TTLSeconds) - } - if decoded.AutoStart != req.AutoStart { - t.Errorf("expected auto_start %v, got %v", req.AutoStart, decoded.AutoStart) - } - if decoded.WaitForIP != req.WaitForIP { - t.Errorf("expected wait_for_ip %v, got %v", req.WaitForIP, decoded.WaitForIP) - } -} - -func TestCreateSandboxRequestJSON_MinimalFields(t *testing.T) { - // Test that only required fields are needed - jsonData := `{"source_vm_name":"test-vm","agent_id":"agent-123"}` - - var decoded createSandboxRequest - if err := json.Unmarshal([]byte(jsonData), &decoded); err != nil { - t.Fatalf("failed to unmarshal minimal create sandbox request: %v", err) - } - - if decoded.SourceVMName != "test-vm" { - t.Errorf("expected source_vm_name %q, got %q", "test-vm", decoded.SourceVMName) - } - if decoded.AgentID != "agent-123" { - t.Errorf("expected agent_id %q, got %q", "agent-123", decoded.AgentID) - } - if decoded.TTLSeconds != nil { - t.Errorf("expected ttl_seconds to be nil, got %v", decoded.TTLSeconds) - } - if decoded.AutoStart != false { - t.Errorf("expected auto_start to be false, got %v", decoded.AutoStart) - } - if decoded.WaitForIP != false { - t.Errorf("expected wait_for_ip to be false, got %v", decoded.WaitForIP) - } -} - -func TestCreateSandboxResponseJSON_WithIPAddress(t *testing.T) { - // Test that createSandboxResponse includes ip_address when set - ip := "192.168.1.100" - resp := createSandboxResponse{ - Sandbox: &store.Sandbox{ - ID: "SBX-123", - AgentID: "agent-123", - SandboxName: "my-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, - IPAddress: ip, - } - - data, err := json.Marshal(resp) - if err != nil { - t.Fatalf("failed to marshal create sandbox response: %v", err) - } - - var rawMap map[string]interface{} - if err := json.Unmarshal(data, &rawMap); err != nil { - t.Fatalf("failed 
to unmarshal to map: %v", err) - } - - if rawMap["ip_address"] != ip { - t.Errorf("expected ip_address %q, got %v", ip, rawMap["ip_address"]) - } -} - -func TestCreateSandboxResponseJSON_NoIPAddress(t *testing.T) { - // Test that createSandboxResponse omits ip_address when empty - resp := createSandboxResponse{ - Sandbox: &store.Sandbox{ - ID: "SBX-123", - AgentID: "agent-123", - SandboxName: "my-sandbox", - State: store.SandboxStateCreated, - }, - IPAddress: "", - } - - data, err := json.Marshal(resp) - if err != nil { - t.Fatalf("failed to marshal create sandbox response: %v", err) - } - - var rawMap map[string]interface{} - if err := json.Unmarshal(data, &rawMap); err != nil { - t.Fatalf("failed to unmarshal to map: %v", err) - } - - // ip_address should be omitted when empty due to omitempty tag - if ipVal, exists := rawMap["ip_address"]; exists && ipVal != "" { - t.Errorf("expected ip_address to be omitted or empty, got %v", ipVal) - } -} - -func TestGetSandboxResponseJSON(t *testing.T) { - // Test that getSandboxResponse can be properly marshaled - ip := "192.168.1.50" - resp := getSandboxResponse{ - Sandbox: &store.Sandbox{ - ID: "SBX-456", - JobID: "JOB-123", - AgentID: "agent-456", - SandboxName: "test-sandbox", - BaseImage: "ubuntu-22.04.qcow2", - Network: "default", - IPAddress: &ip, - State: store.SandboxStateRunning, - }, - Commands: []*store.Command{ - { - ID: "CMD-001", - SandboxID: "SBX-456", - Command: "echo hello", - Stdout: "hello\n", - Stderr: "", - ExitCode: 0, - }, - }, - } - - data, err := json.Marshal(resp) - if err != nil { - t.Fatalf("failed to marshal get sandbox response: %v", err) - } - - var decoded getSandboxResponse - if err := json.Unmarshal(data, &decoded); err != nil { - t.Fatalf("failed to unmarshal get sandbox response: %v", err) - } - - if decoded.Sandbox == nil { - t.Fatal("expected sandbox to be present") - } - if decoded.Sandbox.ID != resp.Sandbox.ID { - t.Errorf("expected sandbox ID %q, got %q", resp.Sandbox.ID, 
decoded.Sandbox.ID) - } - if len(decoded.Commands) != 1 { - t.Errorf("expected 1 command, got %d", len(decoded.Commands)) - } - if decoded.Commands[0].Command != "echo hello" { - t.Errorf("expected command %q, got %q", "echo hello", decoded.Commands[0].Command) - } -} - -func TestGetSandboxResponseJSON_NoCommands(t *testing.T) { - // Test that getSandboxResponse omits commands when nil - resp := getSandboxResponse{ - Sandbox: &store.Sandbox{ - ID: "SBX-789", - SandboxName: "empty-sandbox", - State: store.SandboxStateCreated, - }, - Commands: nil, - } - - data, err := json.Marshal(resp) - if err != nil { - t.Fatalf("failed to marshal get sandbox response: %v", err) - } - - var rawMap map[string]interface{} - if err := json.Unmarshal(data, &rawMap); err != nil { - t.Fatalf("failed to unmarshal to map: %v", err) - } - - // commands should be omitted when nil due to omitempty tag - if _, exists := rawMap["commands"]; exists { - t.Error("expected commands to be omitted when nil") - } -} - -func TestListSandboxCommandsResponseJSON(t *testing.T) { - // Test that listSandboxCommandsResponse can be properly marshaled - resp := listSandboxCommandsResponse{ - Commands: []*store.Command{ - { - ID: "CMD-001", - SandboxID: "SBX-123", - Command: "ls -la", - Stdout: "total 0\n", - ExitCode: 0, - }, - { - ID: "CMD-002", - SandboxID: "SBX-123", - Command: "pwd", - Stdout: "/home/user\n", - ExitCode: 0, - }, - }, - Total: 2, - } - - data, err := json.Marshal(resp) - if err != nil { - t.Fatalf("failed to marshal list sandbox commands response: %v", err) - } - - var decoded listSandboxCommandsResponse - if err := json.Unmarshal(data, &decoded); err != nil { - t.Fatalf("failed to unmarshal list sandbox commands response: %v", err) - } - - if decoded.Total != 2 { - t.Errorf("expected total %d, got %d", 2, decoded.Total) - } - if len(decoded.Commands) != 2 { - t.Errorf("expected 2 commands, got %d", len(decoded.Commands)) - } - if decoded.Commands[0].Command != "ls -la" { - 
t.Errorf("expected command %q, got %q", "ls -la", decoded.Commands[0].Command) - } -} - -func TestStreamEventJSON(t *testing.T) { - // Test that StreamEvent can be properly marshaled - event := StreamEvent{ - Type: "command_new", - Timestamp: "2024-01-15T10:30:00Z", - Data: json.RawMessage(`{"command_id":"CMD-001","command":"echo test"}`), - SandboxID: "SBX-123", - } - - data, err := json.Marshal(event) - if err != nil { - t.Fatalf("failed to marshal stream event: %v", err) - } - - var decoded StreamEvent - if err := json.Unmarshal(data, &decoded); err != nil { - t.Fatalf("failed to unmarshal stream event: %v", err) - } - - if decoded.Type != "command_new" { - t.Errorf("expected type %q, got %q", "command_new", decoded.Type) - } - if decoded.SandboxID != "SBX-123" { - t.Errorf("expected sandbox_id %q, got %q", "SBX-123", decoded.SandboxID) - } -} - -func TestStreamEventJSON_Heartbeat(t *testing.T) { - // Test heartbeat event without data - event := StreamEvent{ - Type: "heartbeat", - Timestamp: "2024-01-15T10:30:00Z", - SandboxID: "SBX-123", - } - - data, err := json.Marshal(event) - if err != nil { - t.Fatalf("failed to marshal heartbeat event: %v", err) - } - - var rawMap map[string]interface{} - if err := json.Unmarshal(data, &rawMap); err != nil { - t.Fatalf("failed to unmarshal to map: %v", err) - } - - // data should be omitted for heartbeat due to omitempty tag - if _, exists := rawMap["data"]; exists { - t.Error("expected data to be omitted for heartbeat event") - } -} diff --git a/fluid-remote/internal/sshca/access.go b/fluid-remote/internal/sshca/access.go deleted file mode 100755 index be1860ef..00000000 --- a/fluid-remote/internal/sshca/access.go +++ /dev/null @@ -1,519 +0,0 @@ -// Package sshca provides SSH Certificate Authority management for ephemeral sandbox access. 
-package sshca - -import ( - "context" - "crypto/sha256" - "encoding/base64" - "fmt" - "strings" - "sync" - "time" - - "github.com/google/uuid" -) - -// AccessService orchestrates SSH certificate-based access to sandboxes. -// It handles certificate issuance, session tracking, and cleanup. -type AccessService struct { - ca *CA - store CertificateStore - vmLookup VMInfoProvider - timeNowFn func() time.Time - mu sync.RWMutex - - // Configuration - defaultTTL time.Duration - maxTTL time.Duration - sshPort int - username string -} - -// VMInfoProvider defines the interface for looking up VM/sandbox information. -type VMInfoProvider interface { - // GetSandboxIP returns the IP address of a sandbox. - GetSandboxIP(ctx context.Context, sandboxID string) (string, error) - - // GetSandboxVMName returns the VM name for a sandbox. - GetSandboxVMName(ctx context.Context, sandboxID string) (string, error) - - // IsSandboxRunning checks if the sandbox is in a running state. - IsSandboxRunning(ctx context.Context, sandboxID string) (bool, error) -} - -// AccessServiceConfig configures the access service. -type AccessServiceConfig struct { - // DefaultTTL is the default certificate lifetime. - DefaultTTL time.Duration - - // MaxTTL is the maximum allowed certificate lifetime. - MaxTTL time.Duration - - // SSHPort is the SSH port on VMs (default 22). - SSHPort int - - // Username is the SSH username (default "sandbox"). - Username string -} - -// DefaultAccessServiceConfig returns sensible defaults. -func DefaultAccessServiceConfig() AccessServiceConfig { - return AccessServiceConfig{ - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - SSHPort: 22, - Username: "sandbox", - } -} - -// AccessServiceOption configures the AccessService. -type AccessServiceOption func(*AccessService) - -// WithAccessTimeNow overrides the clock (useful for tests). 
-func WithAccessTimeNow(fn func() time.Time) AccessServiceOption { - return func(s *AccessService) { s.timeNowFn = fn } -} - -// NewAccessService creates a new access service. -func NewAccessService(ca *CA, store CertificateStore, vmLookup VMInfoProvider, cfg AccessServiceConfig, opts ...AccessServiceOption) *AccessService { - if cfg.DefaultTTL == 0 { - cfg.DefaultTTL = 5 * time.Minute - } - if cfg.MaxTTL == 0 { - cfg.MaxTTL = 10 * time.Minute - } - if cfg.SSHPort == 0 { - cfg.SSHPort = 22 - } - if cfg.Username == "" { - cfg.Username = "sandbox" - } - - s := &AccessService{ - ca: ca, - store: store, - vmLookup: vmLookup, - timeNowFn: time.Now, - defaultTTL: cfg.DefaultTTL, - maxTTL: cfg.MaxTTL, - sshPort: cfg.SSHPort, - username: cfg.Username, - } - - for _, opt := range opts { - opt(s) - } - - return s -} - -// RequestAccess issues a short-lived SSH certificate for sandbox access. -func (s *AccessService) RequestAccess(ctx context.Context, req *AccessRequest) (*AccessResponse, error) { - s.mu.Lock() - defer s.mu.Unlock() - - // Validate request - if req.SandboxID == "" { - return nil, fmt.Errorf("sandbox_id is required") - } - if req.UserID == "" { - return nil, fmt.Errorf("user_id is required") - } - if req.PublicKey == "" { - return nil, fmt.Errorf("public_key is required") - } - - // Determine TTL - ttl := time.Duration(req.TTLMinutes) * time.Minute - if ttl == 0 { - ttl = s.defaultTTL - } - if ttl < time.Minute { - ttl = time.Minute - } - if ttl > s.maxTTL { - ttl = s.maxTTL - } - - // Check if sandbox is running - running, err := s.vmLookup.IsSandboxRunning(ctx, req.SandboxID) - if err != nil { - return nil, fmt.Errorf("check sandbox status: %w", err) - } - if !running { - return nil, fmt.Errorf("sandbox %s is not running", req.SandboxID) - } - - // Get sandbox IP - vmIP, err := s.vmLookup.GetSandboxIP(ctx, req.SandboxID) - if err != nil { - return nil, fmt.Errorf("get sandbox IP: %w", err) - } - if vmIP == "" { - return nil, fmt.Errorf("sandbox %s has no IP 
address", req.SandboxID) - } - - // Get VM name for certificate identity - vmName, err := s.vmLookup.GetSandboxVMName(ctx, req.SandboxID) - if err != nil { - return nil, fmt.Errorf("get sandbox VM name: %w", err) - } - - now := s.timeNowFn() - if req.RequestTime.IsZero() { - req.RequestTime = now - } - - // Issue certificate - certReq := &CertificateRequest{ - UserID: req.UserID, - VMID: vmName, - SandboxID: req.SandboxID, - PublicKey: req.PublicKey, - TTL: ttl, - Principals: []string{s.username}, - SourceIP: req.SourceIP, - RequestTime: req.RequestTime, - } - - cert, err := s.ca.IssueCertificate(ctx, certReq) - if err != nil { - return nil, fmt.Errorf("issue certificate: %w", err) - } - - // Calculate public key fingerprint - fingerprint := s.calculateFingerprint(req.PublicKey) - - // Persist certificate record - record := &CertificateRecord{ - ID: cert.ID, - SandboxID: req.SandboxID, - UserID: req.UserID, - VMID: vmName, - Identity: cert.Identity, - SerialNumber: cert.SerialNumber, - Principals: cert.Principals, - PublicKeyFingerprint: fingerprint, - ValidAfter: cert.ValidAfter, - ValidBefore: cert.ValidBefore, - SourceIP: req.SourceIP, - Status: CertStatusActive, - IssuedAt: now, - } - - if s.store != nil { - if err := s.store.CreateCertificate(ctx, record); err != nil { - return nil, fmt.Errorf("persist certificate: %w", err) - } - } - - // Build response - validUntil := cert.ValidBefore - ttlSeconds := int(validUntil.Sub(now).Seconds()) - - connectCmd := fmt.Sprintf("ssh -i /path/to/key -o CertificateFile=/path/to/key-cert.pub -o StrictHostKeyChecking=no %s@%s", - s.username, vmIP) - - return &AccessResponse{ - CertificateID: cert.ID, - Certificate: cert.Certificate, - VMIPAddress: vmIP, - SSHPort: s.sshPort, - Username: s.username, - ValidUntil: validUntil, - TTLSeconds: ttlSeconds, - ConnectCommand: connectCmd, - }, nil -} - -// RevokeAccess revokes a certificate, immediately terminating access. 
-func (s *AccessService) RevokeAccess(ctx context.Context, certificateID, reason string) error { - s.mu.Lock() - defer s.mu.Unlock() - - if s.store == nil { - return fmt.Errorf("no certificate store configured") - } - - // Get certificate - cert, err := s.store.GetCertificate(ctx, certificateID) - if err != nil { - return fmt.Errorf("get certificate: %w", err) - } - - if cert.Status == CertStatusRevoked { - return ErrCertAlreadyRevoked - } - - // Revoke certificate - if err := s.store.RevokeCertificate(ctx, certificateID, reason); err != nil { - return fmt.Errorf("revoke certificate: %w", err) - } - - // End any active sessions for this certificate - sessions, err := s.store.GetSessionsByCertificate(ctx, certificateID) - if err != nil { - return fmt.Errorf("get sessions: %w", err) - } - - for _, session := range sessions { - if session.Status == SessionStatusActive || session.Status == SessionStatusPending { - now := s.timeNowFn() - if err := s.store.EndSession(ctx, session.ID, now, "certificate revoked: "+reason); err != nil { - // Log but continue - _ = err - } - } - } - - return nil -} - -// RecordSessionStart records the start of an SSH session. 
-func (s *AccessService) RecordSessionStart(ctx context.Context, certificateID, sourceIP string) (string, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.store == nil { - return "", fmt.Errorf("no certificate store configured") - } - - // Get certificate - cert, err := s.store.GetCertificate(ctx, certificateID) - if err != nil { - return "", fmt.Errorf("get certificate: %w", err) - } - - // Validate certificate is still valid - if cert.Status != CertStatusActive { - return "", fmt.Errorf("certificate status is %s, not active", cert.Status) - } - if cert.IsExpired() { - return "", fmt.Errorf("certificate has expired") - } - - // Get VM IP - vmIP, err := s.vmLookup.GetSandboxIP(ctx, cert.SandboxID) - if err != nil { - vmIP = "" // Non-fatal - } - - // Create session record - sessionID := s.generateSessionID() - now := s.timeNowFn() - - session := &AccessSession{ - ID: sessionID, - CertificateID: certificateID, - SandboxID: cert.SandboxID, - UserID: cert.UserID, - VMID: cert.VMID, - VMIPAddress: vmIP, - SourceIP: sourceIP, - Status: SessionStatusActive, - StartedAt: now, - } - - if err := s.store.CreateSession(ctx, session); err != nil { - return "", fmt.Errorf("create session: %w", err) - } - - // Update certificate last used - if err := s.store.UpdateCertificateLastUsed(ctx, certificateID, now); err != nil { - // Non-fatal, just ignore - _ = err - } - - return sessionID, nil -} - -// RecordSessionEnd records the end of an SSH session. -func (s *AccessService) RecordSessionEnd(ctx context.Context, sessionID, reason string) error { - s.mu.Lock() - defer s.mu.Unlock() - - if s.store == nil { - return fmt.Errorf("no certificate store configured") - } - - now := s.timeNowFn() - return s.store.EndSession(ctx, sessionID, now, reason) -} - -// GetCertificate retrieves certificate information. 
-func (s *AccessService) GetCertificate(ctx context.Context, id string) (*CertificateRecord, error) { - if s.store == nil { - return nil, fmt.Errorf("no certificate store configured") - } - return s.store.GetCertificate(ctx, id) -} - -// ListCertificates lists certificates with optional filtering. -func (s *AccessService) ListCertificates(ctx context.Context, filter CertificateFilter, opts *ListOptions) ([]*CertificateRecord, error) { - if s.store == nil { - return nil, fmt.Errorf("no certificate store configured") - } - return s.store.ListCertificates(ctx, filter, opts) -} - -// GetActiveCertificatesForSandbox returns all active certificates for a sandbox. -func (s *AccessService) GetActiveCertificatesForSandbox(ctx context.Context, sandboxID string) ([]*CertificateRecord, error) { - if s.store == nil { - return nil, fmt.Errorf("no certificate store configured") - } - filter := CertificateFilter{ - SandboxID: &sandboxID, - ActiveOnly: true, - } - return s.store.ListCertificates(ctx, filter, nil) -} - -// GetActiveSessionsForSandbox returns all active sessions for a sandbox. -func (s *AccessService) GetActiveSessionsForSandbox(ctx context.Context, sandboxID string) ([]*AccessSession, error) { - if s.store == nil { - return nil, fmt.Errorf("no certificate store configured") - } - filter := SessionFilter{ - SandboxID: &sandboxID, - ActiveOnly: true, - } - return s.store.ListSessions(ctx, filter, nil) -} - -// CleanupExpiredCertificates marks expired certificates and ends associated sessions. 
-func (s *AccessService) CleanupExpiredCertificates(ctx context.Context) (int, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.store == nil { - return 0, fmt.Errorf("no certificate store configured") - } - - // Mark expired certificates - count, err := s.store.ExpireCertificates(ctx) - if err != nil { - return 0, fmt.Errorf("expire certificates: %w", err) - } - - // End sessions for expired certificates - filter := SessionFilter{ - ActiveOnly: true, - } - sessions, err := s.store.ListSessions(ctx, filter, nil) - if err != nil { - return count, fmt.Errorf("list sessions: %w", err) - } - - now := s.timeNowFn() - for _, session := range sessions { - // Check if certificate is expired - cert, err := s.store.GetCertificate(ctx, session.CertificateID) - if err != nil { - continue - } - if cert.IsExpired() || cert.Status == CertStatusExpired { - if err := s.store.EndSession(ctx, session.ID, now, "certificate expired"); err != nil { - // Log but continue - _ = err - } - } - } - - return count, nil -} - -// RevokeAllForSandbox revokes all certificates for a sandbox. -// This is typically called when destroying a sandbox. 
-func (s *AccessService) RevokeAllForSandbox(ctx context.Context, sandboxID, reason string) error { - s.mu.Lock() - defer s.mu.Unlock() - - if s.store == nil { - return nil // No store configured, nothing to revoke - } - - // Get all active certificates for the sandbox - filter := CertificateFilter{ - SandboxID: &sandboxID, - ActiveOnly: true, - } - certs, err := s.store.ListCertificates(ctx, filter, nil) - if err != nil { - return fmt.Errorf("list certificates: %w", err) - } - - // Revoke each certificate - for _, cert := range certs { - if err := s.store.RevokeCertificate(ctx, cert.ID, reason); err != nil { - // Log but continue - _ = err - } - } - - // End all active sessions - sessionFilter := SessionFilter{ - SandboxID: &sandboxID, - ActiveOnly: true, - } - sessions, err := s.store.ListSessions(ctx, sessionFilter, nil) - if err != nil { - return fmt.Errorf("list sessions: %w", err) - } - - now := s.timeNowFn() - for _, session := range sessions { - if err := s.store.EndSession(ctx, session.ID, now, reason); err != nil { - // Log but continue - _ = err - } - } - - return nil -} - -// GetCAPublicKey returns the CA public key for VM configuration. -func (s *AccessService) GetCAPublicKey() (string, error) { - return s.ca.GetPublicKey() -} - -// calculateFingerprint computes the SHA256 fingerprint of a public key. -func (s *AccessService) calculateFingerprint(publicKey string) string { - parts := strings.SplitN(publicKey, " ", 3) - if len(parts) < 2 { - return "" - } - - keyData, err := base64.StdEncoding.DecodeString(parts[1]) - if err != nil { - return "" - } - - hash := sha256.Sum256(keyData) - return fmt.Sprintf("SHA256:%s", base64.StdEncoding.EncodeToString(hash[:])) -} - -// generateSessionID generates a unique session identifier. 
-func (s *AccessService) generateSessionID() string { - id := uuid.NewString() - return fmt.Sprintf("SESS-%s", strings.ToUpper(id[:8])) -} - -// StartCleanupRoutine starts a background goroutine to periodically clean up expired certificates. -func (s *AccessService) StartCleanupRoutine(ctx context.Context, interval time.Duration) { - go func() { - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if _, err := s.CleanupExpiredCertificates(ctx); err != nil { - // Log error but continue - _ = err - } - } - } - }() -} diff --git a/fluid-remote/internal/sshca/ca.go b/fluid-remote/internal/sshca/ca.go deleted file mode 100755 index 74ebb017..00000000 --- a/fluid-remote/internal/sshca/ca.go +++ /dev/null @@ -1,571 +0,0 @@ -// Package sshca provides SSH Certificate Authority management for ephemeral sandbox access. -// -// This package handles: -// - SSH CA key generation and storage -// - Short-lived SSH certificate issuance -// - Certificate validation and metadata -// -// Certificates are designed to be ephemeral (1-10 minutes TTL) and are -// used to provide secure, auditable access to sandbox VMs without requiring -// any persistent credentials on the VM side. 
-package sshca - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/base64" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - "sync" - "time" -) - -// Common errors -var ( - ErrCANotInitialized = errors.New("sshca: CA not initialized") - ErrInvalidPublicKey = errors.New("sshca: invalid public key") - ErrInvalidTTL = errors.New("sshca: TTL must be between 1 and 10 minutes") - ErrCertGenFailed = errors.New("sshca: certificate generation failed") - ErrCAKeyNotFound = errors.New("sshca: CA private key not found") - ErrCAKeyPermissions = errors.New("sshca: CA private key has insecure permissions") - ErrSSHKeygenNotFound = errors.New("sshca: ssh-keygen binary not found") - ErrInvalidPrincipal = errors.New("sshca: invalid principal") - ErrInvalidCertOptions = errors.New("sshca: invalid certificate options") -) - -// Config holds configuration for the SSH CA. -type Config struct { - // CAKeyPath is the path to the CA private key file. - // This key is used to sign all user certificates. - CAKeyPath string - - // CAPubKeyPath is the path to the CA public key file. - // This is baked into VM images for certificate verification. - CAPubKeyPath string - - // WorkDir is the directory for temporary certificate operations. - // Certificates are generated here before being returned to callers. - WorkDir string - - // DefaultTTL is the default certificate lifetime if not specified. - // Must be between 1 and 10 minutes. - DefaultTTL time.Duration - - // MaxTTL is the maximum allowed certificate lifetime. - // Requests for longer TTLs will be capped to this value. - MaxTTL time.Duration - - // DefaultPrincipals are the default principals added to certificates - // if none are specified. Usually ["sandbox"]. - DefaultPrincipals []string - - // SSHKeygenPath is the optional path to ssh-keygen binary. - // If empty, it will be looked up in PATH. - SSHKeygenPath string - - // EnforceKeyPermissions when true, validates CA key file permissions. 
- EnforceKeyPermissions bool -} - -// DefaultConfig returns a configuration with sensible defaults. -func DefaultConfig() Config { - return Config{ - CAKeyPath: "/etc/fluid-remote/ssh_ca", - CAPubKeyPath: "/etc/fluid-remote/ssh_ca.pub", - WorkDir: "/tmp/sshca", - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - DefaultPrincipals: []string{"sandbox"}, - SSHKeygenPath: "", - EnforceKeyPermissions: true, - } -} - -// CertificateRequest contains all parameters needed to issue a certificate. -type CertificateRequest struct { - // UserID identifies the user requesting access. - // This is embedded in the certificate identity for audit purposes. - UserID string - - // VMID identifies the target VM/sandbox. - // This is embedded in the certificate identity for audit purposes. - VMID string - - // SandboxID is the internal sandbox identifier. - SandboxID string - - // PublicKey is the user's SSH public key to be certified. - // Must be in OpenSSH format (e.g., "ssh-ed25519 AAAA... comment"). - PublicKey string - - // TTL is the requested certificate lifetime. - // If zero, DefaultTTL is used. If greater than MaxTTL, it's capped. - TTL time.Duration - - // Principals are the allowed usernames for this certificate. - // If empty, DefaultPrincipals are used. - Principals []string - - // SourceIP is the IP address of the requester (for audit). - SourceIP string - - // RequestTime is when the request was made. - RequestTime time.Time -} - -// Certificate represents an issued SSH certificate. -type Certificate struct { - // ID is a unique identifier for this certificate. - ID string - - // Identity is the certificate identity string embedded in the cert. - // Format: "user:{UserID}-vm:{VMID}-sbx:{SandboxID}" - Identity string - - // Certificate is the OpenSSH certificate content. - // This is the content of the -cert.pub file. - Certificate string - - // SerialNumber is the certificate serial number. 
- SerialNumber uint64 - - // ValidAfter is when the certificate becomes valid. - ValidAfter time.Time - - // ValidBefore is when the certificate expires. - ValidBefore time.Time - - // Principals are the usernames allowed by this certificate. - Principals []string - - // CriticalOptions lists certificate critical options. - CriticalOptions map[string]string - - // Extensions lists certificate extensions. - Extensions []string - - // Request is the original request that created this certificate. - Request *CertificateRequest - - // IssuedAt is when the certificate was issued. - IssuedAt time.Time -} - -// CA manages SSH certificate authority operations. -type CA struct { - cfg Config - mu sync.RWMutex - serialNum uint64 - sshKeygen string - caPubKey string - timeNowFn func() time.Time - initialized bool -} - -// Option configures the CA during construction. -type Option func(*CA) - -// WithTimeNow overrides the clock (useful for tests). -func WithTimeNow(fn func() time.Time) Option { - return func(ca *CA) { ca.timeNowFn = fn } -} - -// NewCA creates a new SSH Certificate Authority manager. -func NewCA(cfg Config, opts ...Option) (*CA, error) { - ca := &CA{ - cfg: cfg, - serialNum: 0, - timeNowFn: time.Now, - } - - for _, opt := range opts { - opt(ca) - } - - // Locate ssh-keygen - sshKeygen := cfg.SSHKeygenPath - if sshKeygen == "" { - path, err := exec.LookPath("ssh-keygen") - if err != nil { - return nil, fmt.Errorf("%w: %v", ErrSSHKeygenNotFound, err) - } - sshKeygen = path - } - ca.sshKeygen = sshKeygen - - // Create work directory - if err := os.MkdirAll(cfg.WorkDir, 0o700); err != nil { - return nil, fmt.Errorf("create work directory: %w", err) - } - - return ca, nil -} - -// Initialize validates the CA configuration and loads the CA public key. -// This must be called before issuing certificates. 
-func (ca *CA) Initialize(ctx context.Context) error { - ca.mu.Lock() - defer ca.mu.Unlock() - - // Check CA private key exists - if _, err := os.Stat(ca.cfg.CAKeyPath); os.IsNotExist(err) { - return fmt.Errorf("%w: %s", ErrCAKeyNotFound, ca.cfg.CAKeyPath) - } - - // Validate CA key permissions - if ca.cfg.EnforceKeyPermissions { - info, err := os.Stat(ca.cfg.CAKeyPath) - if err != nil { - return fmt.Errorf("stat CA key: %w", err) - } - mode := info.Mode().Perm() - // Key should be readable only by owner (0600 or 0400) - if mode&0o077 != 0 { - return fmt.Errorf("%w: %s has mode %o, expected 0600 or 0400", - ErrCAKeyPermissions, ca.cfg.CAKeyPath, mode) - } - } - - // Load CA public key - pubKeyBytes, err := os.ReadFile(ca.cfg.CAPubKeyPath) - if err != nil { - return fmt.Errorf("read CA public key: %w", err) - } - ca.caPubKey = strings.TrimSpace(string(pubKeyBytes)) - - // Initialize serial number with random value - var serialBytes [8]byte - if _, err := rand.Read(serialBytes[:]); err != nil { - return fmt.Errorf("initialize serial: %w", err) - } - ca.serialNum = uint64(serialBytes[0])<<56 | - uint64(serialBytes[1])<<48 | - uint64(serialBytes[2])<<40 | - uint64(serialBytes[3])<<32 | - uint64(serialBytes[4])<<24 | - uint64(serialBytes[5])<<16 | - uint64(serialBytes[6])<<8 | - uint64(serialBytes[7]) - - ca.initialized = true - return nil -} - -// IssueCertificate generates a short-lived SSH certificate for the given request. 
-func (ca *CA) IssueCertificate(ctx context.Context, req *CertificateRequest) (*Certificate, error) { - ca.mu.Lock() - defer ca.mu.Unlock() - - if !ca.initialized { - return nil, ErrCANotInitialized - } - - // Validate request - if err := ca.validateRequest(req); err != nil { - return nil, err - } - - // Determine TTL - ttl := req.TTL - if ttl == 0 { - ttl = ca.cfg.DefaultTTL - } - if ttl < time.Minute { - return nil, fmt.Errorf("%w: minimum TTL is 1 minute", ErrInvalidTTL) - } - if ttl > ca.cfg.MaxTTL { - ttl = ca.cfg.MaxTTL - } - - // Determine principals - principals := req.Principals - if len(principals) == 0 { - principals = ca.cfg.DefaultPrincipals - } - - // Validate principals - for _, p := range principals { - if p == "" || strings.ContainsAny(p, " \t\n\r") { - return nil, fmt.Errorf("%w: %q", ErrInvalidPrincipal, p) - } - } - - // Generate unique certificate ID - certID := ca.generateCertID() - - // Build certificate identity - identity := fmt.Sprintf("user:%s-vm:%s-sbx:%s-cert:%s", - req.UserID, req.VMID, req.SandboxID, certID) - - // Increment serial number - ca.serialNum++ - serial := ca.serialNum - - // Calculate validity window - now := ca.timeNowFn() - validAfter := now.Add(-time.Minute) // Allow 1 minute clock skew - validBefore := now.Add(ttl) - - // Format validity for ssh-keygen - validityStr := fmt.Sprintf("+%dm", int(ttl.Minutes())) - - // Create temporary directory for this certificate - tempDir, err := os.MkdirTemp(ca.cfg.WorkDir, "cert-") - if err != nil { - return nil, fmt.Errorf("create temp dir: %w", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - // Write public key to temp file - pubKeyPath := filepath.Join(tempDir, "user_key.pub") - if err := os.WriteFile(pubKeyPath, []byte(req.PublicKey), 0o600); err != nil { - return nil, fmt.Errorf("write public key: %w", err) - } - - // Build ssh-keygen command - // ssh-keygen -s CA_KEY -I IDENTITY -n PRINCIPALS -V VALIDITY -z SERIAL -O OPTIONS KEY.pub - args := []string{ - "-s", 
ca.cfg.CAKeyPath, - "-I", identity, - "-n", strings.Join(principals, ","), - "-V", validityStr, - "-z", fmt.Sprintf("%d", serial), - // Security options - disable forwarding but allow PTY for tmux - "-O", "no-port-forwarding", - "-O", "no-agent-forwarding", - "-O", "no-X11-forwarding", - // Note: permit-pty is enabled by default, so we don't need to specify it - pubKeyPath, - } - - cmd := exec.CommandContext(ctx, ca.sshKeygen, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("%w: %v: %s", ErrCertGenFailed, err, stderr.String()) - } - - // Read generated certificate - certPath := filepath.Join(tempDir, "user_key-cert.pub") - certBytes, err := os.ReadFile(certPath) - if err != nil { - return nil, fmt.Errorf("read certificate: %w", err) - } - - cert := &Certificate{ - ID: certID, - Identity: identity, - Certificate: strings.TrimSpace(string(certBytes)), - SerialNumber: serial, - ValidAfter: validAfter, - ValidBefore: validBefore, - Principals: principals, - CriticalOptions: map[string]string{}, - Extensions: []string{ - "permit-pty", - }, - Request: req, - IssuedAt: now, - } - - return cert, nil -} - -// GetPublicKey returns the CA public key content. -// This is the key that should be baked into VM images. -func (ca *CA) GetPublicKey() (string, error) { - ca.mu.RLock() - defer ca.mu.RUnlock() - - if !ca.initialized { - return "", ErrCANotInitialized - } - return ca.caPubKey, nil -} - -// GenerateCA creates a new SSH CA key pair. -// This should typically only be called once during initial setup. 
-func GenerateCA(keyPath, comment string) error { - // Ensure directory exists - dir := filepath.Dir(keyPath) - if err := os.MkdirAll(dir, 0o700); err != nil { - return fmt.Errorf("create CA directory: %w", err) - } - - // Find ssh-keygen - sshKeygen, err := exec.LookPath("ssh-keygen") - if err != nil { - return fmt.Errorf("%w: %v", ErrSSHKeygenNotFound, err) - } - - // Generate Ed25519 key pair - args := []string{ - "-t", "ed25519", - "-f", keyPath, - "-N", "", // No passphrase - "-C", comment, - } - - cmd := exec.Command(sshKeygen, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("generate CA key: %v: %s", err, stderr.String()) - } - - // Set secure permissions on private key - if err := os.Chmod(keyPath, 0o600); err != nil { - return fmt.Errorf("set CA key permissions: %w", err) - } - - return nil -} - -// GenerateUserKeyPair generates a new SSH key pair for a user. -// Returns the private key, public key, and any error. -func GenerateUserKeyPair(comment string) (privateKey, publicKey string, err error) { - // Create temporary directory - tempDir, err := os.MkdirTemp("", "sshkey-") - if err != nil { - return "", "", fmt.Errorf("create temp dir: %w", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "user_key") - - // Find ssh-keygen - sshKeygen, err := exec.LookPath("ssh-keygen") - if err != nil { - return "", "", fmt.Errorf("%w: %v", ErrSSHKeygenNotFound, err) - } - - // Generate Ed25519 key pair - args := []string{ - "-t", "ed25519", - "-f", keyPath, - "-N", "", // No passphrase - "-C", comment, - } - - cmd := exec.Command(sshKeygen, args...) 
- var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return "", "", fmt.Errorf("generate user key: %v: %s", err, stderr.String()) - } - - // Read keys - privKeyBytes, err := os.ReadFile(keyPath) - if err != nil { - return "", "", fmt.Errorf("read private key: %w", err) - } - - pubKeyBytes, err := os.ReadFile(keyPath + ".pub") - if err != nil { - return "", "", fmt.Errorf("read public key: %w", err) - } - - return string(privKeyBytes), strings.TrimSpace(string(pubKeyBytes)), nil -} - -// validateRequest validates a certificate request. -func (ca *CA) validateRequest(req *CertificateRequest) error { - if req.UserID == "" { - return fmt.Errorf("%w: UserID is required", ErrInvalidCertOptions) - } - if req.VMID == "" { - return fmt.Errorf("%w: VMID is required", ErrInvalidCertOptions) - } - if req.PublicKey == "" { - return fmt.Errorf("%w: PublicKey is required", ErrInvalidCertOptions) - } - - // Basic validation of public key format - parts := strings.SplitN(req.PublicKey, " ", 3) - if len(parts) < 2 { - return fmt.Errorf("%w: must be in OpenSSH format", ErrInvalidPublicKey) - } - - keyType := parts[0] - validTypes := []string{ - "ssh-rsa", "ssh-ed25519", "ecdsa-sha2-nistp256", - "ecdsa-sha2-nistp384", "ecdsa-sha2-nistp521", - } - found := false - for _, t := range validTypes { - if keyType == t { - found = true - break - } - } - if !found { - return fmt.Errorf("%w: unsupported key type %q", ErrInvalidPublicKey, keyType) - } - - // Validate base64 encoding of key - if _, err := base64.StdEncoding.DecodeString(parts[1]); err != nil { - return fmt.Errorf("%w: invalid base64 encoding", ErrInvalidPublicKey) - } - - return nil -} - -// generateCertID generates a unique certificate identifier. 
-func (ca *CA) generateCertID() string { - var b [16]byte - if _, err := rand.Read(b[:]); err != nil { - // Fallback to time-based ID if random fails (extremely unlikely) - return fmt.Sprintf("%x", time.Now().UnixNano()) - } - return fmt.Sprintf("%x", b[:8]) -} - -// CertInfo extracts information from a certificate for display/audit. -type CertInfo struct { - ID string - Identity string - Serial uint64 - ValidAfter time.Time - ValidBefore time.Time - Principals []string - Extensions []string - IsExpired bool - TimeToExpiry time.Duration -} - -// GetCertInfo parses certificate info for display purposes. -func (c *Certificate) GetCertInfo() *CertInfo { - now := time.Now() - return &CertInfo{ - ID: c.ID, - Identity: c.Identity, - Serial: c.SerialNumber, - ValidAfter: c.ValidAfter, - ValidBefore: c.ValidBefore, - Principals: c.Principals, - Extensions: c.Extensions, - IsExpired: now.After(c.ValidBefore), - TimeToExpiry: c.ValidBefore.Sub(now), - } -} - -// SSHConnectCommand returns the SSH command string for connecting with this certificate. 
-func (c *Certificate) SSHConnectCommand(privateKeyPath, certPath, vmIP string, port int) string { - if port == 0 { - port = 22 - } - principal := "sandbox" - if len(c.Principals) > 0 { - principal = c.Principals[0] - } - return fmt.Sprintf("ssh -i %s -o CertificateFile=%s -o StrictHostKeyChecking=no -p %d %s@%s", - privateKeyPath, certPath, port, principal, vmIP) -} diff --git a/fluid-remote/internal/sshca/ca_test.go b/fluid-remote/internal/sshca/ca_test.go deleted file mode 100755 index 8edde909..00000000 --- a/fluid-remote/internal/sshca/ca_test.go +++ /dev/null @@ -1,475 +0,0 @@ -package sshca - -import ( - "context" - "os" - "path/filepath" - "strings" - "testing" - "time" -) - -func TestGenerateCA(t *testing.T) { - // Create temp directory - tempDir, err := os.MkdirTemp("", "sshca-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "test_ca") - comment := "test-ssh-ca" - - // Generate CA - err = GenerateCA(keyPath, comment) - if err != nil { - t.Fatalf("GenerateCA failed: %v", err) - } - - // Check private key exists - if _, err := os.Stat(keyPath); os.IsNotExist(err) { - t.Error("private key was not created") - } - - // Check public key exists - pubKeyPath := keyPath + ".pub" - if _, err := os.Stat(pubKeyPath); os.IsNotExist(err) { - t.Error("public key was not created") - } - - // Check private key permissions - info, err := os.Stat(keyPath) - if err != nil { - t.Fatalf("failed to stat private key: %v", err) - } - if info.Mode().Perm() != 0o600 { - t.Errorf("private key has wrong permissions: %o, expected 0600", info.Mode().Perm()) - } - - // Check public key content - pubKeyBytes, err := os.ReadFile(pubKeyPath) - if err != nil { - t.Fatalf("failed to read public key: %v", err) - } - pubKey := string(pubKeyBytes) - if !strings.HasPrefix(pubKey, "ssh-ed25519 ") { - t.Errorf("public key has wrong format: %s", pubKey[:min(len(pubKey), 50)]) - } - if 
!strings.Contains(pubKey, comment) { - t.Errorf("public key does not contain comment: %s", pubKey) - } -} - -func TestGenerateUserKeyPair(t *testing.T) { - comment := "test-user-key" - - privateKey, publicKey, err := GenerateUserKeyPair(comment) - if err != nil { - t.Fatalf("GenerateUserKeyPair failed: %v", err) - } - - // Check private key format - if !strings.Contains(privateKey, "OPENSSH PRIVATE KEY") { - t.Error("private key is not in OpenSSH format") - } - - // Check public key format - if !strings.HasPrefix(publicKey, "ssh-ed25519 ") { - t.Errorf("public key has wrong format: %s", publicKey[:min(len(publicKey), 50)]) - } - if !strings.Contains(publicKey, comment) { - t.Errorf("public key does not contain comment") - } -} - -func TestNewCA(t *testing.T) { - cfg := DefaultConfig() - cfg.CAKeyPath = "/nonexistent/path" - cfg.EnforceKeyPermissions = false - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - if ca == nil { - t.Error("NewCA returned nil") - } else if ca.sshKeygen == "" { - t.Error("ssh-keygen path not set") - } -} - -func TestCAInitialize(t *testing.T) { - // Create temp directory with CA keys - tempDir, err := os.MkdirTemp("", "sshca-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "test_ca") - err = GenerateCA(keyPath, "test-ca") - if err != nil { - t.Fatalf("failed to generate CA: %v", err) - } - - // Create CA instance - cfg := Config{ - CAKeyPath: keyPath, - CAPubKeyPath: keyPath + ".pub", - WorkDir: tempDir, - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - DefaultPrincipals: []string{"sandbox"}, - EnforceKeyPermissions: true, - } - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - // Initialize CA - err = ca.Initialize(context.Background()) - if err != nil { - t.Fatalf("Initialize failed: %v", err) - } - - if !ca.initialized { - t.Error("CA not 
marked as initialized") - } - - // Get public key - pubKey, err := ca.GetPublicKey() - if err != nil { - t.Fatalf("GetPublicKey failed: %v", err) - } - if !strings.HasPrefix(pubKey, "ssh-ed25519 ") { - t.Errorf("public key has wrong format: %s", pubKey[:min(len(pubKey), 50)]) - } -} - -func TestCAInitializeNotFound(t *testing.T) { - cfg := Config{ - CAKeyPath: "/nonexistent/path/ssh_ca", - CAPubKeyPath: "/nonexistent/path/ssh_ca.pub", - WorkDir: "/tmp", - EnforceKeyPermissions: false, - } - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - err = ca.Initialize(context.Background()) - if err == nil { - t.Error("Initialize should have failed with nonexistent key") - } -} - -func TestCAIssueCertificate(t *testing.T) { - // Create temp directory with CA keys - tempDir, err := os.MkdirTemp("", "sshca-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "test_ca") - err = GenerateCA(keyPath, "test-ca") - if err != nil { - t.Fatalf("failed to generate CA: %v", err) - } - - // Generate user key - _, userPubKey, err := GenerateUserKeyPair("test-user") - if err != nil { - t.Fatalf("failed to generate user key: %v", err) - } - - // Create and initialize CA - cfg := Config{ - CAKeyPath: keyPath, - CAPubKeyPath: keyPath + ".pub", - WorkDir: tempDir, - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - DefaultPrincipals: []string{"sandbox"}, - EnforceKeyPermissions: true, - } - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - err = ca.Initialize(context.Background()) - if err != nil { - t.Fatalf("Initialize failed: %v", err) - } - - // Issue certificate - req := &CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: userPubKey, - TTL: 5 * time.Minute, - Principals: []string{"sandbox"}, - SourceIP: "127.0.0.1", - RequestTime: time.Now(), - } - - 
cert, err := ca.IssueCertificate(context.Background(), req) - if err != nil { - t.Fatalf("IssueCertificate failed: %v", err) - } - - // Validate certificate - if cert.ID == "" { - t.Error("certificate ID is empty") - } - if cert.Identity == "" { - t.Error("certificate identity is empty") - } - if !strings.Contains(cert.Identity, "test-user") { - t.Errorf("identity should contain user ID: %s", cert.Identity) - } - if !strings.Contains(cert.Identity, "test-vm") { - t.Errorf("identity should contain VM ID: %s", cert.Identity) - } - if cert.Certificate == "" { - t.Error("certificate content is empty") - } - if !strings.Contains(cert.Certificate, "cert-v01@openssh.com") { - t.Error("certificate is not in OpenSSH certificate format") - } - if cert.SerialNumber == 0 { - t.Error("serial number should not be zero") - } - if len(cert.Principals) == 0 { - t.Error("principals should not be empty") - } - if cert.ValidBefore.Before(cert.ValidAfter) { - t.Error("ValidBefore should be after ValidAfter") - } - - // Check certificate info - info := cert.GetCertInfo() - if info.IsExpired { - t.Error("certificate should not be expired immediately after issuance") - } - if info.TimeToExpiry <= 0 { - t.Error("time to expiry should be positive") - } -} - -func TestCAIssueCertificateNotInitialized(t *testing.T) { - cfg := DefaultConfig() - cfg.EnforceKeyPermissions = false - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - req := &CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAITest test", - } - - _, err = ca.IssueCertificate(context.Background(), req) - if err != ErrCANotInitialized { - t.Errorf("expected ErrCANotInitialized, got: %v", err) - } -} - -func TestCAValidateRequest(t *testing.T) { - // Create temp directory with CA keys - tempDir, err := os.MkdirTemp("", "sshca-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer 
func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "test_ca") - err = GenerateCA(keyPath, "test-ca") - if err != nil { - t.Fatalf("failed to generate CA: %v", err) - } - - cfg := Config{ - CAKeyPath: keyPath, - CAPubKeyPath: keyPath + ".pub", - WorkDir: tempDir, - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - DefaultPrincipals: []string{"sandbox"}, - EnforceKeyPermissions: true, - } - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - err = ca.Initialize(context.Background()) - if err != nil { - t.Fatalf("Initialize failed: %v", err) - } - - tests := []struct { - name string - req *CertificateRequest - wantErr bool - }{ - { - name: "missing UserID", - req: &CertificateRequest{ - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAITest test", - }, - wantErr: true, - }, - { - name: "missing VMID", - req: &CertificateRequest{ - UserID: "test-user", - SandboxID: "SBX-123", - PublicKey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAITest test", - }, - wantErr: true, - }, - { - name: "missing PublicKey", - req: &CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - }, - wantErr: true, - }, - { - name: "invalid PublicKey format", - req: &CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: "not-a-valid-key", - }, - wantErr: true, - }, - { - name: "unsupported key type", - req: &CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: "ssh-dss AAAAB3NzaC1kc3MAAACBA test", - }, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := ca.IssueCertificate(context.Background(), tt.req) - if (err != nil) != tt.wantErr { - t.Errorf("IssueCertificate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestCATTLCapping(t *testing.T) { - // Create temp directory with CA keys - tempDir, err := 
os.MkdirTemp("", "sshca-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "test_ca") - err = GenerateCA(keyPath, "test-ca") - if err != nil { - t.Fatalf("failed to generate CA: %v", err) - } - - _, userPubKey, err := GenerateUserKeyPair("test-user") - if err != nil { - t.Fatalf("failed to generate user key: %v", err) - } - - cfg := Config{ - CAKeyPath: keyPath, - CAPubKeyPath: keyPath + ".pub", - WorkDir: tempDir, - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - DefaultPrincipals: []string{"sandbox"}, - EnforceKeyPermissions: true, - } - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - err = ca.Initialize(context.Background()) - if err != nil { - t.Fatalf("Initialize failed: %v", err) - } - - // Request certificate with TTL exceeding max - req := &CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: userPubKey, - TTL: 30 * time.Minute, // Exceeds MaxTTL - } - - cert, err := ca.IssueCertificate(context.Background(), req) - if err != nil { - t.Fatalf("IssueCertificate failed: %v", err) - } - - // Check that TTL was capped - actualTTL := cert.ValidBefore.Sub(cert.IssuedAt) - // Allow for some clock skew (the cert adds 1 minute before valid_after) - if actualTTL > 11*time.Minute { - t.Errorf("TTL should be capped to MaxTTL (10m), got: %v", actualTTL) - } -} - -func TestCertificateConnectCommand(t *testing.T) { - cert := &Certificate{ - Principals: []string{"sandbox"}, - } - - cmd := cert.SSHConnectCommand("/path/to/key", "/path/to/key-cert.pub", "192.168.1.100", 22) - - if !strings.Contains(cmd, "-i /path/to/key") { - t.Error("command should contain private key path") - } - if !strings.Contains(cmd, "CertificateFile=/path/to/key-cert.pub") { - t.Error("command should contain certificate path") - } - if !strings.Contains(cmd, "sandbox@192.168.1.100") { - 
t.Error("command should contain user@host") - } - if !strings.Contains(cmd, "-p 22") { - t.Error("command should contain port") - } -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/fluid-remote/internal/sshca/store.go b/fluid-remote/internal/sshca/store.go deleted file mode 100755 index fc113baa..00000000 --- a/fluid-remote/internal/sshca/store.go +++ /dev/null @@ -1,338 +0,0 @@ -package sshca - -import ( - "context" - "errors" - "time" -) - -// Store errors -var ( - ErrCertNotFound = errors.New("sshca: certificate not found") - ErrSessionNotFound = errors.New("sshca: session not found") - ErrCertAlreadyRevoked = errors.New("sshca: certificate already revoked") -) - -// CertificateRecord represents a persisted SSH certificate. -type CertificateRecord struct { - // ID is the unique certificate identifier. - ID string `json:"id" db:"id"` - - // SandboxID links this certificate to a sandbox. - SandboxID string `json:"sandbox_id" db:"sandbox_id"` - - // UserID identifies the user who requested the certificate. - UserID string `json:"user_id" db:"user_id"` - - // VMID identifies the target VM. - VMID string `json:"vm_id" db:"vm_id"` - - // Identity is the certificate identity string. - Identity string `json:"identity" db:"identity"` - - // SerialNumber is the certificate serial number. - SerialNumber uint64 `json:"serial_number" db:"serial_number"` - - // Principals are the allowed usernames. - Principals []string `json:"principals" db:"principals"` - - // PublicKeyFingerprint is the SHA256 fingerprint of the user's public key. - PublicKeyFingerprint string `json:"public_key_fingerprint" db:"public_key_fingerprint"` - - // ValidAfter is when the certificate becomes valid. - ValidAfter time.Time `json:"valid_after" db:"valid_after"` - - // ValidBefore is when the certificate expires. - ValidBefore time.Time `json:"valid_before" db:"valid_before"` - - // SourceIP is the IP address of the requester. 
- SourceIP string `json:"source_ip,omitempty" db:"source_ip"` - - // Status indicates the certificate state. - Status CertStatus `json:"status" db:"status"` - - // RevokedAt is when the certificate was revoked (if applicable). - RevokedAt *time.Time `json:"revoked_at,omitempty" db:"revoked_at"` - - // RevokeReason explains why the certificate was revoked. - RevokeReason string `json:"revoke_reason,omitempty" db:"revoke_reason"` - - // IssuedAt is when the certificate was issued. - IssuedAt time.Time `json:"issued_at" db:"issued_at"` - - // LastUsedAt tracks when the certificate was last used for connection. - LastUsedAt *time.Time `json:"last_used_at,omitempty" db:"last_used_at"` -} - -// CertStatus represents the state of a certificate. -type CertStatus string - -const ( - // CertStatusActive indicates the certificate is valid and usable. - CertStatusActive CertStatus = "ACTIVE" - - // CertStatusExpired indicates the certificate has passed its ValidBefore time. - CertStatusExpired CertStatus = "EXPIRED" - - // CertStatusRevoked indicates the certificate was manually revoked. - CertStatusRevoked CertStatus = "REVOKED" - - // CertStatusUsed indicates the certificate was used and session ended. - CertStatusUsed CertStatus = "USED" -) - -// AccessSession tracks a user's SSH session using a certificate. -type AccessSession struct { - // ID is the unique session identifier. - ID string `json:"id" db:"id"` - - // CertificateID links to the certificate used for this session. - CertificateID string `json:"certificate_id" db:"certificate_id"` - - // SandboxID links to the accessed sandbox. - SandboxID string `json:"sandbox_id" db:"sandbox_id"` - - // UserID identifies the user. - UserID string `json:"user_id" db:"user_id"` - - // VMID identifies the target VM. - VMID string `json:"vm_id" db:"vm_id"` - - // VMIPAddress is the IP address used to connect. - VMIPAddress string `json:"vm_ip_address" db:"vm_ip_address"` - - // SourceIP is the IP address the user connected from. 
- SourceIP string `json:"source_ip,omitempty" db:"source_ip"` - - // Status indicates the session state. - Status SessionStatus `json:"status" db:"status"` - - // StartedAt is when the session began. - StartedAt time.Time `json:"started_at" db:"started_at"` - - // EndedAt is when the session ended (if applicable). - EndedAt *time.Time `json:"ended_at,omitempty" db:"ended_at"` - - // DurationSeconds is the session duration in seconds. - DurationSeconds *int `json:"duration_seconds,omitempty" db:"duration_seconds"` - - // DisconnectReason explains why the session ended. - DisconnectReason string `json:"disconnect_reason,omitempty" db:"disconnect_reason"` -} - -// SessionStatus represents the state of an access session. -type SessionStatus string - -const ( - // SessionStatusPending indicates the session is waiting for connection. - SessionStatusPending SessionStatus = "PENDING" - - // SessionStatusActive indicates the session is currently connected. - SessionStatusActive SessionStatus = "ACTIVE" - - // SessionStatusEnded indicates the session ended normally. - SessionStatusEnded SessionStatus = "ENDED" - - // SessionStatusExpired indicates the session ended due to certificate expiry. - SessionStatusExpired SessionStatus = "EXPIRED" - - // SessionStatusRevoked indicates the session was terminated by revocation. - SessionStatusRevoked SessionStatus = "REVOKED" - - // SessionStatusError indicates the session ended due to an error. - SessionStatusError SessionStatus = "ERROR" -) - -// CertificateFilter provides filtering options for certificate queries. -type CertificateFilter struct { - SandboxID *string - UserID *string - VMID *string - Status *CertStatus - // ActiveOnly filters to certificates that are currently valid (not expired/revoked). - ActiveOnly bool - // IssuedAfter filters to certificates issued after this time. - IssuedAfter *time.Time - // IssuedBefore filters to certificates issued before this time. 
- IssuedBefore *time.Time -} - -// SessionFilter provides filtering options for session queries. -type SessionFilter struct { - CertificateID *string - SandboxID *string - UserID *string - Status *SessionStatus - // ActiveOnly filters to currently active sessions. - ActiveOnly bool - // StartedAfter filters to sessions started after this time. - StartedAfter *time.Time -} - -// ListOptions provides pagination and ordering options. -type ListOptions struct { - Limit int - Offset int - OrderBy string - Asc bool -} - -// CertificateStore defines persistence operations for SSH certificates. -type CertificateStore interface { - // CreateCertificate persists a new certificate record. - CreateCertificate(ctx context.Context, cert *CertificateRecord) error - - // GetCertificate retrieves a certificate by ID. - GetCertificate(ctx context.Context, id string) (*CertificateRecord, error) - - // GetCertificateBySerial retrieves a certificate by serial number. - GetCertificateBySerial(ctx context.Context, serial uint64) (*CertificateRecord, error) - - // ListCertificates retrieves certificates matching the filter. - ListCertificates(ctx context.Context, filter CertificateFilter, opts *ListOptions) ([]*CertificateRecord, error) - - // UpdateCertificateStatus updates the status of a certificate. - UpdateCertificateStatus(ctx context.Context, id string, status CertStatus) error - - // RevokeCertificate marks a certificate as revoked. - RevokeCertificate(ctx context.Context, id string, reason string) error - - // UpdateCertificateLastUsed updates the last used timestamp. - UpdateCertificateLastUsed(ctx context.Context, id string, at time.Time) error - - // ExpireCertificates marks all expired certificates as EXPIRED. - // Returns the number of certificates updated. - ExpireCertificates(ctx context.Context) (int, error) - - // DeleteCertificate removes a certificate record (for cleanup). 
- DeleteCertificate(ctx context.Context, id string) error - - // CreateSession persists a new access session record. - CreateSession(ctx context.Context, session *AccessSession) error - - // GetSession retrieves a session by ID. - GetSession(ctx context.Context, id string) (*AccessSession, error) - - // ListSessions retrieves sessions matching the filter. - ListSessions(ctx context.Context, filter SessionFilter, opts *ListOptions) ([]*AccessSession, error) - - // UpdateSessionStatus updates the status of a session. - UpdateSessionStatus(ctx context.Context, id string, status SessionStatus, reason string) error - - // EndSession marks a session as ended. - EndSession(ctx context.Context, id string, endedAt time.Time, reason string) error - - // GetActiveSessions returns all currently active sessions. - GetActiveSessions(ctx context.Context) ([]*AccessSession, error) - - // GetSessionsByCertificate returns all sessions for a certificate. - GetSessionsByCertificate(ctx context.Context, certID string) ([]*AccessSession, error) -} - -// AccessRequest represents a request for sandbox access. -// This is used as input to the access service. -type AccessRequest struct { - // SandboxID is the target sandbox. - SandboxID string `json:"sandbox_id"` - - // UserID identifies the requesting user. - UserID string `json:"user_id"` - - // PublicKey is the user's SSH public key. - PublicKey string `json:"public_key"` - - // TTLMinutes is the requested access duration in minutes (1-10). - TTLMinutes int `json:"ttl_minutes,omitempty"` - - // SourceIP is the IP address of the requester (populated by server). - SourceIP string `json:"-"` - - // RequestTime is when the request was made (populated by server). - RequestTime time.Time `json:"-"` -} - -// AccessResponse contains the issued certificate and connection details. -type AccessResponse struct { - // CertificateID is the ID of the issued certificate. 
- CertificateID string `json:"certificate_id"` - - // Certificate is the SSH certificate content. - Certificate string `json:"certificate"` - - // VMIPAddress is the IP address of the sandbox VM. - VMIPAddress string `json:"vm_ip_address"` - - // SSHPort is the SSH port (usually 22). - SSHPort int `json:"ssh_port"` - - // Username is the SSH username to use (usually "sandbox"). - Username string `json:"username"` - - // ValidUntil is when the certificate expires. - ValidUntil time.Time `json:"valid_until"` - - // TTLSeconds is the remaining validity in seconds. - TTLSeconds int `json:"ttl_seconds"` - - // ConnectCommand is an example SSH command for connecting. - ConnectCommand string `json:"connect_command"` -} - -// IsExpired returns true if the certificate has expired. -func (c *CertificateRecord) IsExpired() bool { - return time.Now().After(c.ValidBefore) -} - -// IsActive returns true if the certificate is active and not expired. -func (c *CertificateRecord) IsActive() bool { - now := time.Now() - return c.Status == CertStatusActive && - now.After(c.ValidAfter) && - now.Before(c.ValidBefore) -} - -// TimeToExpiry returns the duration until the certificate expires. -// Returns 0 if already expired. -func (c *CertificateRecord) TimeToExpiry() time.Duration { - remaining := time.Until(c.ValidBefore) - if remaining < 0 { - return 0 - } - return remaining -} - -// Duration returns the session duration. -// Returns 0 if the session hasn't ended. -func (s *AccessSession) Duration() time.Duration { - if s.EndedAt == nil { - return time.Since(s.StartedAt) - } - return s.EndedAt.Sub(s.StartedAt) -} - -// AuditEntry represents an audit log entry for SSH access events. -type AuditEntry struct { - // Timestamp is when the event occurred. - Timestamp time.Time `json:"timestamp"` - - // Event is the type of event (e.g., "cert_issued", "session_started", "session_ended"). - Event string `json:"event"` - - // UserID is the user involved. 
- UserID string `json:"user_id"` - - // SandboxID is the sandbox involved. - SandboxID string `json:"sandbox_id"` - - // CertificateID is the certificate involved (if applicable). - CertificateID string `json:"certificate_id,omitempty"` - - // SessionID is the session involved (if applicable). - SessionID string `json:"session_id,omitempty"` - - // SourceIP is the source IP address. - SourceIP string `json:"source_ip,omitempty"` - - // Details contains additional event-specific information. - Details map[string]interface{} `json:"details,omitempty"` -} diff --git a/fluid-remote/internal/sshca/vmadapter.go b/fluid-remote/internal/sshca/vmadapter.go deleted file mode 100755 index be5dad32..00000000 --- a/fluid-remote/internal/sshca/vmadapter.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package sshca provides SSH Certificate Authority management for ephemeral sandbox access. -package sshca - -import ( - "context" - "fmt" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/store" -) - -// SandboxStore defines the minimal interface needed to look up sandbox information. -type SandboxStore interface { - GetSandbox(ctx context.Context, id string) (*store.Sandbox, error) -} - -// VMAdapter implements VMInfoProvider by delegating to the sandbox store. -type VMAdapter struct { - store SandboxStore -} - -// NewVMAdapter creates a new VM adapter. -func NewVMAdapter(st SandboxStore) *VMAdapter { - return &VMAdapter{ - store: st, - } -} - -// GetSandboxIP returns the IP address of a sandbox. -func (a *VMAdapter) GetSandboxIP(ctx context.Context, sandboxID string) (string, error) { - sb, err := a.store.GetSandbox(ctx, sandboxID) - if err != nil { - return "", fmt.Errorf("get sandbox: %w", err) - } - - if sb.IPAddress == nil || *sb.IPAddress == "" { - return "", fmt.Errorf("sandbox %s has no IP address", sandboxID) - } - - return *sb.IPAddress, nil -} - -// GetSandboxVMName returns the VM name for a sandbox. 
-func (a *VMAdapter) GetSandboxVMName(ctx context.Context, sandboxID string) (string, error) { - sb, err := a.store.GetSandbox(ctx, sandboxID) - if err != nil { - return "", fmt.Errorf("get sandbox: %w", err) - } - - return sb.SandboxName, nil -} - -// IsSandboxRunning checks if the sandbox is in a running state. -func (a *VMAdapter) IsSandboxRunning(ctx context.Context, sandboxID string) (bool, error) { - sb, err := a.store.GetSandbox(ctx, sandboxID) - if err != nil { - return false, fmt.Errorf("get sandbox: %w", err) - } - - return sb.State == store.SandboxStateRunning, nil -} - -// Verify VMAdapter implements VMInfoProvider at compile time. -var _ VMInfoProvider = (*VMAdapter)(nil) diff --git a/fluid-remote/internal/sshkeys/manager.go b/fluid-remote/internal/sshkeys/manager.go deleted file mode 100755 index 1d018d0e..00000000 --- a/fluid-remote/internal/sshkeys/manager.go +++ /dev/null @@ -1,336 +0,0 @@ -// Package sshkeys provides managed SSH key lifecycle for sandbox command execution. -// -// This package handles ephemeral SSH keypair generation, certificate signing, -// and cleanup for the RunCommand endpoint. Keys are cached per-sandbox and -// automatically regenerated before certificate expiry. -package sshkeys - -import ( - "context" - "fmt" - "log/slog" - "os" - "path/filepath" - "sync" - "time" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/sshca" -) - -// KeyProvider provides SSH credentials for sandboxes. -type KeyProvider interface { - // GetCredentials returns SSH credentials for a sandbox. - // If valid cached credentials exist, they are returned. - // Otherwise, new credentials are generated. - GetCredentials(ctx context.Context, sandboxID, username string) (*Credentials, error) - - // CleanupSandbox removes all cached credentials for a sandbox. - // Called when sandbox is destroyed. - CleanupSandbox(ctx context.Context, sandboxID string) error - - // Close releases all resources. 
- Close() error -} - -// Credentials holds SSH key material for connecting to a sandbox. -type Credentials struct { - // PrivateKeyPath is the path to the private key file (0600 permissions). - PrivateKeyPath string - - // CertificatePath is the path to the certificate file (key-cert.pub). - CertificatePath string - - // PublicKey is the public key content. - PublicKey string - - // Username is the SSH username. - Username string - - // ValidUntil is when the certificate expires. - ValidUntil time.Time - - // SandboxID is the sandbox these credentials are for. - SandboxID string -} - -// IsExpired returns true if credentials are expired or will expire within margin. -func (c *Credentials) IsExpired(margin time.Duration) bool { - return time.Now().Add(margin).After(c.ValidUntil) -} - -// Config configures the KeyManager. -type Config struct { - // KeyDir is the base directory for storing keys (default: /tmp/sandbox-keys). - KeyDir string - - // CertificateTTL is the certificate lifetime (default: 5 minutes). - CertificateTTL time.Duration - - // RefreshMargin is how early to regenerate before expiry (default: 30 seconds). - RefreshMargin time.Duration - - // DefaultUsername is the default SSH username (default: "sandbox"). - DefaultUsername string -} - -// DefaultConfig returns sensible defaults. -func DefaultConfig() Config { - return Config{ - KeyDir: "/tmp/sandbox-keys", - CertificateTTL: 5 * time.Minute, - RefreshMargin: 30 * time.Second, - DefaultUsername: "sandbox", - } -} - -// KeyManager manages ephemeral SSH keys for sandboxes. -type KeyManager struct { - ca *sshca.CA - cfg Config - logger *slog.Logger - timeNowFn func() time.Time - - // Per-sandbox locks to prevent concurrent key generation. - mu sync.RWMutex - sandboxLocks map[string]*sync.Mutex - - // Cached credentials per sandbox. - credentials map[string]*Credentials -} - -// NewKeyManager creates a new key manager. 
-func NewKeyManager(ca *sshca.CA, cfg Config, logger *slog.Logger) (*KeyManager, error) { - if ca == nil { - return nil, fmt.Errorf("sshca.CA is required") - } - if logger == nil { - logger = slog.Default() - } - - // Apply defaults. - if cfg.KeyDir == "" { - cfg.KeyDir = DefaultConfig().KeyDir - } - if cfg.CertificateTTL <= 0 { - cfg.CertificateTTL = DefaultConfig().CertificateTTL - } - if cfg.RefreshMargin <= 0 { - cfg.RefreshMargin = DefaultConfig().RefreshMargin - } - if cfg.DefaultUsername == "" { - cfg.DefaultUsername = DefaultConfig().DefaultUsername - } - - // Ensure key directory exists. - if err := os.MkdirAll(cfg.KeyDir, 0o700); err != nil { - return nil, fmt.Errorf("create key directory %s: %w", cfg.KeyDir, err) - } - - return &KeyManager{ - ca: ca, - cfg: cfg, - logger: logger, - timeNowFn: time.Now, - sandboxLocks: make(map[string]*sync.Mutex), - credentials: make(map[string]*Credentials), - }, nil -} - -// GetCredentials implements KeyProvider. -func (m *KeyManager) GetCredentials(ctx context.Context, sandboxID, username string) (*Credentials, error) { - if sandboxID == "" { - return nil, fmt.Errorf("sandboxID is required") - } - if username == "" { - username = m.cfg.DefaultUsername - } - - // Get per-sandbox lock. - lock := m.getSandboxLock(sandboxID) - lock.Lock() - defer lock.Unlock() - - // Check cache for valid credentials. - cacheKey := m.cacheKey(sandboxID, username) - m.mu.RLock() - creds, ok := m.credentials[cacheKey] - m.mu.RUnlock() - - if ok && !creds.IsExpired(m.cfg.RefreshMargin) { - m.logger.Debug("using cached credentials", - "sandbox_id", sandboxID, - "username", username, - "valid_until", creds.ValidUntil, - ) - return creds, nil - } - - // Generate new credentials. 
- m.logger.Info("generating new credentials", - "sandbox_id", sandboxID, - "username", username, - "ttl", m.cfg.CertificateTTL, - ) - - newCreds, err := m.generateCredentials(ctx, sandboxID, username) - if err != nil { - return nil, fmt.Errorf("generate credentials: %w", err) - } - - // Cache the credentials. - m.mu.Lock() - m.credentials[cacheKey] = newCreds - m.mu.Unlock() - - return newCreds, nil -} - -// CleanupSandbox implements KeyProvider. -func (m *KeyManager) CleanupSandbox(ctx context.Context, sandboxID string) error { - if sandboxID == "" { - return fmt.Errorf("sandboxID is required") - } - - // Get per-sandbox lock. - lock := m.getSandboxLock(sandboxID) - lock.Lock() - defer lock.Unlock() - - m.logger.Info("cleaning up sandbox credentials", "sandbox_id", sandboxID) - - // Remove from cache (all usernames for this sandbox). - m.mu.Lock() - for key := range m.credentials { - if m.extractSandboxID(key) == sandboxID { - delete(m.credentials, key) - } - } - m.mu.Unlock() - - // Remove key files. - keyDir := m.sandboxKeyDir(sandboxID) - if err := os.RemoveAll(keyDir); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("remove key directory %s: %w", keyDir, err) - } - - // Clean up the sandbox lock. - m.mu.Lock() - delete(m.sandboxLocks, sandboxID) - m.mu.Unlock() - - return nil -} - -// Close implements KeyProvider. -func (m *KeyManager) Close() error { - m.mu.Lock() - defer m.mu.Unlock() - - m.logger.Info("closing key manager", "cached_credentials", len(m.credentials)) - - // Clear all cached credentials (files are left for cleanup on sandbox destroy). - m.credentials = make(map[string]*Credentials) - m.sandboxLocks = make(map[string]*sync.Mutex) - - return nil -} - -// getSandboxLock returns the mutex for a specific sandbox, creating one if needed. 
-func (m *KeyManager) getSandboxLock(sandboxID string) *sync.Mutex { - m.mu.Lock() - defer m.mu.Unlock() - - if lock, ok := m.sandboxLocks[sandboxID]; ok { - return lock - } - - lock := &sync.Mutex{} - m.sandboxLocks[sandboxID] = lock - return lock -} - -// cacheKey generates a cache key for sandbox+username. -func (m *KeyManager) cacheKey(sandboxID, username string) string { - return sandboxID + ":" + username -} - -// extractSandboxID extracts the sandbox ID from a cache key. -func (m *KeyManager) extractSandboxID(cacheKey string) string { - for i := 0; i < len(cacheKey); i++ { - if cacheKey[i] == ':' { - return cacheKey[:i] - } - } - return cacheKey -} - -// sandboxKeyDir returns the directory for a sandbox's keys. -func (m *KeyManager) sandboxKeyDir(sandboxID string) string { - return filepath.Join(m.cfg.KeyDir, sandboxID) -} - -// generateCredentials creates new SSH credentials for a sandbox. -func (m *KeyManager) generateCredentials(ctx context.Context, sandboxID, username string) (*Credentials, error) { - // Create sandbox key directory. - keyDir := m.sandboxKeyDir(sandboxID) - if err := os.MkdirAll(keyDir, 0o700); err != nil { - return nil, fmt.Errorf("create sandbox key directory: %w", err) - } - - // Generate ephemeral keypair. - comment := fmt.Sprintf("sandbox-%s-%s", sandboxID, username) - privateKey, publicKey, err := sshca.GenerateUserKeyPair(comment) - if err != nil { - return nil, fmt.Errorf("generate keypair: %w", err) - } - - // Write key files. - privateKeyPath := filepath.Join(keyDir, "key") - certPath := filepath.Join(keyDir, "key-cert.pub") - - if err := os.WriteFile(privateKeyPath, []byte(privateKey), 0o600); err != nil { - return nil, fmt.Errorf("write private key: %w", err) - } - - // Request certificate from CA. 
- certReq := sshca.CertificateRequest{ - UserID: fmt.Sprintf("sandbox-runner:%s", sandboxID), - VMID: sandboxID, - SandboxID: sandboxID, - PublicKey: publicKey, - TTL: m.cfg.CertificateTTL, - Principals: []string{username}, - SourceIP: "internal", - RequestTime: m.timeNowFn(), - } - - cert, err := m.ca.IssueCertificate(ctx, &certReq) - if err != nil { - // Clean up the private key on failure. - _ = os.Remove(privateKeyPath) - return nil, fmt.Errorf("issue certificate: %w", err) - } - - // Write certificate. - if err := os.WriteFile(certPath, []byte(cert.Certificate), 0o644); err != nil { - _ = os.Remove(privateKeyPath) - return nil, fmt.Errorf("write certificate: %w", err) - } - - m.logger.Debug("generated credentials", - "sandbox_id", sandboxID, - "username", username, - "private_key_path", privateKeyPath, - "cert_path", certPath, - "valid_until", cert.ValidBefore, - ) - - return &Credentials{ - PrivateKeyPath: privateKeyPath, - CertificatePath: certPath, - PublicKey: publicKey, - Username: username, - ValidUntil: cert.ValidBefore, - SandboxID: sandboxID, - }, nil -} diff --git a/fluid-remote/internal/sshkeys/manager_test.go b/fluid-remote/internal/sshkeys/manager_test.go deleted file mode 100755 index cdcbde94..00000000 --- a/fluid-remote/internal/sshkeys/manager_test.go +++ /dev/null @@ -1,492 +0,0 @@ -package sshkeys - -import ( - "context" - "os" - "path/filepath" - "sync" - "testing" - "time" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/sshca" -) - -// testCA creates a real CA for testing. -// Returns the CA and a cleanup function. -func testCA(t *testing.T) (*sshca.CA, func()) { - t.Helper() - - tempDir, err := os.MkdirTemp("", "sshkeys-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - - keyPath := filepath.Join(tempDir, "test_ca") - - // Generate CA keypair. 
- if err := sshca.GenerateCA(keyPath, "test-ca"); err != nil { - _ = os.RemoveAll(tempDir) - t.Fatalf("failed to generate CA: %v", err) - } - - cfg := sshca.Config{ - CAKeyPath: keyPath, - CAPubKeyPath: keyPath + ".pub", - WorkDir: filepath.Join(tempDir, "work"), - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - DefaultPrincipals: []string{"sandbox"}, - EnforceKeyPermissions: false, // Disable for tests - } - - ca, err := sshca.NewCA(cfg) - if err != nil { - _ = os.RemoveAll(tempDir) - t.Fatalf("failed to create CA: %v", err) - } - - if err := ca.Initialize(context.Background()); err != nil { - _ = os.RemoveAll(tempDir) - t.Fatalf("failed to initialize CA: %v", err) - } - - return ca, func() { - _ = os.RemoveAll(tempDir) - } -} - -func TestNewKeyManager(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - cfg := Config{ - KeyDir: tempDir, - CertificateTTL: 5 * time.Minute, - RefreshMargin: 30 * time.Second, - DefaultUsername: "sandbox", - } - - km, err := NewKeyManager(ca, cfg, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - if km.ca == nil { - t.Error("CA is nil") - } - if km.cfg.KeyDir != tempDir { - t.Errorf("KeyDir mismatch: got %s, want %s", km.cfg.KeyDir, tempDir) - } -} - -func TestNewKeyManager_NilCA(t *testing.T) { - _, err := NewKeyManager(nil, Config{}, nil) - if err == nil { - t.Error("expected error for nil CA") - } -} - -func TestNewKeyManager_DefaultConfig(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - // Empty config should use defaults. 
- km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - defaults := DefaultConfig() - if km.cfg.CertificateTTL != defaults.CertificateTTL { - t.Errorf("CertificateTTL mismatch: got %v, want %v", km.cfg.CertificateTTL, defaults.CertificateTTL) - } - if km.cfg.RefreshMargin != defaults.RefreshMargin { - t.Errorf("RefreshMargin mismatch: got %v, want %v", km.cfg.RefreshMargin, defaults.RefreshMargin) - } - if km.cfg.DefaultUsername != defaults.DefaultUsername { - t.Errorf("DefaultUsername mismatch: got %s, want %s", km.cfg.DefaultUsername, defaults.DefaultUsername) - } -} - -func TestGetCredentials_GeneratesNewKeys(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir, CertificateTTL: 5 * time.Minute}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - creds, err := km.GetCredentials(ctx, "SBX-123", "sandbox") - if err != nil { - t.Fatalf("GetCredentials failed: %v", err) - } - - // Check credentials. - if creds.SandboxID != "SBX-123" { - t.Errorf("SandboxID mismatch: got %s, want SBX-123", creds.SandboxID) - } - if creds.Username != "sandbox" { - t.Errorf("Username mismatch: got %s, want sandbox", creds.Username) - } - if creds.PrivateKeyPath == "" { - t.Error("PrivateKeyPath is empty") - } - if creds.CertificatePath == "" { - t.Error("CertificatePath is empty") - } - if creds.ValidUntil.IsZero() { - t.Error("ValidUntil is zero") - } - - // Check files exist. 
- if _, err := os.Stat(creds.PrivateKeyPath); os.IsNotExist(err) { - t.Error("private key file does not exist") - } - if _, err := os.Stat(creds.CertificatePath); os.IsNotExist(err) { - t.Error("certificate file does not exist") - } -} - -func TestGetCredentials_ReturnsCached(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir, CertificateTTL: 5 * time.Minute}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - - // First call generates. - creds1, err := km.GetCredentials(ctx, "SBX-123", "sandbox") - if err != nil { - t.Fatalf("GetCredentials (1) failed: %v", err) - } - - // Second call should return cached. - creds2, err := km.GetCredentials(ctx, "SBX-123", "sandbox") - if err != nil { - t.Fatalf("GetCredentials (2) failed: %v", err) - } - - // Should be the same credentials. - if creds1.PrivateKeyPath != creds2.PrivateKeyPath { - t.Error("expected cached credentials to be returned") - } -} - -func TestGetCredentials_RegeneratesOnExpiry(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{ - KeyDir: tempDir, - CertificateTTL: 5 * time.Minute, - RefreshMargin: 30 * time.Second, - }, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - // First call generates. 
- creds1, err := km.GetCredentials(ctx, "SBX-123", "sandbox") - if err != nil { - t.Fatalf("GetCredentials (1) failed: %v", err) - } - - // Simulate time passing by modifying cached credentials to be expired. - km.mu.Lock() - for key, creds := range km.credentials { - creds.ValidUntil = time.Now().Add(-1 * time.Minute) // Already expired - km.credentials[key] = creds - } - km.mu.Unlock() - - // Second call should regenerate. - creds2, err := km.GetCredentials(ctx, "SBX-123", "sandbox") - if err != nil { - t.Fatalf("GetCredentials (2) failed: %v", err) - } - - // ValidUntil should be different (new certificate was issued). - // Note: paths are the same because sandbox ID is the same, but the - // certificate content and expiry time will be different. - if creds2.ValidUntil.Before(time.Now()) { - t.Error("expected new credentials with valid expiry after regeneration") - } - // New expiry should be after the old (expired) one. - if !creds2.ValidUntil.After(creds1.ValidUntil) { - t.Error("expected new credentials to have later expiry than expired ones") - } -} - -func TestGetCredentials_DefaultUsername(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{ - KeyDir: tempDir, - DefaultUsername: "myuser", - }, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - - // Empty username should use default. 
- creds, err := km.GetCredentials(ctx, "SBX-123", "") - if err != nil { - t.Fatalf("GetCredentials failed: %v", err) - } - - if creds.Username != "myuser" { - t.Errorf("Username mismatch: got %s, want myuser", creds.Username) - } -} - -func TestGetCredentials_ConcurrentSafety(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - sandboxID := "SBX-CONCURRENT" - - // Launch multiple goroutines requesting the same sandbox's credentials. - var wg sync.WaitGroup - results := make(chan *Credentials, 10) - errors := make(chan error, 10) - - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - creds, err := km.GetCredentials(ctx, sandboxID, "sandbox") - if err != nil { - errors <- err - return - } - results <- creds - }() - } - - wg.Wait() - close(results) - close(errors) - - // Check for errors. - for err := range errors { - t.Errorf("GetCredentials error: %v", err) - } - - // All results should have the same private key path (cached). 
- var firstPath string - for creds := range results { - if firstPath == "" { - firstPath = creds.PrivateKeyPath - } else if creds.PrivateKeyPath != firstPath { - t.Error("concurrent calls returned different credentials") - } - } -} - -func TestCleanupSandbox_RemovesFiles(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - - // Generate credentials. - creds, err := km.GetCredentials(ctx, "SBX-CLEANUP", "sandbox") - if err != nil { - t.Fatalf("GetCredentials failed: %v", err) - } - - // Verify files exist. - if _, err := os.Stat(creds.PrivateKeyPath); os.IsNotExist(err) { - t.Fatal("private key file should exist") - } - - // Cleanup. - if err := km.CleanupSandbox(ctx, "SBX-CLEANUP"); err != nil { - t.Fatalf("CleanupSandbox failed: %v", err) - } - - // Verify files are gone. - if _, err := os.Stat(creds.PrivateKeyPath); !os.IsNotExist(err) { - t.Error("private key file should be deleted") - } - sandboxDir := km.sandboxKeyDir("SBX-CLEANUP") - if _, err := os.Stat(sandboxDir); !os.IsNotExist(err) { - t.Error("sandbox key directory should be deleted") - } - - // Verify cache is cleared. 
- km.mu.RLock() - if len(km.credentials) > 0 { - t.Error("credentials should be cleared from cache") - } - km.mu.RUnlock() -} - -func TestCleanupSandbox_EmptySandboxID(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - err = km.CleanupSandbox(context.Background(), "") - if err == nil { - t.Error("expected error for empty sandboxID") - } -} - -func TestKeyFilePermissions(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - creds, err := km.GetCredentials(ctx, "SBX-PERM", "sandbox") - if err != nil { - t.Fatalf("GetCredentials failed: %v", err) - } - - // Check private key permissions. 
- info, err := os.Stat(creds.PrivateKeyPath) - if err != nil { - t.Fatalf("failed to stat private key: %v", err) - } - perm := info.Mode().Perm() - if perm != 0o600 { - t.Errorf("private key has wrong permissions: %o, expected 0600", perm) - } -} - -func TestCredentials_IsExpired(t *testing.T) { - tests := []struct { - name string - validUntil time.Time - margin time.Duration - want bool - }{ - { - name: "not expired", - validUntil: time.Now().Add(10 * time.Minute), - margin: 30 * time.Second, - want: false, - }, - { - name: "expired", - validUntil: time.Now().Add(-1 * time.Minute), - margin: 30 * time.Second, - want: true, - }, - { - name: "within margin", - validUntil: time.Now().Add(20 * time.Second), - margin: 30 * time.Second, - want: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Credentials{ValidUntil: tt.validUntil} - if got := c.IsExpired(tt.margin); got != tt.want { - t.Errorf("IsExpired() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/fluid-remote/internal/store/postgres/postgres.go b/fluid-remote/internal/store/postgres/postgres.go deleted file mode 100755 index 032439d7..00000000 --- a/fluid-remote/internal/store/postgres/postgres.go +++ /dev/null @@ -1,1241 +0,0 @@ -package postgres - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "time" - - "github.com/jackc/pgconn" - "gorm.io/datatypes" - "gorm.io/driver/postgres" - "gorm.io/gorm" - "gorm.io/gorm/logger" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/store" -) - -// Ensure interface compliance. -var ( - _ store.Store = (*postgresStore)(nil) - _ store.DataStore = (*postgresStore)(nil) -) - -type postgresStore struct { - db *gorm.DB - conf store.Config -} - -// New creates a Store backed by Postgres + GORM. 
-func New(ctx context.Context, cfg store.Config) (store.Store, error) { - if cfg.DatabaseURL == "" { - return nil, fmt.Errorf("postgres: missing DatabaseURL") - } - - db, err := gorm.Open( - postgres.Open(cfg.DatabaseURL), - &gorm.Config{ - NowFunc: func() time.Time { return time.Now().UTC() }, - Logger: logger.Default.LogMode(logger.Silent), - }, - ) - if err != nil { - return nil, fmt.Errorf("postgres: open: %w", err) - } - - sqlDB, err := db.DB() - if err != nil { - return nil, fmt.Errorf("postgres: sql.DB handle: %w", err) - } - - if cfg.MaxOpenConns > 0 { - sqlDB.SetMaxOpenConns(cfg.MaxOpenConns) - } - if cfg.MaxIdleConns > 0 { - sqlDB.SetMaxIdleConns(cfg.MaxIdleConns) - } - if cfg.ConnMaxLifetime > 0 { - sqlDB.SetConnMaxLifetime(cfg.ConnMaxLifetime) - } - - pg := &postgresStore{ - db: db.WithContext(ctx), - conf: cfg, - } - - if cfg.AutoMigrate && !cfg.ReadOnly { - if err := pg.autoMigrate(ctx); err != nil { - _ = sqlDB.Close() - return nil, err - } - } - - if err := pg.Ping(ctx); err != nil { - _ = sqlDB.Close() - return nil, err - } - - return pg, nil -} - -// NewWithDB wraps an existing *gorm.DB (useful for tests). 
-func NewWithDB(db *gorm.DB, cfg store.Config) store.Store { - return &postgresStore{db: db, conf: cfg} -} - -func (s *postgresStore) Config() store.Config { - return s.conf -} - -func (s *postgresStore) Close() error { - sqlDB, err := s.db.DB() - if err != nil { - return err - } - return sqlDB.Close() -} - -func (s *postgresStore) Ping(ctx context.Context) error { - sqlDB, err := s.db.DB() - if err != nil { - return err - } - return sqlDB.PingContext(ctx) -} - -func (s *postgresStore) WithTx(ctx context.Context, fn func(tx store.DataStore) error) error { - return s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { - return fn(&postgresStore{db: tx, conf: s.conf}) - }) -} - -// --- Sandbox --- - -func (s *postgresStore) CreateSandbox(ctx context.Context, sb *store.Sandbox) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: CreateSandbox: %w", store.ErrInvalid) - } - if sb == nil || sb.ID == "" || sb.JobID == "" || sb.AgentID == "" || sb.SandboxName == "" || - sb.BaseImage == "" || sb.Network == "" || sb.State == "" { - return fmt.Errorf("postgres: CreateSandbox: %w", store.ErrInvalid) - } - - now := time.Now().UTC() - sb.CreatedAt = now - sb.UpdatedAt = now - - if err := s.db.WithContext(ctx).Create(sandboxToModel(sb)).Error; err != nil { - return mapDBError(err) - } - return nil -} - -func (s *postgresStore) GetSandbox(ctx context.Context, id string) (*store.Sandbox, error) { - var model SandboxModel - if err := s.db.WithContext(ctx). - Where("id = ? AND deleted_at IS NULL", id). - First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return sandboxFromModel(&model), nil -} - -func (s *postgresStore) GetSandboxByVMName(ctx context.Context, vmName string) (*store.Sandbox, error) { - var model SandboxModel - if err := s.db.WithContext(ctx). - Where("vm_name = ? AND deleted_at IS NULL", vmName). 
- First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return sandboxFromModel(&model), nil -} - -func (s *postgresStore) ListSandboxes(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - tx := s.db.WithContext(ctx).Model(&SandboxModel{}).Where("deleted_at IS NULL") - if filter.AgentID != nil { - tx = tx.Where("agent_id = ?", *filter.AgentID) - } - if filter.JobID != nil { - tx = tx.Where("job_id = ?", *filter.JobID) - } - if filter.BaseImage != nil { - tx = tx.Where("base_image = ?", *filter.BaseImage) - } - if filter.State != nil { - tx = tx.Where("state = ?", string(*filter.State)) - } - if filter.VMName != nil { - tx = tx.Where("vm_name = ?", *filter.VMName) - } - - tx = applyListOptions(tx, opt, map[string]string{ - "created_at": "created_at", - "updated_at": "updated_at", - "vm_name": "vm_name", - }) - - var models []SandboxModel - if err := tx.Find(&models).Error; err != nil { - return nil, mapDBError(err) - } - - out := make([]*store.Sandbox, 0, len(models)) - for i := range models { - out = append(out, sandboxFromModel(&models[i])) - } - return out, nil -} - -func (s *postgresStore) UpdateSandbox(ctx context.Context, sb *store.Sandbox) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: UpdateSandbox: %w", store.ErrInvalid) - } - if sb == nil || sb.ID == "" { - return fmt.Errorf("postgres: UpdateSandbox: %w", store.ErrInvalid) - } - sb.UpdatedAt = time.Now().UTC() - model := sandboxToModel(sb) - - res := s.db.WithContext(ctx). - Model(&SandboxModel{}). - Where("id = ? AND deleted_at IS NULL", sb.ID). 
- Updates(map[string]any{ - "job_id": model.JobID, - "agent_id": model.AgentID, - "sandbox_name": model.SandboxName, - "base_image": model.BaseImage, - "network": model.Network, - "ip": model.IPAddress, - "state": model.State, - "ttl_seconds": model.TTLSeconds, - "updated_at": model.UpdatedAt, - }) - - if err := mapDBError(res.Error); err != nil { - return err - } - if res.RowsAffected == 0 { - return store.ErrNotFound - } - return nil -} - -func (s *postgresStore) UpdateSandboxState(ctx context.Context, id string, newState store.SandboxState, ipAddr *string) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: UpdateSandboxState: %w", store.ErrInvalid) - } - if id == "" { - return fmt.Errorf("postgres: UpdateSandboxState: %w", store.ErrInvalid) - } - - res := s.db.WithContext(ctx).Model(&SandboxModel{}). - Where("id = ? AND deleted_at IS NULL", id). - Updates(map[string]any{ - "state": string(newState), - "ip": copyString(ipAddr), - "updated_at": time.Now().UTC(), - }) - if err := mapDBError(res.Error); err != nil { - return err - } - if res.RowsAffected == 0 { - return store.ErrNotFound - } - return nil -} - -func (s *postgresStore) DeleteSandbox(ctx context.Context, id string) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: DeleteSandbox: %w", store.ErrInvalid) - } - if id == "" { - return fmt.Errorf("postgres: DeleteSandbox: %w", store.ErrInvalid) - } - now := time.Now().UTC() - res := s.db.WithContext(ctx).Model(&SandboxModel{}). - Where("id = ? AND deleted_at IS NULL", id). - Updates(map[string]any{ - "state": string(store.SandboxStateDestroyed), - "deleted_at": &now, - "updated_at": now, - }) - if err := mapDBError(res.Error); err != nil { - return err - } - if res.RowsAffected == 0 { - return store.ErrNotFound - } - return nil -} - -// ListExpiredSandboxes returns sandboxes that have exceeded their TTL. 
-// A sandbox is considered expired if: -// - TTLSeconds is set AND created_at + ttl_seconds < now -// - OR defaultTTL > 0, TTLSeconds is NULL, AND created_at + defaultTTL < now -func (s *postgresStore) ListExpiredSandboxes(ctx context.Context, defaultTTL time.Duration) ([]*store.Sandbox, error) { - now := time.Now().UTC() - defaultTTLSeconds := int(defaultTTL.Seconds()) - - // Build query for expired sandboxes in RUNNING or STARTING state - tx := s.db.WithContext(ctx).Model(&SandboxModel{}). - Where("deleted_at IS NULL"). - Where("state IN ?", []string{string(store.SandboxStateRunning), string(store.SandboxStateStarting)}) - - // Condition: either sandbox has its own TTL that's expired, or default TTL is set and sandbox has no TTL - if defaultTTLSeconds > 0 { - // With default TTL: (has TTL AND expired) OR (no TTL AND default expired) - tx = tx.Where( - "(ttl_seconds IS NOT NULL AND created_at + (ttl_seconds || ' seconds')::interval < ?) "+ - "OR (ttl_seconds IS NULL AND created_at + (? 
|| ' seconds')::interval < ?)", - now, defaultTTLSeconds, now, - ) - } else { - // No default TTL: only check sandboxes with explicit TTL - tx = tx.Where("ttl_seconds IS NOT NULL AND created_at + (ttl_seconds || ' seconds')::interval < ?", now) - } - - var models []SandboxModel - if err := tx.Find(&models).Error; err != nil { - return nil, mapDBError(err) - } - - out := make([]*store.Sandbox, 0, len(models)) - for i := range models { - out = append(out, sandboxFromModel(&models[i])) - } - return out, nil -} - -// --- Snapshot --- - -func (s *postgresStore) CreateSnapshot(ctx context.Context, sn *store.Snapshot) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: CreateSnapshot: %w", store.ErrInvalid) - } - if sn == nil || sn.ID == "" || sn.SandboxID == "" || sn.Name == "" || sn.Ref == "" || sn.Kind == "" { - return fmt.Errorf("postgres: CreateSnapshot: %w", store.ErrInvalid) - } - if sn.CreatedAt.IsZero() { - sn.CreatedAt = time.Now().UTC() - } - if err := s.db.WithContext(ctx).Create(snapshotToModel(sn)).Error; err != nil { - return mapDBError(err) - } - return nil -} - -func (s *postgresStore) GetSnapshot(ctx context.Context, id string) (*store.Snapshot, error) { - var model SnapshotModel - if err := s.db.WithContext(ctx).Where("id = ?", id).First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return snapshotFromModel(&model), nil -} - -func (s *postgresStore) GetSnapshotByName(ctx context.Context, sandboxID, name string) (*store.Snapshot, error) { - var model SnapshotModel - if err := s.db.WithContext(ctx). - Where("sandbox_id = ? AND name = ?", sandboxID, name). 
- First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return snapshotFromModel(&model), nil -} - -func (s *postgresStore) ListSnapshots(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Snapshot, error) { - tx := s.db.WithContext(ctx).Model(&SnapshotModel{}).Where("sandbox_id = ?", sandboxID) - tx = applyListOptions(tx, opt, map[string]string{ - "created_at": "created_at", - "name": "name", - }) - - var models []SnapshotModel - if err := tx.Find(&models).Error; err != nil { - return nil, mapDBError(err) - } - out := make([]*store.Snapshot, 0, len(models)) - for i := range models { - out = append(out, snapshotFromModel(&models[i])) - } - return out, nil -} - -// --- Command --- - -func (s *postgresStore) SaveCommand(ctx context.Context, cmd *store.Command) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: SaveCommand: %w", store.ErrInvalid) - } - if cmd == nil || cmd.ID == "" || cmd.SandboxID == "" || cmd.Command == "" { - return fmt.Errorf("postgres: SaveCommand: %w", store.ErrInvalid) - } - if cmd.StartedAt.IsZero() { - cmd.StartedAt = time.Now().UTC() - } - if cmd.EndedAt.IsZero() { - cmd.EndedAt = time.Now().UTC() - } - - if err := s.db.WithContext(ctx).Create(commandToModel(cmd)).Error; err != nil { - return mapDBError(err) - } - return nil -} - -func (s *postgresStore) GetCommand(ctx context.Context, id string) (*store.Command, error) { - var model CommandModel - if err := s.db.WithContext(ctx).Where("id = ?", id).First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return commandFromModel(&model), nil -} - -func (s *postgresStore) ListCommands(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Command, error) { - tx := s.db.WithContext(ctx).Model(&CommandModel{}).Where("sandbox_id = ?", sandboxID) - tx = applyListOptions(tx, opt, map[string]string{ - "started_at": "started_at", - "ended_at": "ended_at", - }) - - var models []CommandModel - if err := 
tx.Find(&models).Error; err != nil { - return nil, mapDBError(err) - } - out := make([]*store.Command, 0, len(models)) - for i := range models { - out = append(out, commandFromModel(&models[i])) - } - return out, nil -} - -// --- Diff --- - -func (s *postgresStore) SaveDiff(ctx context.Context, d *store.Diff) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: SaveDiff: %w", store.ErrInvalid) - } - if d == nil || d.ID == "" || d.SandboxID == "" || d.FromSnapshot == "" || d.ToSnapshot == "" { - return fmt.Errorf("postgres: SaveDiff: %w", store.ErrInvalid) - } - if d.CreatedAt.IsZero() { - d.CreatedAt = time.Now().UTC() - } - model, err := diffToModel(d) - if err != nil { - return err - } - if err := s.db.WithContext(ctx).Create(model).Error; err != nil { - return mapDBError(err) - } - return nil -} - -func (s *postgresStore) GetDiff(ctx context.Context, id string) (*store.Diff, error) { - var model DiffModel - if err := s.db.WithContext(ctx).Where("id = ?", id).First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return diffFromModel(&model) -} - -func (s *postgresStore) GetDiffBySnapshots(ctx context.Context, sandboxID, fromSnapshot, toSnapshot string) (*store.Diff, error) { - var model DiffModel - if err := s.db.WithContext(ctx). - Where("sandbox_id = ? AND from_snapshot = ? AND to_snapshot = ?", sandboxID, fromSnapshot, toSnapshot). 
- First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return diffFromModel(&model) -} - -// --- ChangeSet --- - -func (s *postgresStore) CreateChangeSet(ctx context.Context, cs *store.ChangeSet) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: CreateChangeSet: %w", store.ErrInvalid) - } - if cs == nil || cs.ID == "" || cs.JobID == "" || cs.SandboxID == "" || cs.DiffID == "" || - cs.PathAnsible == "" || cs.PathPuppet == "" { - return fmt.Errorf("postgres: CreateChangeSet: %w", store.ErrInvalid) - } - if cs.CreatedAt.IsZero() { - cs.CreatedAt = time.Now().UTC() - } - if err := s.db.WithContext(ctx).Create(changeSetToModel(cs)).Error; err != nil { - return mapDBError(err) - } - return nil -} - -func (s *postgresStore) GetChangeSet(ctx context.Context, id string) (*store.ChangeSet, error) { - var model ChangeSetModel - if err := s.db.WithContext(ctx).Where("id = ?", id).First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return changeSetFromModel(&model), nil -} - -func (s *postgresStore) GetChangeSetByJob(ctx context.Context, jobID string) (*store.ChangeSet, error) { - var model ChangeSetModel - if err := s.db.WithContext(ctx).Where("job_id = ?", jobID).First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return changeSetFromModel(&model), nil -} - -// --- Publication --- - -func (s *postgresStore) CreatePublication(ctx context.Context, p *store.Publication) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: CreatePublication: %w", store.ErrInvalid) - } - if p == nil || p.ID == "" || p.JobID == "" || p.RepoURL == "" || p.Branch == "" || p.Status == "" { - return fmt.Errorf("postgres: CreatePublication: %w", store.ErrInvalid) - } - now := time.Now().UTC() - if p.CreatedAt.IsZero() { - p.CreatedAt = now - } - if p.UpdatedAt.IsZero() { - p.UpdatedAt = now - } - if err := s.db.WithContext(ctx).Create(publicationToModel(p)).Error; err != nil { - return mapDBError(err) - } - return nil -} - -func 
(s *postgresStore) UpdatePublicationStatus(ctx context.Context, id string, status store.PublicationStatus, commitSHA, prURL, errMsg *string) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: UpdatePublicationStatus: %w", store.ErrInvalid) - } - now := time.Now().UTC() - res := s.db.WithContext(ctx).Model(&PublicationModel{}). - Where("id = ?", id). - Updates(map[string]any{ - "status": string(status), - "commit_sha": copyString(commitSHA), - "pr_url": copyString(prURL), - "error_msg": copyString(errMsg), - "updated_at": now, - }) - if err := mapDBError(res.Error); err != nil { - return err - } - if res.RowsAffected == 0 { - return store.ErrNotFound - } - return nil -} - -func (s *postgresStore) GetPublication(ctx context.Context, id string) (*store.Publication, error) { - var model PublicationModel - if err := s.db.WithContext(ctx).Where("id = ?", id).First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return publicationFromModel(&model), nil -} - -// --- Playbook --- - -func (s *postgresStore) CreatePlaybook(ctx context.Context, pb *store.Playbook) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: CreatePlaybook: %w", store.ErrInvalid) - } - if pb == nil || pb.ID == "" || pb.Name == "" || pb.Hosts == "" { - return fmt.Errorf("postgres: CreatePlaybook: %w", store.ErrInvalid) - } - now := time.Now().UTC() - pb.CreatedAt = now - pb.UpdatedAt = now - - if err := s.db.WithContext(ctx).Create(playbookToModel(pb)).Error; err != nil { - return mapDBError(err) - } - return nil -} - -func (s *postgresStore) GetPlaybook(ctx context.Context, id string) (*store.Playbook, error) { - var model PlaybookModel - if err := s.db.WithContext(ctx).Where("id = ?", id).First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return playbookFromModel(&model), nil -} - -func (s *postgresStore) GetPlaybookByName(ctx context.Context, name string) (*store.Playbook, error) { - var model PlaybookModel - if err := 
s.db.WithContext(ctx).Where("name = ?", name).First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return playbookFromModel(&model), nil -} - -func (s *postgresStore) ListPlaybooks(ctx context.Context, opt *store.ListOptions) ([]*store.Playbook, error) { - tx := s.db.WithContext(ctx).Model(&PlaybookModel{}) - tx = applyListOptions(tx, opt, map[string]string{ - "created_at": "created_at", - "updated_at": "updated_at", - "name": "name", - }) - - var models []PlaybookModel - if err := tx.Find(&models).Error; err != nil { - return nil, mapDBError(err) - } - out := make([]*store.Playbook, 0, len(models)) - for i := range models { - out = append(out, playbookFromModel(&models[i])) - } - return out, nil -} - -func (s *postgresStore) UpdatePlaybook(ctx context.Context, pb *store.Playbook) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: UpdatePlaybook: %w", store.ErrInvalid) - } - if pb == nil || pb.ID == "" { - return fmt.Errorf("postgres: UpdatePlaybook: %w", store.ErrInvalid) - } - pb.UpdatedAt = time.Now().UTC() - model := playbookToModel(pb) - - res := s.db.WithContext(ctx). - Model(&PlaybookModel{}). - Where("id = ?", pb.ID). 
- Updates(map[string]any{ - "name": model.Name, - "hosts": model.Hosts, - "become": model.Become, - "file_path": model.FilePath, - "updated_at": model.UpdatedAt, - }) - - if err := mapDBError(res.Error); err != nil { - return err - } - if res.RowsAffected == 0 { - return store.ErrNotFound - } - return nil -} - -func (s *postgresStore) DeletePlaybook(ctx context.Context, id string) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: DeletePlaybook: %w", store.ErrInvalid) - } - if id == "" { - return fmt.Errorf("postgres: DeletePlaybook: %w", store.ErrInvalid) - } - - // Delete associated tasks first - if err := s.db.WithContext(ctx).Where("playbook_id = ?", id).Delete(&PlaybookTaskModel{}).Error; err != nil { - return mapDBError(err) - } - - res := s.db.WithContext(ctx).Where("id = ?", id).Delete(&PlaybookModel{}) - if err := mapDBError(res.Error); err != nil { - return err - } - if res.RowsAffected == 0 { - return store.ErrNotFound - } - return nil -} - -// --- PlaybookTask --- - -func (s *postgresStore) CreatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: CreatePlaybookTask: %w", store.ErrInvalid) - } - if task == nil || task.ID == "" || task.PlaybookID == "" || task.Name == "" || task.Module == "" { - return fmt.Errorf("postgres: CreatePlaybookTask: %w", store.ErrInvalid) - } - if task.CreatedAt.IsZero() { - task.CreatedAt = time.Now().UTC() - } - - model, err := playbookTaskToModel(task) - if err != nil { - return err - } - if err := s.db.WithContext(ctx).Create(model).Error; err != nil { - return mapDBError(err) - } - return nil -} - -func (s *postgresStore) GetPlaybookTask(ctx context.Context, id string) (*store.PlaybookTask, error) { - var model PlaybookTaskModel - if err := s.db.WithContext(ctx).Where("id = ?", id).First(&model).Error; err != nil { - return nil, mapDBError(err) - } - return playbookTaskFromModel(&model) -} - -func (s *postgresStore) ListPlaybookTasks(ctx 
context.Context, playbookID string, opt *store.ListOptions) ([]*store.PlaybookTask, error) { - tx := s.db.WithContext(ctx).Model(&PlaybookTaskModel{}).Where("playbook_id = ?", playbookID) - - // Default ordering by position - if opt == nil || opt.OrderBy == "" { - tx = tx.Order("position ASC") - } else { - tx = applyListOptions(tx, opt, map[string]string{ - "position": "position", - "created_at": "created_at", - "name": "name", - }) - } - - if opt != nil && opt.Limit > 0 { - tx = tx.Limit(opt.Limit) - if opt.Offset > 0 { - tx = tx.Offset(opt.Offset) - } - } - - var models []PlaybookTaskModel - if err := tx.Find(&models).Error; err != nil { - return nil, mapDBError(err) - } - out := make([]*store.PlaybookTask, 0, len(models)) - for i := range models { - task, err := playbookTaskFromModel(&models[i]) - if err != nil { - return nil, err - } - out = append(out, task) - } - return out, nil -} - -func (s *postgresStore) UpdatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: UpdatePlaybookTask: %w", store.ErrInvalid) - } - if task == nil || task.ID == "" { - return fmt.Errorf("postgres: UpdatePlaybookTask: %w", store.ErrInvalid) - } - - model, err := playbookTaskToModel(task) - if err != nil { - return err - } - - res := s.db.WithContext(ctx). - Model(&PlaybookTaskModel{}). - Where("id = ?", task.ID). 
- Updates(map[string]any{ - "name": model.Name, - "module": model.Module, - "params": model.Params, - "position": model.Position, - }) - - if err := mapDBError(res.Error); err != nil { - return err - } - if res.RowsAffected == 0 { - return store.ErrNotFound - } - return nil -} - -func (s *postgresStore) DeletePlaybookTask(ctx context.Context, id string) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: DeletePlaybookTask: %w", store.ErrInvalid) - } - if id == "" { - return fmt.Errorf("postgres: DeletePlaybookTask: %w", store.ErrInvalid) - } - - res := s.db.WithContext(ctx).Where("id = ?", id).Delete(&PlaybookTaskModel{}) - if err := mapDBError(res.Error); err != nil { - return err - } - if res.RowsAffected == 0 { - return store.ErrNotFound - } - return nil -} - -func (s *postgresStore) ReorderPlaybookTasks(ctx context.Context, playbookID string, taskIDs []string) error { - if s.conf.ReadOnly { - return fmt.Errorf("postgres: ReorderPlaybookTasks: %w", store.ErrInvalid) - } - if playbookID == "" || len(taskIDs) == 0 { - return fmt.Errorf("postgres: ReorderPlaybookTasks: %w", store.ErrInvalid) - } - - return s.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error { - for i, taskID := range taskIDs { - res := tx.Model(&PlaybookTaskModel{}). - Where("id = ? AND playbook_id = ?", taskID, playbookID). - Update("position", i) - if res.Error != nil { - return mapDBError(res.Error) - } - if res.RowsAffected == 0 { - return fmt.Errorf("task %s not found in playbook %s", taskID, playbookID) - } - } - return nil - }) -} - -func (s *postgresStore) GetNextTaskPosition(ctx context.Context, playbookID string) (int, error) { - var maxPos *int - err := s.db.WithContext(ctx). - Model(&PlaybookTaskModel{}). - Where("playbook_id = ?", playbookID). - Select("MAX(position)"). 
- Scan(&maxPos).Error - if err != nil { - return 0, mapDBError(err) - } - if maxPos == nil { - return 0, nil - } - return *maxPos + 1, nil -} - -// --- Migration --- - -func (s *postgresStore) autoMigrate(ctx context.Context) error { - if err := s.db.WithContext(ctx).AutoMigrate( - &SandboxModel{}, - &SnapshotModel{}, - &CommandModel{}, - &DiffModel{}, - &ChangeSetModel{}, - &PublicationModel{}, - &PlaybookModel{}, - &PlaybookTaskModel{}, - ); err != nil { - return err - } - - // Create partial unique index on sandbox_name for non-deleted rows only. - // This allows reusing sandbox names after soft-delete. - // We use CREATE INDEX IF NOT EXISTS to be idempotent. - if err := s.db.WithContext(ctx).Exec(` - DROP INDEX IF EXISTS idx_sandboxes_sandbox_name; - CREATE UNIQUE INDEX IF NOT EXISTS idx_sandbox_name_active - ON sandboxes (sandbox_name) - WHERE deleted_at IS NULL - `).Error; err != nil { - return fmt.Errorf("create partial unique index: %w", err) - } - - return nil -} - -// --- Models & Converters --- - -type SandboxModel struct { - ID string `gorm:"primaryKey;column:id"` - JobID string `gorm:"column:job_id;not null;index"` - AgentID string `gorm:"column:agent_id;not null;index"` - SandboxName string `gorm:"column:sandbox_name;not null"` // Partial unique index created in autoMigrate - BaseImage string `gorm:"column:base_image;not null;index"` - Network string `gorm:"column:network;not null"` - IPAddress *string `gorm:"column:ip"` - State string `gorm:"column:state;not null;index"` - TTLSeconds *int `gorm:"column:ttl_seconds"` - CreatedAt time.Time `gorm:"column:created_at;not null"` - UpdatedAt time.Time `gorm:"column:updated_at;not null"` - DeletedAt *time.Time `gorm:"column:deleted_at;index"` -} - -func (SandboxModel) TableName() string { return "sandboxes" } - -type SnapshotModel struct { - ID string `gorm:"primaryKey;column:id"` - SandboxID string `gorm:"column:sandbox_id;not null;index;index:idx_snapshots_sandbox_name,unique"` - Name string 
`gorm:"column:name;not null;index:idx_snapshots_sandbox_name,unique"` - Kind string `gorm:"column:kind;not null"` - Ref string `gorm:"column:ref;not null"` - CreatedAt time.Time `gorm:"column:created_at;not null"` - MetaJSON *string `gorm:"column:meta_json;type:jsonb"` -} - -func (SnapshotModel) TableName() string { return "snapshots" } - -type CommandModel struct { - ID string `gorm:"primaryKey;column:id"` - SandboxID string `gorm:"column:sandbox_id;not null;index"` - Command string `gorm:"column:command;not null"` - EnvJSON *string `gorm:"column:env_json;type:jsonb"` - Stdout string `gorm:"column:stdout;not null"` - Stderr string `gorm:"column:stderr;not null"` - ExitCode int `gorm:"column:exit_code;not null"` - StartedAt time.Time `gorm:"column:started_at;not null;index"` - EndedAt time.Time `gorm:"column:ended_at;not null"` -} - -func (CommandModel) TableName() string { return "commands" } - -type DiffModel struct { - ID string `gorm:"primaryKey;column:id"` - SandboxID string `gorm:"column:sandbox_id;not null;index;index:idx_diffs_sandbox_snapshots,unique"` - FromSnapshot string `gorm:"column:from_snapshot;not null;index:idx_diffs_sandbox_snapshots,unique"` - ToSnapshot string `gorm:"column:to_snapshot;not null;index:idx_diffs_sandbox_snapshots,unique"` - DiffJSON datatypes.JSON `gorm:"column:diff_json;type:jsonb;not null"` - CreatedAt time.Time `gorm:"column:created_at;not null"` -} - -func (DiffModel) TableName() string { return "diffs" } - -type ChangeSetModel struct { - ID string `gorm:"primaryKey;column:id"` - JobID string `gorm:"column:job_id;not null;uniqueIndex"` - SandboxID string `gorm:"column:sandbox_id;not null;index"` - DiffID string `gorm:"column:diff_id;not null;index"` - PathAnsible string `gorm:"column:path_ansible;not null"` - PathPuppet string `gorm:"column:path_puppet;not null"` - MetaJSON *string `gorm:"column:meta_json;type:jsonb"` - CreatedAt time.Time `gorm:"column:created_at;not null"` -} - -func (ChangeSetModel) TableName() string { 
return "changesets" } - -type PublicationModel struct { - ID string `gorm:"primaryKey;column:id"` - JobID string `gorm:"column:job_id;not null;index"` - RepoURL string `gorm:"column:repo_url;not null"` - Branch string `gorm:"column:branch;not null"` - CommitSHA *string `gorm:"column:commit_sha"` - PRURL *string `gorm:"column:pr_url"` - Status string `gorm:"column:status;not null;index"` - ErrorMsg *string `gorm:"column:error_msg"` - CreatedAt time.Time `gorm:"column:created_at;not null"` - UpdatedAt time.Time `gorm:"column:updated_at;not null"` -} - -func (PublicationModel) TableName() string { return "publications" } - -type PlaybookModel struct { - ID string `gorm:"primaryKey;column:id"` - Name string `gorm:"column:name;not null;uniqueIndex"` - Hosts string `gorm:"column:hosts;not null"` - Become bool `gorm:"column:become;not null;default:false"` - FilePath *string `gorm:"column:file_path"` - CreatedAt time.Time `gorm:"column:created_at;not null"` - UpdatedAt time.Time `gorm:"column:updated_at;not null"` -} - -func (PlaybookModel) TableName() string { return "playbooks" } - -type PlaybookTaskModel struct { - ID string `gorm:"primaryKey;column:id"` - PlaybookID string `gorm:"column:playbook_id;not null;index"` - Position int `gorm:"column:position;not null;index"` - Name string `gorm:"column:name;not null"` - Module string `gorm:"column:module;not null"` - Params datatypes.JSON `gorm:"column:params;type:jsonb;not null"` - CreatedAt time.Time `gorm:"column:created_at;not null"` -} - -func (PlaybookTaskModel) TableName() string { return "playbook_tasks" } - -func sandboxToModel(sb *store.Sandbox) *SandboxModel { - return &SandboxModel{ - ID: sb.ID, - JobID: sb.JobID, - AgentID: sb.AgentID, - SandboxName: sb.SandboxName, - BaseImage: sb.BaseImage, - Network: sb.Network, - IPAddress: copyString(sb.IPAddress), - State: string(sb.State), - TTLSeconds: copyInt(sb.TTLSeconds), - CreatedAt: sb.CreatedAt, - UpdatedAt: sb.UpdatedAt, - DeletedAt: copyTime(sb.DeletedAt), - } 
-} - -func sandboxFromModel(m *SandboxModel) *store.Sandbox { - return &store.Sandbox{ - ID: m.ID, - JobID: m.JobID, - AgentID: m.AgentID, - SandboxName: m.SandboxName, - BaseImage: m.BaseImage, - Network: m.Network, - IPAddress: copyString(m.IPAddress), - State: store.SandboxState(m.State), - TTLSeconds: copyInt(m.TTLSeconds), - CreatedAt: m.CreatedAt, - UpdatedAt: m.UpdatedAt, - DeletedAt: copyTime(m.DeletedAt), - } -} - -func snapshotToModel(sn *store.Snapshot) *SnapshotModel { - return &SnapshotModel{ - ID: sn.ID, - SandboxID: sn.SandboxID, - Name: sn.Name, - Kind: string(sn.Kind), - Ref: sn.Ref, - CreatedAt: sn.CreatedAt, - MetaJSON: copyString(sn.MetaJSON), - } -} - -func snapshotFromModel(m *SnapshotModel) *store.Snapshot { - return &store.Snapshot{ - ID: m.ID, - SandboxID: m.SandboxID, - Name: m.Name, - Kind: store.SnapshotKind(m.Kind), - Ref: m.Ref, - CreatedAt: m.CreatedAt, - MetaJSON: copyString(m.MetaJSON), - } -} - -func commandToModel(cmd *store.Command) *CommandModel { - return &CommandModel{ - ID: cmd.ID, - SandboxID: cmd.SandboxID, - Command: cmd.Command, - EnvJSON: copyString(cmd.EnvJSON), - Stdout: cmd.Stdout, - Stderr: cmd.Stderr, - ExitCode: cmd.ExitCode, - StartedAt: cmd.StartedAt, - EndedAt: cmd.EndedAt, - } -} - -func commandFromModel(m *CommandModel) *store.Command { - return &store.Command{ - ID: m.ID, - SandboxID: m.SandboxID, - Command: m.Command, - EnvJSON: copyString(m.EnvJSON), - Stdout: m.Stdout, - Stderr: m.Stderr, - ExitCode: m.ExitCode, - StartedAt: m.StartedAt, - EndedAt: m.EndedAt, - } -} - -func diffToModel(d *store.Diff) (*DiffModel, error) { - payload, err := json.Marshal(d.DiffJSON) - if err != nil { - return nil, fmt.Errorf("postgres: marshal diff_json: %w", err) - } - return &DiffModel{ - ID: d.ID, - SandboxID: d.SandboxID, - FromSnapshot: d.FromSnapshot, - ToSnapshot: d.ToSnapshot, - DiffJSON: datatypes.JSON(payload), - CreatedAt: d.CreatedAt, - }, nil -} - -func diffFromModel(m *DiffModel) (*store.Diff, error) { - var 
diff store.Diff - diff.ID = m.ID - diff.SandboxID = m.SandboxID - diff.FromSnapshot = m.FromSnapshot - diff.ToSnapshot = m.ToSnapshot - diff.CreatedAt = m.CreatedAt - if err := json.Unmarshal([]byte(m.DiffJSON), &diff.DiffJSON); err != nil { - return nil, fmt.Errorf("postgres: unmarshal diff_json: %w", err) - } - return &diff, nil -} - -func changeSetToModel(cs *store.ChangeSet) *ChangeSetModel { - return &ChangeSetModel{ - ID: cs.ID, - JobID: cs.JobID, - SandboxID: cs.SandboxID, - DiffID: cs.DiffID, - PathAnsible: cs.PathAnsible, - PathPuppet: cs.PathPuppet, - MetaJSON: copyString(cs.MetaJSON), - CreatedAt: cs.CreatedAt, - } -} - -func changeSetFromModel(m *ChangeSetModel) *store.ChangeSet { - return &store.ChangeSet{ - ID: m.ID, - JobID: m.JobID, - SandboxID: m.SandboxID, - DiffID: m.DiffID, - PathAnsible: m.PathAnsible, - PathPuppet: m.PathPuppet, - MetaJSON: copyString(m.MetaJSON), - CreatedAt: m.CreatedAt, - } -} - -func publicationToModel(p *store.Publication) *PublicationModel { - return &PublicationModel{ - ID: p.ID, - JobID: p.JobID, - RepoURL: p.RepoURL, - Branch: p.Branch, - CommitSHA: copyString(p.CommitSHA), - PRURL: copyString(p.PRURL), - Status: string(p.Status), - ErrorMsg: copyString(p.ErrorMsg), - CreatedAt: p.CreatedAt, - UpdatedAt: p.UpdatedAt, - } -} - -func publicationFromModel(m *PublicationModel) *store.Publication { - return &store.Publication{ - ID: m.ID, - JobID: m.JobID, - RepoURL: m.RepoURL, - Branch: m.Branch, - CommitSHA: copyString(m.CommitSHA), - PRURL: copyString(m.PRURL), - Status: store.PublicationStatus(m.Status), - ErrorMsg: copyString(m.ErrorMsg), - CreatedAt: m.CreatedAt, - UpdatedAt: m.UpdatedAt, - } -} - -func playbookToModel(pb *store.Playbook) *PlaybookModel { - return &PlaybookModel{ - ID: pb.ID, - Name: pb.Name, - Hosts: pb.Hosts, - Become: pb.Become, - FilePath: copyString(pb.FilePath), - CreatedAt: pb.CreatedAt, - UpdatedAt: pb.UpdatedAt, - } -} - -func playbookFromModel(m *PlaybookModel) *store.Playbook { - return 
&store.Playbook{ - ID: m.ID, - Name: m.Name, - Hosts: m.Hosts, - Become: m.Become, - FilePath: copyString(m.FilePath), - CreatedAt: m.CreatedAt, - UpdatedAt: m.UpdatedAt, - } -} - -func playbookTaskToModel(task *store.PlaybookTask) (*PlaybookTaskModel, error) { - params, err := json.Marshal(task.Params) - if err != nil { - return nil, fmt.Errorf("postgres: marshal task params: %w", err) - } - return &PlaybookTaskModel{ - ID: task.ID, - PlaybookID: task.PlaybookID, - Position: task.Position, - Name: task.Name, - Module: task.Module, - Params: datatypes.JSON(params), - CreatedAt: task.CreatedAt, - }, nil -} - -func playbookTaskFromModel(m *PlaybookTaskModel) (*store.PlaybookTask, error) { - var params map[string]any - if len(m.Params) > 0 { - if err := json.Unmarshal([]byte(m.Params), ¶ms); err != nil { - return nil, fmt.Errorf("postgres: unmarshal task params: %w", err) - } - } - return &store.PlaybookTask{ - ID: m.ID, - PlaybookID: m.PlaybookID, - Position: m.Position, - Name: m.Name, - Module: m.Module, - Params: params, - CreatedAt: m.CreatedAt, - }, nil -} - -// --- Helpers --- - -func applyListOptions(tx *gorm.DB, opt *store.ListOptions, whitelist map[string]string) *gorm.DB { - orderApplied := false - if opt != nil { - if col, ok := whitelist[opt.OrderBy]; ok { - dir := "DESC" - if opt.Asc { - dir = "ASC" - } - tx = tx.Order(fmt.Sprintf("%s %s", col, dir)) - orderApplied = true - } - if opt.Limit > 0 { - tx = tx.Limit(opt.Limit) - if opt.Offset > 0 { - tx = tx.Offset(opt.Offset) - } - } - } - if !orderApplied { - tx = tx.Order("created_at DESC") - } - return tx -} - -func copyString(src *string) *string { - if src == nil { - return nil - } - val := *src - return &val -} - -func copyInt(src *int) *int { - if src == nil { - return nil - } - val := *src - return &val -} - -func copyTime(src *time.Time) *time.Time { - if src == nil { - return nil - } - val := *src - return &val -} - -func mapDBError(err error) error { - if err == nil { - return nil - } - if 
errors.Is(err, gorm.ErrRecordNotFound) { - return store.ErrNotFound - } - if errors.Is(err, gorm.ErrDuplicatedKey) { - return store.ErrAlreadyExists - } - var pgErr *pgconn.PgError - if errors.As(err, &pgErr) { - switch pgErr.Code { - case "23505": - return store.ErrAlreadyExists - case "23503": - return store.ErrInvalid - } - } - return err -} diff --git a/fluid-remote/internal/store/store.go b/fluid-remote/internal/store/store.go deleted file mode 100755 index 1489ecc3..00000000 --- a/fluid-remote/internal/store/store.go +++ /dev/null @@ -1,319 +0,0 @@ -package store - -import ( - "context" - "errors" - "time" -) - -// Domain model and persistence contracts for the VM sandbox system. -// This package declares the data structures persisted in the DB and the -// storage interfaces that concrete implementations (SQLite/Postgres) must provide. - -// Config describes database-related configuration for a Store implementation. -type Config struct { - // DatabaseURL is the DSN/URL used to connect to the database. - // Examples: - // - Postgres: postgres://user:pass@host:5432/dbname?sslmode=disable - DatabaseURL string `json:"database_url"` - - // MaxOpenConns sets the maximum number of open connections to the database. - MaxOpenConns int `json:"max_open_conns"` - - // MaxIdleConns sets the maximum number of connections in the idle connection pool. - MaxIdleConns int `json:"max_idle_conns"` - - // ConnMaxLifetime sets the maximum amount of time a connection may be reused. - ConnMaxLifetime time.Duration `json:"conn_max_lifetime"` - - // AutoMigrate, when true, allows the store to create/update schema automatically. - AutoMigrate bool `json:"auto_migrate"` - - // ReadOnly, when true, disallows mutating operations. - ReadOnly bool `json:"read_only"` -} - -// ListOptions supports pagination and ordering for list operations. 
-type ListOptions struct { - Limit int // Max records to return (0 = default/backend-defined) - Offset int // Records to skip - OrderBy string // Column to order by (implementation should whitelist) - Asc bool // Ascending if true, descending if false -} - -// Common sentinel errors for store implementations. -var ( - ErrNotFound = errors.New("store: not found") - ErrAlreadyExists = errors.New("store: already exists") - ErrConflict = errors.New("store: conflict") - ErrInvalid = errors.New("store: invalid data") -) - -// SandboxState enumerates lifecycle states for a sandbox VM. -type SandboxState string - -const ( - SandboxStateCreated SandboxState = "CREATED" - SandboxStateStarting SandboxState = "STARTING" - SandboxStateRunning SandboxState = "RUNNING" - SandboxStateStopped SandboxState = "STOPPED" - SandboxStateDestroyed SandboxState = "DESTROYED" - SandboxStateError SandboxState = "ERROR" -) - -// SnapshotKind describes how a snapshot is taken/stored. -type SnapshotKind string - -const ( - // SnapshotKindInternal refers to libvirt/qemu internal snapshot (domain-managed). - SnapshotKindInternal SnapshotKind = "INTERNAL" - // SnapshotKindExternal refers to external snapshot (file/overlay). - SnapshotKindExternal SnapshotKind = "EXTERNAL" -) - -// PublicationStatus tracks GitOps publishing lifecycle. -type PublicationStatus string - -const ( - PublicationStatusPending PublicationStatus = "PENDING" - PublicationStatusCommitted PublicationStatus = "COMMITTED" - PublicationStatusPRCreated PublicationStatus = "PR_CREATED" - PublicationStatusMerged PublicationStatus = "MERGED" - PublicationStatusFailed PublicationStatus = "FAILED" -) - -// Sandbox represents a disposable VM environment cloned from a golden image. 
-type Sandbox struct { - ID string `json:"id" db:"id"` // e.g., "SBX-0001" - JobID string `json:"job_id" db:"job_id"` // correlation id for the end-to-end change set - AgentID string `json:"agent_id" db:"agent_id"` // requesting agent identity - SandboxName string `json:"sandbox_name" db:"sandbox_name"` // libvirt domain name - BaseImage string `json:"base_image" db:"base_image"` // base qcow2 filename - Network string `json:"network" db:"network"` // libvirt network name - IPAddress *string `json:"ip_address,omitempty" db:"ip"` // discovered IP (if any) - State SandboxState `json:"state" db:"state"` - TTLSeconds *int `json:"ttl_seconds,omitempty" db:"ttl_seconds"` // optional TTL for auto GC - - // Multi-host support: identifies which libvirt host this sandbox runs on - HostName *string `json:"host_name,omitempty" db:"host_name"` // display name of the host (e.g., "kvm-01") - HostAddress *string `json:"host_address,omitempty" db:"host_address"` // IP or hostname of the libvirt host - - // Metadata - CreatedAt time.Time `json:"created_at" db:"created_at"` - UpdatedAt time.Time `json:"updated_at" db:"updated_at"` - DeletedAt *time.Time `json:"deleted_at,omitempty" db:"deleted_at"` -} - -// SandboxFilter enables scoped queries for sandboxes. -type SandboxFilter struct { - AgentID *string - JobID *string - BaseImage *string - State *SandboxState - VMName *string -} - -// Snapshot represents a VM snapshot reference. -type Snapshot struct { - ID string `json:"id" db:"id"` - SandboxID string `json:"sandbox_id" db:"sandbox_id"` - Name string `json:"name" db:"name"` // logical name (unique per sandbox) - Kind SnapshotKind `json:"kind" db:"kind"` - // Ref is a backend-specific reference: for internal snapshots this could be a UUID or name, - // for external snapshots it could be a file path to the overlay qcow2. 
- Ref string `json:"ref" db:"ref"` - CreatedAt time.Time `json:"created_at" db:"created_at"` - MetaJSON *string `json:"meta_json,omitempty" db:"meta_json"` // optional JSON metadata -} - -// Command captures an executed command inside a sandbox. -type Command struct { - ID string `json:"id" db:"id"` - SandboxID string `json:"sandbox_id" db:"sandbox_id"` - Command string `json:"command" db:"command"` - EnvJSON *string `json:"env_json,omitempty" db:"env_json"` // JSON-encoded env map - Stdout string `json:"stdout" db:"stdout"` - Stderr string `json:"stderr" db:"stderr"` - ExitCode int `json:"exit_code" db:"exit_code"` - StartedAt time.Time `json:"started_at" db:"started_at"` - EndedAt time.Time `json:"ended_at" db:"ended_at"` - Metadata *CommandExecRecord `json:"metadata,omitempty" db:"-"` -} - -// CommandExecRecord is a non-persisted helper payload commonly serialized into Metadata fields. -// It can be persisted by serializing to JSON and storing in an auxiliary column if desired. -type CommandExecRecord struct { - User string `json:"user,omitempty"` - WorkDir string `json:"work_dir,omitempty"` - Timeout *time.Duration `json:"timeout,omitempty"` - Redacted map[string]string `json:"redacted,omitempty"` // placeholders for secrets redaction -} - -// Diff represents a computed difference between two snapshots of a sandbox. -type Diff struct { - ID string `json:"id" db:"id"` - SandboxID string `json:"sandbox_id" db:"sandbox_id"` - FromSnapshot string `json:"from_snapshot" db:"from_snapshot"` - ToSnapshot string `json:"to_snapshot" db:"to_snapshot"` - DiffJSON ChangeDiff `json:"diff_json" db:"diff_json"` // JSON-encoded change diff - CreatedAt time.Time `json:"created_at" db:"created_at"` -} - -// PackageInfo captures package name and version. -type PackageInfo struct { - Name string `json:"name"` - Version string `json:"version,omitempty"` -} - -// ServiceChange represents a system service change. 
-type ServiceChange struct { - Name string `json:"name"` - Enabled *bool `json:"enabled,omitempty"` - State string `json:"state,omitempty"` // started|stopped|restarted|reloaded -} - -// CommandSummary summarizes executed commands affecting the diff. -type CommandSummary struct { - Cmd string `json:"cmd"` - ExitCode int `json:"exit_code"` - At time.Time `json:"at"` -} - -// ChangeDiff is the normalized change representation generated by diffing snapshots. -type ChangeDiff struct { - FilesModified []string `json:"files_modified,omitempty"` - FilesAdded []string `json:"files_added,omitempty"` - FilesRemoved []string `json:"files_removed,omitempty"` - PackagesAdded []PackageInfo `json:"packages_added,omitempty"` - PackagesRemoved []PackageInfo `json:"packages_removed,omitempty"` - ServicesChanged []ServiceChange `json:"services_changed,omitempty"` - CommandsRun []CommandSummary `json:"commands_run,omitempty"` -} - -// ChangeSet captures generator outputs (Ansible/Puppet) for a job. -type ChangeSet struct { - ID string `json:"id" db:"id"` - JobID string `json:"job_id" db:"job_id"` - SandboxID string `json:"sandbox_id" db:"sandbox_id"` - DiffID string `json:"diff_id" db:"diff_id"` - PathAnsible string `json:"path_ansible" db:"path_ansible"` // e.g., /changes/{job_id}/ansible - PathPuppet string `json:"path_puppet" db:"path_puppet"` // e.g., /changes/{job_id}/puppet - MetaJSON *string `json:"meta_json,omitempty" db:"meta_json"` - CreatedAt time.Time `json:"created_at" db:"created_at"` -} - -// Publication records a GitOps publication attempt and status. 
-type Publication struct { - ID string `json:"id" db:"id"` - JobID string `json:"job_id" db:"job_id"` - RepoURL string `json:"repo_url" db:"repo_url"` - Branch string `json:"branch" db:"branch"` - CommitSHA *string `json:"commit_sha,omitempty" db:"commit_sha"` - PRURL *string `json:"pr_url,omitempty" db:"pr_url"` - Status PublicationStatus `json:"status" db:"status"` - ErrorMsg *string `json:"error_msg,omitempty" db:"error_msg"` - CreatedAt time.Time `json:"created_at" db:"created_at"` - UpdatedAt time.Time `json:"updated_at" db:"updated_at"` -} - -// Playbook represents an Ansible playbook stored in the database. -type Playbook struct { - ID string `json:"id" db:"id"` - Name string `json:"name" db:"name"` // unique playbook name - Hosts string `json:"hosts" db:"hosts"` // target hosts pattern (e.g., "all", "webservers") - Become bool `json:"become" db:"become"` // whether to use privilege escalation - FilePath *string `json:"file_path,omitempty" db:"file_path"` // rendered YAML file path - CreatedAt time.Time `json:"created_at" db:"created_at"` - UpdatedAt time.Time `json:"updated_at" db:"updated_at"` -} - -// PlaybookTask represents a single task within an Ansible playbook. -type PlaybookTask struct { - ID string `json:"id" db:"id"` - PlaybookID string `json:"playbook_id" db:"playbook_id"` - Position int `json:"position" db:"position"` // ordering within playbook - Name string `json:"name" db:"name"` // task name/description - Module string `json:"module" db:"module"` // ansible module (apt, shell, copy, etc.) - Params map[string]any `json:"params" db:"params" swaggertype:"object"` // module-specific parameters - CreatedAt time.Time `json:"created_at" db:"created_at"` -} - -// DataStore declares data operations. This is transaction-friendly and -// can be implemented by both the root Store and a transactional context. 
-type DataStore interface { - // Sandbox - CreateSandbox(ctx context.Context, sb *Sandbox) error - GetSandbox(ctx context.Context, id string) (*Sandbox, error) - GetSandboxByVMName(ctx context.Context, vmName string) (*Sandbox, error) - ListSandboxes(ctx context.Context, filter SandboxFilter, opt *ListOptions) ([]*Sandbox, error) - // ListExpiredSandboxes returns sandboxes that have exceeded their TTL. - // It checks for sandboxes in RUNNING or STARTING state where: - // - TTLSeconds is set AND created_at + ttl_seconds < now - // - OR defaultTTL > 0, TTLSeconds is NULL, AND created_at + defaultTTL < now - ListExpiredSandboxes(ctx context.Context, defaultTTL time.Duration) ([]*Sandbox, error) - UpdateSandbox(ctx context.Context, sb *Sandbox) error - UpdateSandboxState(ctx context.Context, id string, newState SandboxState, ipAddr *string) error - DeleteSandbox(ctx context.Context, id string) error - - // Snapshot - CreateSnapshot(ctx context.Context, sn *Snapshot) error - GetSnapshot(ctx context.Context, id string) (*Snapshot, error) - GetSnapshotByName(ctx context.Context, sandboxID, name string) (*Snapshot, error) - ListSnapshots(ctx context.Context, sandboxID string, opt *ListOptions) ([]*Snapshot, error) - - // Command - SaveCommand(ctx context.Context, cmd *Command) error - GetCommand(ctx context.Context, id string) (*Command, error) - ListCommands(ctx context.Context, sandboxID string, opt *ListOptions) ([]*Command, error) - - // Diff - SaveDiff(ctx context.Context, d *Diff) error - GetDiff(ctx context.Context, id string) (*Diff, error) - GetDiffBySnapshots(ctx context.Context, sandboxID, fromSnapshot, toSnapshot string) (*Diff, error) - - // ChangeSet - CreateChangeSet(ctx context.Context, cs *ChangeSet) error - GetChangeSet(ctx context.Context, id string) (*ChangeSet, error) - GetChangeSetByJob(ctx context.Context, jobID string) (*ChangeSet, error) - - // Publication - CreatePublication(ctx context.Context, p *Publication) error - UpdatePublicationStatus(ctx 
context.Context, id string, status PublicationStatus, commitSHA, prURL, errMsg *string) error - GetPublication(ctx context.Context, id string) (*Publication, error) - - // Playbook - CreatePlaybook(ctx context.Context, pb *Playbook) error - GetPlaybook(ctx context.Context, id string) (*Playbook, error) - GetPlaybookByName(ctx context.Context, name string) (*Playbook, error) - ListPlaybooks(ctx context.Context, opt *ListOptions) ([]*Playbook, error) - UpdatePlaybook(ctx context.Context, pb *Playbook) error - DeletePlaybook(ctx context.Context, id string) error - - // PlaybookTask - CreatePlaybookTask(ctx context.Context, task *PlaybookTask) error - GetPlaybookTask(ctx context.Context, id string) (*PlaybookTask, error) - ListPlaybookTasks(ctx context.Context, playbookID string, opt *ListOptions) ([]*PlaybookTask, error) - UpdatePlaybookTask(ctx context.Context, task *PlaybookTask) error - DeletePlaybookTask(ctx context.Context, id string) error - ReorderPlaybookTasks(ctx context.Context, playbookID string, taskIDs []string) error - GetNextTaskPosition(ctx context.Context, playbookID string) (int, error) -} - -// Store is the root database handle. It can produce transactional views and -// exposes liveness and lifecycle methods in addition to the DataStore. -type Store interface { - DataStore - - // Config returns the configuration the store was created with. - Config() Config - - // Ping verifies DB connectivity/health. - Ping(ctx context.Context) error - - // WithTx runs fn in a transaction. The provided DataStore must be used for - // all DB calls within fn and is committed if fn returns nil, rolled back otherwise. - WithTx(ctx context.Context, fn func(tx DataStore) error) error - - // Close releases resources held by the Store. 
- Close() error -} diff --git a/fluid-remote/internal/telemetry/telemetry.go b/fluid-remote/internal/telemetry/telemetry.go deleted file mode 100644 index b66a7358..00000000 --- a/fluid-remote/internal/telemetry/telemetry.go +++ /dev/null @@ -1,81 +0,0 @@ -package telemetry - -import ( - "runtime" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/config" - - "github.com/google/uuid" - "github.com/posthog/posthog-go" -) - -// posthogAPIKey is the PostHog API key. By default uses dev key. -// Override at build time with: -ldflags "-X github.com/aspectrr/fluid.sh/fluid-remote/internal/telemetry.posthogAPIKey=YOUR_KEY" -var posthogAPIKey = "phc_nZdxqaqWmZhHpWPIsUFqmwtr9WfyYaae0IOdRmh8YGT" - -// Service defines the interface for telemetry operations. -type Service interface { - Track(event string, properties map[string]any) - Close() -} - -// NoopService is a telemetry service that does nothing. -// Use this when telemetry is disabled or initialization fails. -type NoopService struct{} - -func (s *NoopService) Track(event string, properties map[string]any) {} -func (s *NoopService) Close() {} - -// NewNoopService returns a telemetry service that does nothing. -// Use this as a fallback when telemetry initialization fails -// or when you explicitly want to disable telemetry. -func NewNoopService() Service { - return &NoopService{} -} - -type posthogService struct { - client posthog.Client - distinctID string -} - -// NewService creates a new telemetry service based on configuration. -func NewService(cfg config.TelemetryConfig) (Service, error) { - if !cfg.EnableAnonymousUsage { - return &NoopService{}, nil - } - - client, err := posthog.NewWithConfig(posthogAPIKey, posthog.Config{Endpoint: "https://nautilus.fluid.sh"}) - if err != nil { - return nil, err - } - - // Generate a unique ID for this session. - // In a real application, you might want to persist this ID. 
- distinctID := uuid.New().String() - - return &posthogService{ - client: client, - distinctID: distinctID, - }, nil -} - -func (s *posthogService) Track(event string, properties map[string]any) { - if properties == nil { - properties = make(map[string]any) - } - - // Add common properties - properties["os"] = runtime.GOOS - properties["arch"] = runtime.GOARCH - properties["go_version"] = runtime.Version() - - _ = s.client.Enqueue(posthog.Capture{ - DistinctId: s.distinctID, - Event: event, - Properties: properties, - }) -} - -func (s *posthogService) Close() { - _ = s.client.Close() -} diff --git a/fluid-remote/internal/telemetry/telemetry_test.go b/fluid-remote/internal/telemetry/telemetry_test.go deleted file mode 100644 index 1bfc00f8..00000000 --- a/fluid-remote/internal/telemetry/telemetry_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package telemetry - -import ( - "testing" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/config" -) - -func TestNewNoopService(t *testing.T) { - svc := NewNoopService() - if svc == nil { - t.Fatal("expected non-nil service") - } - - // Verify it implements Service interface - _ = svc - - // NoopService should accept calls without panicking - svc.Track("test_event", nil) - svc.Track("test_event", map[string]any{"key": "value"}) - svc.Close() -} - -func TestNewServiceDisabled(t *testing.T) { - cfg := config.TelemetryConfig{ - EnableAnonymousUsage: false, - } - - svc, err := NewService(cfg) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if svc == nil { - t.Fatal("expected non-nil service") - } - - // Should return a NoopService when disabled - if _, ok := svc.(*NoopService); !ok { - t.Errorf("expected *NoopService, got %T", svc) - } - - // Should work without panicking - svc.Track("test_event", nil) - svc.Close() -} - -func TestNoopServiceMethods(t *testing.T) { - svc := &NoopService{} - - // Track should not panic with nil properties - svc.Track("event", nil) - - // Track should not panic with properties - 
svc.Track("event", map[string]any{ - "string": "value", - "int": 42, - "float": 3.14, - "bool": true, - "nested": map[string]any{"inner": "value"}, - }) - - // Close should not panic - svc.Close() -} diff --git a/fluid-remote/internal/vm/service.go b/fluid-remote/internal/vm/service.go deleted file mode 100755 index 3eb5273d..00000000 --- a/fluid-remote/internal/vm/service.go +++ /dev/null @@ -1,1483 +0,0 @@ -package vm - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "log/slog" - "os" - "os/exec" - "strings" - "time" - - "github.com/google/uuid" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/config" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/libvirt" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/sshkeys" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/store" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/telemetry" -) - -// Service orchestrates libvirt operations and data persistence. -// It represents the main application layer for sandbox lifecycle, command exec, -// snapshotting, diffing, and artifact generation orchestration. -type Service struct { - mgr libvirt.Manager - store store.Store - ssh SSHRunner - keyMgr sshkeys.KeyProvider // Optional: manages SSH keys for RunCommand - telemetry telemetry.Service - cfg Config - virshCfg libvirt.Config // Virsh config for creating remote managers - timeNowFn func() time.Time - logger *slog.Logger -} - -// Config controls default VM parameters and timeouts used by the service. -type Config struct { - // Default libvirt network name (e.g., "default") used when creating VMs. - Network string - - // Default shape if not provided by callers. - DefaultVCPUs int - DefaultMemoryMB int - - // CommandTimeout sets a default timeout for RunCommand when caller doesn't provide one. - CommandTimeout time.Duration - - // IPDiscoveryTimeout controls how long StartSandbox waits for the VM IP (when requested). 
- IPDiscoveryTimeout time.Duration - - // SSHReadinessTimeout controls how long to wait for SSH to become available after IP discovery. - // If zero, SSH readiness check is skipped. Default: 60s - SSHReadinessTimeout time.Duration - - // SSHProxyJump specifies a jump host for SSH connections to VMs. - // Format: "user@host:port" or just "host" for default user/port. - // Required when VMs are on an isolated network not directly reachable. - SSHProxyJump string -} - -// Option configures the Service during construction. -type Option func(*Service) - -// WithSSHRunner overrides the default SSH runner implementation. -func WithSSHRunner(r SSHRunner) Option { - return func(s *Service) { s.ssh = r } -} - -// WithTelemetry sets the telemetry service. -func WithTelemetry(t telemetry.Service) Option { - return func(s *Service) { s.telemetry = t } -} - -// WithTimeNow overrides the clock (useful for tests). -func WithTimeNow(fn func() time.Time) Option { - return func(s *Service) { s.timeNowFn = fn } -} - -// WithLogger sets a custom logger for the service. -func WithLogger(l *slog.Logger) Option { - return func(s *Service) { s.logger = l } -} - -// WithKeyManager sets a key manager for managed SSH credentials. -// When set, RunCommand can be called without explicit privateKeyPath. -func WithKeyManager(km sshkeys.KeyProvider) Option { - return func(s *Service) { s.keyMgr = km } -} - -// WithVirshConfig sets the libvirt/virsh configuration for creating remote managers. -func WithVirshConfig(cfg libvirt.Config) Option { - return func(s *Service) { s.virshCfg = cfg } -} - -// NewService constructs a VM service with the provided libvirt manager, store and config. 
-func NewService(mgr libvirt.Manager, st store.Store, cfg Config, opts ...Option) *Service { - if cfg.DefaultVCPUs <= 0 { - cfg.DefaultVCPUs = 2 - } - if cfg.DefaultMemoryMB <= 0 { - cfg.DefaultMemoryMB = 2048 - } - if cfg.CommandTimeout <= 0 { - cfg.CommandTimeout = 10 * time.Minute - } - if cfg.IPDiscoveryTimeout <= 0 { - cfg.IPDiscoveryTimeout = 2 * time.Minute - } - if cfg.SSHReadinessTimeout <= 0 { - cfg.SSHReadinessTimeout = 60 * time.Second - } - s := &Service{ - mgr: mgr, - store: st, - cfg: cfg, - ssh: &DefaultSSHRunner{ProxyJump: cfg.SSHProxyJump}, - timeNowFn: time.Now, - logger: slog.Default(), - } - for _, o := range opts { - o(s) - } - // Default to noop telemetry if not provided - if s.telemetry == nil { - s.telemetry = telemetry.NewNoopService() - } - return s -} - -// CreateSandbox clones a VM from an existing VM and persists a Sandbox record. -// -// sourceSandboxName is the name of the existing VM in libvirt to clone from. -// SandboxName is optional; if empty, a name will be generated. -// cpu and memoryMB are optional; if <=0 the service defaults are used. -// ttlSeconds is optional; if provided, sets the TTL for auto garbage collection. -// autoStart if true will start the VM immediately after creation. -// waitForIP if true (and autoStart is true), will wait for IP discovery. -// Returns the sandbox, the discovered IP (if autoStart and waitForIP), and any error. -// validateIPUniqueness checks if the given IP is already assigned to another running sandbox. -// Returns an error if the IP is assigned to a different sandbox that is still running. 
-func (s *Service) validateIPUniqueness(ctx context.Context, currentSandboxID, ip string) error { - // Check both RUNNING and STARTING sandboxes to prevent race conditions - // where two sandboxes might discover the same IP simultaneously - statesToCheck := []store.SandboxState{ - store.SandboxStateRunning, - store.SandboxStateStarting, - } - - for _, state := range statesToCheck { - stateFilter := state - sandboxes, err := s.store.ListSandboxes(ctx, store.SandboxFilter{ - State: &stateFilter, - }, nil) - if err != nil { - return fmt.Errorf("list sandboxes (state=%s) for IP validation: %w", state, err) - } - - for _, sb := range sandboxes { - if sb.ID == currentSandboxID { - continue // Skip the current sandbox - } - if sb.IPAddress != nil && *sb.IPAddress == ip { - s.logger.Error("IP address conflict detected", - "conflict_ip", ip, - "current_sandbox_id", currentSandboxID, - "conflicting_sandbox_id", sb.ID, - "conflicting_sandbox_name", sb.SandboxName, - "conflicting_sandbox_state", sb.State, - ) - return fmt.Errorf("IP %s is already assigned to sandbox %s (vm: %s, state: %s)", ip, sb.ID, sb.SandboxName, sb.State) - } - } - } - return nil -} - -// waitForSSH waits until SSH is accepting connections on the given IP. -// It uses exponential backoff to probe SSH readiness. 
-func (s *Service) waitForSSH(ctx context.Context, sandboxID, ip string, timeout time.Duration) error { - if timeout <= 0 { - return nil // SSH readiness check disabled - } - - // Skip if no key manager configured - if s.keyMgr == nil { - s.logger.Debug("no key manager configured, skipping SSH readiness check") - return nil - } - - s.logger.Info("waiting for SSH to become ready", - "sandbox_id", sandboxID, - "ip", ip, - "timeout", timeout, - ) - - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - // Get credentials for SSH probe - use default "sandbox" user - creds, err := s.keyMgr.GetCredentials(ctx, sandboxID, "sandbox") - if err != nil { - s.logger.Warn("failed to get SSH credentials for readiness check, skipping", - "sandbox_id", sandboxID, - "error", err, - ) - return nil // Don't fail sandbox creation if we can't get creds - } - - // Use short command timeout for probes - probeTimeout := 10 * time.Second - - // Exponential backoff: 1s, 2s, 4s, 8s, 16s (capped) - initialDelay := 1 * time.Second - maxDelay := 16 * time.Second - attempt := 0 - - for { - select { - case <-ctx.Done(): - return fmt.Errorf("SSH readiness timeout after %v: %w", timeout, ctx.Err()) - default: - } - - // Try to run a simple command - _, _, exitCode, runErr := s.ssh.RunWithCert( - ctx, - ip, - creds.Username, - creds.PrivateKeyPath, - creds.CertificatePath, - "true", // Simple command that succeeds if SSH works - probeTimeout, - nil, - ) - - if runErr == nil && exitCode == 0 { - s.logger.Info("SSH is ready", - "sandbox_id", sandboxID, - "ip", ip, - "attempts", attempt+1, - ) - return nil - } - - // Calculate backoff delay - delay := initialDelay * time.Duration(1< maxDelay { - delay = maxDelay - } - - s.logger.Debug("SSH not ready, retrying", - "sandbox_id", sandboxID, - "ip", ip, - "attempt", attempt+1, - "delay", delay, - "error", runErr, - ) - - select { - case <-time.After(delay): - attempt++ - case <-ctx.Done(): - return fmt.Errorf("SSH readiness timeout after 
%v: %w", timeout, ctx.Err()) - } - } -} - -func (s *Service) validateResources(ctx context.Context, mgr libvirt.Manager, sourceVMName string, cpu, memoryMB int) error { - // 1. Validate source VM - vmValidation, err := mgr.ValidateSourceVM(ctx, sourceVMName) - if err != nil { - s.logger.Warn("source VM validation failed", "source_vm", sourceVMName, "error", err) - } else if !vmValidation.Valid { - return fmt.Errorf("source VM %s validation failed: %s", sourceVMName, strings.Join(vmValidation.Errors, "; ")) - } - - // 2. Check host resources - resourceCheck, err := mgr.CheckHostResources(ctx, cpu, memoryMB) - if err != nil { - s.logger.Warn("host resource check failed", "error", err) - } else if !resourceCheck.Valid { - return fmt.Errorf("host resource check failed: %s", strings.Join(resourceCheck.Errors, "; ")) - } - - return nil -} - -func (s *Service) CreateSandbox(ctx context.Context, sourceSandboxName, agentID string, cpu, memoryMB int, ttlSeconds *int, autoStart, waitForIP bool) (*store.Sandbox, string, error) { - if strings.TrimSpace(sourceSandboxName) == "" { - return nil, "", fmt.Errorf("sourceSandboxName is required") - } - if strings.TrimSpace(agentID) == "" { - return nil, "", fmt.Errorf("agentID is required") - } - if cpu <= 0 { - cpu = s.cfg.DefaultVCPUs - } - if memoryMB <= 0 { - memoryMB = s.cfg.DefaultMemoryMB - } - - // Validate resources before cloning - if err := s.validateResources(ctx, s.mgr, sourceSandboxName, cpu, memoryMB); err != nil { - return nil, "", err - } - - // Always auto-generate sandbox name with sbx- prefix - sandboxName := fmt.Sprintf("sbx-%s", shortID()) - - s.logger.Info("creating sandbox", - "source_vm_name", sourceSandboxName, - "agent_id", agentID, - "sandbox_name", sandboxName, - "cpu", cpu, - "memory_mb", memoryMB, - "auto_start", autoStart, - "wait_for_ip", waitForIP, - ) - - jobID := fmt.Sprintf("JOB-%s", shortID()) - - // Create the VM via libvirt manager by cloning from existing VM - _, err := s.mgr.CloneFromVM(ctx, 
sourceSandboxName, sandboxName, cpu, memoryMB, s.cfg.Network) - if err != nil { - s.logger.Error("failed to clone VM", - "source_vm_name", sourceSandboxName, - "sandbox_name", sandboxName, - "error", err, - ) - return nil, "", fmt.Errorf("clone vm: %w", err) - } - - sb := &store.Sandbox{ - ID: fmt.Sprintf("SBX-%s", shortID()), - JobID: jobID, - AgentID: agentID, - SandboxName: sandboxName, - BaseImage: sourceSandboxName, // Store the source VM name for reference - Network: s.cfg.Network, - State: store.SandboxStateCreated, - TTLSeconds: ttlSeconds, - CreatedAt: s.timeNowFn().UTC(), - UpdatedAt: s.timeNowFn().UTC(), - } - if err := s.store.CreateSandbox(ctx, sb); err != nil { - return nil, "", fmt.Errorf("persist sandbox: %w", err) - } - - s.logger.Debug("sandbox cloned successfully", - "sandbox_id", sb.ID, - "sandbox_name", sandboxName, - ) - - // If autoStart is requested, start the VM immediately - var ip string - if autoStart { - s.logger.Info("auto-starting sandbox", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - ) - - if err := s.mgr.StartVM(ctx, sb.SandboxName); err != nil { - s.logger.Error("auto-start failed", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateError, nil) - return sb, "", fmt.Errorf("auto-start vm: %w", err) - } - - // Update state -> STARTING - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateStarting, nil); err != nil { - return sb, "", err - } - sb.State = store.SandboxStateStarting - - if waitForIP { - s.logger.Info("waiting for IP address", - "sandbox_id", sb.ID, - "timeout", s.cfg.IPDiscoveryTimeout, - ) - - var mac string - ip, mac, err = s.mgr.GetIPAddress(ctx, sb.SandboxName, s.cfg.IPDiscoveryTimeout) - if err != nil { - s.logger.Warn("IP discovery failed", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "error", err, - ) - // Still mark as running even if we couldn't discover the IP - _ = 
s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil) - sb.State = store.SandboxStateRunning - return sb, "", fmt.Errorf("get ip: %w", err) - } - - // Validate IP uniqueness before storing - if err := s.validateIPUniqueness(ctx, sb.ID, ip); err != nil { - s.logger.Error("IP conflict during sandbox creation", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "ip_address", ip, - "mac_address", mac, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil) - sb.State = store.SandboxStateRunning - return sb, "", fmt.Errorf("ip conflict: %w", err) - } - - // Wait for SSH to become ready before marking as RUNNING - if err := s.waitForSSH(ctx, sb.ID, ip, s.cfg.SSHReadinessTimeout); err != nil { - s.logger.Warn("SSH readiness check failed", - "sandbox_id", sb.ID, - "ip_address", ip, - "error", err, - ) - // Don't fail - sandbox is still usable, just may need retries - } - - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, &ip); err != nil { - return sb, ip, err - } - sb.State = store.SandboxStateRunning - sb.IPAddress = &ip - } else { - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil); err != nil { - return sb, "", err - } - sb.State = store.SandboxStateRunning - } - } - - s.logger.Info("sandbox created", - "sandbox_id", sb.ID, - "state", sb.State, - "ip_address", ip, - ) - - s.telemetry.Track("sandbox_create", map[string]interface{}{ - "sandbox_id": sb.ID, - "base_image": sb.BaseImage, - "cpu": cpu, - "memory_mb": memoryMB, - "auto_start": autoStart, - "wait_for_ip": waitForIP, - "agent_id": agentID, - "success": true, - }) - - return sb, ip, nil -} - -// CreateSandboxOnHost creates a sandbox on a specific remote host. -// This is used when multi-host support is enabled and the source VM is on a remote host. 
-func (s *Service) CreateSandboxOnHost(ctx context.Context, host *config.HostConfig, sourceSandboxName, agentID string, cpu, memoryMB int, ttlSeconds *int, autoStart, waitForIP bool) (*store.Sandbox, string, error) { - if host == nil { - return nil, "", fmt.Errorf("host is required for remote sandbox creation") - } - if strings.TrimSpace(sourceSandboxName) == "" { - return nil, "", fmt.Errorf("sourceSandboxName is required") - } - if strings.TrimSpace(agentID) == "" { - return nil, "", fmt.Errorf("agentID is required") - } - if cpu <= 0 { - cpu = s.cfg.DefaultVCPUs - } - if memoryMB <= 0 { - memoryMB = s.cfg.DefaultMemoryMB - } - // Always auto-generate sandbox name with sbx- prefix - sandboxName := fmt.Sprintf("sbx-%s", shortID()) - - s.logger.Info("creating sandbox on remote host", - "host_name", host.Name, - "host_address", host.Address, - "source_vm_name", sourceSandboxName, - "agent_id", agentID, - "sandbox_name", sandboxName, - "cpu", cpu, - "memory_mb", memoryMB, - "auto_start", autoStart, - "wait_for_ip", waitForIP, - ) - - // Create a remote manager for this host - remoteMgr := libvirt.NewRemoteVirshManager(*host, s.virshCfg, s.logger) - - // Validate resources before cloning on remote host - if err := s.validateResources(ctx, remoteMgr, sourceSandboxName, cpu, memoryMB); err != nil { - return nil, "", err - } - - jobID := fmt.Sprintf("JOB-%s", shortID()) - - // Create the VM via remote libvirt manager - _, err := remoteMgr.CloneFromVM(ctx, sourceSandboxName, sandboxName, cpu, memoryMB, s.cfg.Network) - if err != nil { - s.logger.Error("failed to clone VM on remote host", - "host", host.Name, - "source_vm_name", sourceSandboxName, - "sandbox_name", sandboxName, - "error", err, - ) - return nil, "", fmt.Errorf("clone vm on host %s: %w", host.Name, err) - } - - hostName := host.Name - hostAddr := host.Address - sb := &store.Sandbox{ - ID: fmt.Sprintf("SBX-%s", shortID()), - JobID: jobID, - AgentID: agentID, - SandboxName: sandboxName, - BaseImage: 
sourceSandboxName, - Network: s.cfg.Network, - State: store.SandboxStateCreated, - TTLSeconds: ttlSeconds, - HostName: &hostName, - HostAddress: &hostAddr, - CreatedAt: s.timeNowFn().UTC(), - UpdatedAt: s.timeNowFn().UTC(), - } - if err := s.store.CreateSandbox(ctx, sb); err != nil { - return nil, "", fmt.Errorf("persist sandbox: %w", err) - } - - s.logger.Debug("sandbox cloned on remote host", - "sandbox_id", sb.ID, - "sandbox_name", sandboxName, - "host", host.Name, - ) - - // If autoStart is requested, start the VM immediately - var ip string - if autoStart { - s.logger.Info("auto-starting sandbox on remote host", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "host", host.Name, - ) - - if err := remoteMgr.StartVM(ctx, sb.SandboxName); err != nil { - s.logger.Error("auto-start failed on remote host", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "host", host.Name, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateError, nil) - return sb, "", fmt.Errorf("auto-start vm on host %s: %w", host.Name, err) - } - - // Update state -> STARTING - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateStarting, nil); err != nil { - return sb, "", err - } - sb.State = store.SandboxStateStarting - - if waitForIP { - s.logger.Info("waiting for IP address on remote host", - "sandbox_id", sb.ID, - "host", host.Name, - "timeout", s.cfg.IPDiscoveryTimeout, - ) - - var mac string - ip, mac, err = remoteMgr.GetIPAddress(ctx, sb.SandboxName, s.cfg.IPDiscoveryTimeout) - if err != nil { - s.logger.Warn("IP discovery failed on remote host", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "host", host.Name, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil) - sb.State = store.SandboxStateRunning - return sb, "", fmt.Errorf("get ip on host %s: %w", host.Name, err) - } - - // Validate IP uniqueness - if err := s.validateIPUniqueness(ctx, sb.ID, ip); err != nil { - 
s.logger.Error("IP conflict on remote host", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "ip_address", ip, - "mac_address", mac, - "host", host.Name, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil) - sb.State = store.SandboxStateRunning - return sb, "", fmt.Errorf("ip conflict on host %s: %w", host.Name, err) - } - - // Wait for SSH to become ready before marking as RUNNING - if err := s.waitForSSH(ctx, sb.ID, ip, s.cfg.SSHReadinessTimeout); err != nil { - s.logger.Warn("SSH readiness check failed on remote host", - "sandbox_id", sb.ID, - "ip_address", ip, - "host", host.Name, - "error", err, - ) - // Don't fail - sandbox is still usable, just may need retries - } - - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, &ip); err != nil { - return sb, ip, err - } - sb.State = store.SandboxStateRunning - sb.IPAddress = &ip - } else { - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil); err != nil { - return sb, "", err - } - sb.State = store.SandboxStateRunning - } - } - - s.logger.Info("sandbox created on remote host", - "sandbox_id", sb.ID, - "host", host.Name, - "state", sb.State, - "ip_address", ip, - ) - - s.telemetry.Track("sandbox_create", map[string]interface{}{ - "sandbox_id": sb.ID, - "base_image": sb.BaseImage, - "cpu": cpu, - "memory_mb": memoryMB, - "auto_start": autoStart, - "wait_for_ip": waitForIP, - "agent_id": agentID, - "host_name": host.Name, - "host_address": host.Address, - "success": true, - }) - - return sb, ip, nil -} - -func (s *Service) GetSandboxes(ctx context.Context, filter store.SandboxFilter, opts *store.ListOptions) ([]*store.Sandbox, error) { - return s.store.ListSandboxes(ctx, filter, opts) -} - -// GetSandbox retrieves a single sandbox by ID. 
-func (s *Service) GetSandbox(ctx context.Context, sandboxID string) (*store.Sandbox, error) { - if strings.TrimSpace(sandboxID) == "" { - return nil, fmt.Errorf("sandboxID is required") - } - return s.store.GetSandbox(ctx, sandboxID) -} - -// GetSandboxCommands retrieves all commands executed in a sandbox. -func (s *Service) GetSandboxCommands(ctx context.Context, sandboxID string, opts *store.ListOptions) ([]*store.Command, error) { - if strings.TrimSpace(sandboxID) == "" { - return nil, fmt.Errorf("sandboxID is required") - } - // Verify sandbox exists - if _, err := s.store.GetSandbox(ctx, sandboxID); err != nil { - return nil, err - } - return s.store.ListCommands(ctx, sandboxID, opts) -} - -// InjectSSHKey injects a public key for a user into the VM disk prior to boot. -func (s *Service) InjectSSHKey(ctx context.Context, sandboxID, username, publicKey string) error { - if strings.TrimSpace(sandboxID) == "" { - return fmt.Errorf("sandboxID is required") - } - if strings.TrimSpace(username) == "" { - return fmt.Errorf("username is required") - } - if strings.TrimSpace(publicKey) == "" { - return fmt.Errorf("publicKey is required") - } - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return err - } - if err := s.mgr.InjectSSHKey(ctx, sb.SandboxName, username, publicKey); err != nil { - return fmt.Errorf("inject ssh key: %w", err) - } - sb.UpdatedAt = s.timeNowFn().UTC() - return s.store.UpdateSandbox(ctx, sb) -} - -// StartSandbox boots the VM and optionally waits for IP discovery. -// Returns the discovered IP if waitForIP is true and discovery succeeds (empty string otherwise). 
-func (s *Service) StartSandbox(ctx context.Context, sandboxID string, waitForIP bool) (string, error) { - if strings.TrimSpace(sandboxID) == "" { - return "", fmt.Errorf("sandboxID is required") - } - - s.logger.Info("starting sandbox", - "sandbox_id", sandboxID, - "wait_for_ip", waitForIP, - ) - - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return "", err - } - - s.logger.Debug("sandbox found", - "sandbox_name", sb.SandboxName, - "current_state", sb.State, - ) - - if err := s.mgr.StartVM(ctx, sb.SandboxName); err != nil { - s.logger.Error("failed to start VM", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateError, nil) - return "", fmt.Errorf("start vm: %w", err) - } - - // Update state -> STARTING - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateStarting, nil); err != nil { - return "", err - } - - var ip string - if waitForIP { - s.logger.Info("waiting for IP address", - "sandbox_id", sb.ID, - "timeout", s.cfg.IPDiscoveryTimeout, - ) - - var mac string - ip, mac, err = s.mgr.GetIPAddress(ctx, sb.SandboxName, s.cfg.IPDiscoveryTimeout) - if err != nil { - s.logger.Warn("IP discovery failed", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "error", err, - ) - // Still mark as running even if we couldn't discover the IP - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil) - return "", fmt.Errorf("get ip: %w", err) - } - - // Validate IP uniqueness before storing - if err := s.validateIPUniqueness(ctx, sb.ID, ip); err != nil { - s.logger.Error("IP conflict during sandbox start", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "ip_address", ip, - "mac_address", mac, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil) - return "", fmt.Errorf("ip conflict: %w", err) - } - - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, 
&ip); err != nil { - return "", err - } - } else { - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil); err != nil { - return "", err - } - } - - s.logger.Info("sandbox started", - "sandbox_id", sb.ID, - "ip_address", ip, - ) - - s.telemetry.Track("sandbox_start", map[string]interface{}{ - "sandbox_id": sb.ID, - "wait_for_ip": waitForIP, - "success": true, - }) - - return ip, nil -} - -// DiscoverIP attempts to discover the IP address for a sandbox. -// This is useful for async workflows where wait_for_ip was false during start. -// Returns the discovered IP address, or an error if discovery fails. -func (s *Service) DiscoverIP(ctx context.Context, sandboxID string) (string, error) { - if strings.TrimSpace(sandboxID) == "" { - return "", fmt.Errorf("sandboxID is required") - } - - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return "", err - } - - // Check if VM is in a state where IP discovery makes sense - if sb.State != store.SandboxStateRunning && sb.State != store.SandboxStateStarting { - return "", fmt.Errorf("sandbox is in state %s, must be running or starting for IP discovery", sb.State) - } - - s.logger.Info("discovering IP for sandbox", - "sandbox_id", sandboxID, - "sandbox_name", sb.SandboxName, - ) - - ip, mac, err := s.mgr.GetIPAddress(ctx, sb.SandboxName, s.cfg.IPDiscoveryTimeout) - if err != nil { - return "", fmt.Errorf("ip discovery failed: %w", err) - } - - // Validate IP uniqueness - if err := s.validateIPUniqueness(ctx, sb.ID, ip); err != nil { - s.logger.Warn("IP conflict during discovery", - "sandbox_id", sb.ID, - "ip_address", ip, - "mac_address", mac, - "error", err, - ) - return "", fmt.Errorf("ip conflict: %w", err) - } - - // Update the sandbox with the discovered IP - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, &ip); err != nil { - return "", fmt.Errorf("persist ip: %w", err) - } - - s.logger.Info("IP discovered and stored", - "sandbox_id", sandboxID, - 
"ip_address", ip, - "mac_address", mac, - ) - - return ip, nil -} - -// StopSandbox gracefully shuts down the VM or forces if force is true. -func (s *Service) StopSandbox(ctx context.Context, sandboxID string, force bool) error { - if strings.TrimSpace(sandboxID) == "" { - return fmt.Errorf("sandboxID is required") - } - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return err - } - if err := s.mgr.StopVM(ctx, sb.SandboxName, force); err != nil { - return fmt.Errorf("stop vm: %w", err) - } - err = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateStopped, sb.IPAddress) - if err == nil { - s.telemetry.Track("sandbox_stop", map[string]interface{}{ - "sandbox_id": sb.ID, - "force": force, - "success": true, - }) - } - return err -} - -// DestroySandbox forcibly destroys and undefines the VM and removes its workspace. -// The sandbox is then soft-deleted from the store. Returns the sandbox info after destruction. -func (s *Service) DestroySandbox(ctx context.Context, sandboxID string) (*store.Sandbox, error) { - if strings.TrimSpace(sandboxID) == "" { - return nil, fmt.Errorf("sandboxID is required") - } - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return nil, err - } - - // Cleanup managed SSH keys for this sandbox (non-fatal if it fails) - if s.keyMgr != nil { - if err := s.keyMgr.CleanupSandbox(ctx, sandboxID); err != nil { - s.logger.Warn("failed to cleanup SSH keys", - "sandbox_id", sandboxID, - "error", err, - ) - } - } - - if err := s.mgr.DestroyVM(ctx, sb.SandboxName); err != nil { - return nil, fmt.Errorf("destroy vm: %w", err) - } - if err := s.store.DeleteSandbox(ctx, sandboxID); err != nil { - return nil, err - } - // Update state to reflect destruction - sb.State = store.SandboxStateDestroyed - - s.telemetry.Track("sandbox_destroy", map[string]interface{}{ - "sandbox_id": sandboxID, - "success": true, - }) - - return sb, nil -} - -// CreateSnapshot creates a snapshot and persists a Snapshot record. 
-func (s *Service) CreateSnapshot(ctx context.Context, sandboxID, name string, external bool) (*store.Snapshot, error) { - if strings.TrimSpace(sandboxID) == "" || strings.TrimSpace(name) == "" { - return nil, fmt.Errorf("sandboxID and name are required") - } - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return nil, err - } - ref, err := s.mgr.CreateSnapshot(ctx, sb.SandboxName, name, external) - if err != nil { - return nil, fmt.Errorf("create snapshot: %w", err) - } - sn := &store.Snapshot{ - ID: fmt.Sprintf("SNP-%s", shortID()), - SandboxID: sb.ID, - Name: ref.Name, - Kind: snapshotKindFromString(ref.Kind), - Ref: ref.Ref, - CreatedAt: s.timeNowFn().UTC(), - } - if err := s.store.CreateSnapshot(ctx, sn); err != nil { - return nil, err - } - - s.telemetry.Track("snapshot_create", map[string]interface{}{ - "sandbox_id": sandboxID, - "snapshot_name": name, - "snapshot_kind": ref.Kind, - "external": external, - "success": true, - }) - - return sn, nil -} - -// DiffSnapshots computes a normalized change set between two snapshots and persists a Diff. -// Note: This implementation currently aggregates command history into CommandsRun and -// leaves file/package/service diffs empty. A dedicated diff engine should populate these fields -// by mounting snapshots and computing differences. -func (s *Service) DiffSnapshots(ctx context.Context, sandboxID, from, to string) (*store.Diff, error) { - if strings.TrimSpace(sandboxID) == "" || strings.TrimSpace(from) == "" || strings.TrimSpace(to) == "" { - return nil, fmt.Errorf("sandboxID, from, to are required") - } - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return nil, err - } - - // Best-effort: get a plan (notes/instructions) from manager; ignore failure. - _, _ = s.mgr.DiffSnapshot(ctx, sb.SandboxName, from, to) - - // For now, compose CommandsRun from command history as partial diff signal. 
- cmds, err := s.store.ListCommands(ctx, sandboxID, &store.ListOptions{OrderBy: "started_at", Asc: true}) - if err != nil && !errors.Is(err, store.ErrNotFound) { - return nil, fmt.Errorf("list commands: %w", err) - } - var cr []store.CommandSummary - for _, c := range cmds { - cr = append(cr, store.CommandSummary{ - Cmd: c.Command, - ExitCode: c.ExitCode, - At: c.EndedAt, - }) - } - - diff := &store.Diff{ - ID: fmt.Sprintf("DIF-%s", shortID()), - SandboxID: sandboxID, - FromSnapshot: from, - ToSnapshot: to, - DiffJSON: store.ChangeDiff{ - FilesModified: []string{}, - FilesAdded: []string{}, - FilesRemoved: []string{}, - PackagesAdded: []store.PackageInfo{}, - PackagesRemoved: []store.PackageInfo{}, - ServicesChanged: []store.ServiceChange{}, - CommandsRun: cr, - }, - CreatedAt: s.timeNowFn().UTC(), - } - if err := s.store.SaveDiff(ctx, diff); err != nil { - return nil, err - } - - s.telemetry.Track("snapshot_diff", map[string]interface{}{ - "sandbox_id": sandboxID, - "from_snapshot": from, - "to_snapshot": to, - "success": true, - }) - - return diff, nil -} - -// RunCommand executes a command inside the sandbox via SSH. -// If privateKeyPath is empty and a key manager is configured, managed credentials will be used. -// Otherwise, username and privateKeyPath are required for SSH auth. 
-func (s *Service) RunCommand(ctx context.Context, sandboxID, username, privateKeyPath, command string, timeout time.Duration, env map[string]string) (*store.Command, error) { - if strings.TrimSpace(sandboxID) == "" { - return nil, fmt.Errorf("sandboxID is required") - } - if strings.TrimSpace(command) == "" { - return nil, fmt.Errorf("command is required") - } - if timeout <= 0 { - timeout = s.cfg.CommandTimeout - } - - // Determine if we're using managed credentials - var useManagedCreds bool - var certPath string - if strings.TrimSpace(privateKeyPath) == "" { - if s.keyMgr == nil { - return nil, fmt.Errorf("privateKeyPath is required (no key manager configured)") - } - useManagedCreds = true - // Default username for managed credentials - if strings.TrimSpace(username) == "" { - username = "sandbox" - } - } else { - // Traditional mode: username is required - if strings.TrimSpace(username) == "" { - return nil, fmt.Errorf("username is required") - } - } - - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return nil, err - } - - // Always re-discover IP to ensure we have the correct one for THIS sandbox. - // This is important because: - // 1. Cached IPs might be stale if the VM was restarted - // 2. Another sandbox might have been assigned the same IP erroneously - // 3. 
DHCP leases can change - ip, mac, err := s.mgr.GetIPAddress(ctx, sb.SandboxName, s.cfg.IPDiscoveryTimeout) - if err != nil { - return nil, fmt.Errorf("discover ip for sandbox %s (vm: %s): %w", sb.ID, sb.SandboxName, err) - } - - // Check if this IP is already assigned to a DIFFERENT running sandbox - if err := s.validateIPUniqueness(ctx, sb.ID, ip); err != nil { - s.logger.Warn("IP conflict detected", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "ip_address", ip, - "mac_address", mac, - "error", err, - ) - return nil, fmt.Errorf("ip conflict: %w", err) - } - - // Update IP if it changed or wasn't set - if sb.IPAddress == nil || *sb.IPAddress != ip { - if err := s.store.UpdateSandboxState(ctx, sb.ID, sb.State, &ip); err != nil { - return nil, fmt.Errorf("persist ip: %w", err) - } - } - - // Get managed credentials if needed - if useManagedCreds { - creds, err := s.keyMgr.GetCredentials(ctx, sandboxID, username) - if err != nil { - return nil, fmt.Errorf("get managed credentials: %w", err) - } - privateKeyPath = creds.PrivateKeyPath - certPath = creds.CertificatePath - username = creds.Username - } - - cmdID := fmt.Sprintf("CMD-%s", shortID()) - now := s.timeNowFn().UTC() - - // Encode environment for persistence. 
- var envJSON *string - if len(env) > 0 { - b, _ := json.Marshal(env) - tmp := string(b) - envJSON = &tmp - } - - // Execute SSH command - var stdout, stderr string - var code int - var runErr error - if useManagedCreds { - stdout, stderr, code, runErr = s.ssh.RunWithCert(ctx, ip, username, privateKeyPath, certPath, commandWithEnv(command, env), timeout, env) - } else { - stdout, stderr, code, runErr = s.ssh.Run(ctx, ip, username, privateKeyPath, commandWithEnv(command, env), timeout, env) - } - - cmd := &store.Command{ - ID: cmdID, - SandboxID: sandboxID, - Command: command, - EnvJSON: envJSON, - Stdout: stdout, - Stderr: stderr, - ExitCode: code, - StartedAt: now, - EndedAt: s.timeNowFn().UTC(), - } - if err := s.store.SaveCommand(ctx, cmd); err != nil { - return nil, fmt.Errorf("save command: %w", err) - } - - s.telemetry.Track("sandbox_command", map[string]interface{}{ - "sandbox_id": sandboxID, - "command_id": cmdID, - "exit_code": code, - "duration_ms": cmd.EndedAt.Sub(cmd.StartedAt).Milliseconds(), - "success": true, - }) - - if runErr != nil { - return cmd, fmt.Errorf("ssh run: %w", runErr) - } - return cmd, nil -} - -// SSHRunner executes commands on a remote host via SSH. -type SSHRunner interface { - // Run executes command on user@addr using the provided private key file. - // Returns stdout, stderr, and exit code. Implementations should use StrictHostKeyChecking=no - // or a known_hosts strategy appropriate for ephemeral sandboxes. - Run(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string) (stdout, stderr string, exitCode int, err error) - - // RunWithCert executes command using certificate-based authentication. - // The certPath should point to the SSH certificate file (key-cert.pub). 
- RunWithCert(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string) (stdout, stderr string, exitCode int, err error) -} - -// DefaultSSHRunner is a simple implementation backed by the system's ssh binary. -type DefaultSSHRunner struct { - // ProxyJump specifies a jump host for SSH connections. - // Format: "user@host:port" or just "host" for default user/port. - // If empty, direct connections are made. - ProxyJump string - - // MaxRetries is the maximum number of retry attempts for transient SSH failures. - // Default: 5 - MaxRetries int - - // InitialRetryDelay is the initial delay before the first retry. - // Default: 2s - InitialRetryDelay time.Duration - - // MaxRetryDelay is the maximum delay between retries. - // Default: 30s - MaxRetryDelay time.Duration -} - -// sshRetryConfig returns the retry configuration with defaults applied. -func (r *DefaultSSHRunner) sshRetryConfig() (maxRetries int, initialDelay, maxDelay time.Duration) { - maxRetries = r.MaxRetries - if maxRetries <= 0 { - maxRetries = 5 - } - initialDelay = r.InitialRetryDelay - if initialDelay <= 0 { - initialDelay = 2 * time.Second - } - maxDelay = r.MaxRetryDelay - if maxDelay <= 0 { - maxDelay = 30 * time.Second - } - return -} - -// isRetryableSSHError checks if the error indicates a transient SSH failure -// that should be retried (e.g., connection refused, sshd not ready). 
-func isRetryableSSHError(stderr string, exitCode int) bool { - // Exit code 255 indicates SSH connection failure - if exitCode != 255 { - return false - } - // Check for common transient connection errors - retryablePatterns := []string{ - "Connection refused", - "Connection closed", - "Connection reset", - "Connection timed out", - "No route to host", - "Network is unreachable", - "Host is down", - "port 22: Connection refused", - "port 65535", // Malformed connection error - "UNKNOWN", // SSH parsing error during connection failure - } - stderrLower := strings.ToLower(stderr) - for _, pattern := range retryablePatterns { - if strings.Contains(stderrLower, strings.ToLower(pattern)) { - return true - } - } - return false -} - -// Run implements SSHRunner.Run using the local ssh client. -// It disables strict host key checking and sets a connect timeout. -// It assumes the VM is reachable on the default SSH port (22). -// Includes retry logic with exponential backoff for transient connection failures. 
-func (r *DefaultSSHRunner) Run(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, _ map[string]string) (string, string, int, error) { - // Pre-flight check: verify the private key file exists and has correct permissions - keyInfo, err := os.Stat(privateKeyPath) - if err != nil { - if os.IsNotExist(err) { - return "", "", 255, fmt.Errorf("ssh key file not found: %s", privateKeyPath) - } - return "", "", 255, fmt.Errorf("ssh key file error: %w", err) - } - // Check permissions - SSH keys should not be world-readable - if keyInfo.Mode().Perm()&0o077 != 0 { - return "", "", 255, fmt.Errorf("ssh key file %s has insecure permissions %o (should be 0600 or stricter)", privateKeyPath, keyInfo.Mode().Perm()) - } - - if _, ok := ctx.Deadline(); !ok && timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - args := []string{ - "-i", privateKeyPath, - "-o", "StrictHostKeyChecking=no", - "-o", "UserKnownHostsFile=/dev/null", - "-o", "ConnectTimeout=15", - } - // Add ProxyJump if configured - if r.ProxyJump != "" { - args = append(args, "-J", r.ProxyJump) - } - args = append(args, - fmt.Sprintf("%s@%s", user, addr), - "--", - command, - ) - - maxRetries, initialDelay, maxDelay := r.sshRetryConfig() - var lastStdout, lastStderr string - var lastExitCode int - var lastErr error - - for attempt := 0; attempt <= maxRetries; attempt++ { - // Check context before each attempt - if ctx.Err() != nil { - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("context cancelled after %d attempts: %w", attempt, ctx.Err()) - } - - var stdout, stderr bytes.Buffer - cmd := exec.CommandContext(ctx, "ssh", args...) 
- cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err = cmd.Run() - exitCode := 0 - if err != nil { - // Best-effort extract exit code - var ee *exec.ExitError - if errors.As(err, &ee) { - exitCode = ee.ExitCode() - } else { - exitCode = 255 - } - stderrStr := stderr.String() - - // Check if this is a retryable error - if attempt < maxRetries && isRetryableSSHError(stderrStr, exitCode) { - // Calculate backoff delay: 2s, 4s, 8s, 16s, 30s (capped) - delay := initialDelay * time.Duration(1< maxDelay { - delay = maxDelay - } - slog.Default().Warn("SSH connection failed, retrying", - "attempt", attempt+1, - "max_retries", maxRetries, - "delay", delay, - "addr", addr, - "stderr", stderrStr, - ) - select { - case <-time.After(delay): - // Continue to next attempt - case <-ctx.Done(): - return stdout.String(), stderrStr, exitCode, fmt.Errorf("context cancelled during retry backoff: %w", ctx.Err()) - } - lastStdout, lastStderr, lastExitCode, lastErr = stdout.String(), stderrStr, exitCode, err - continue - } - - // Not retryable or max retries exceeded - if stderrStr != "" { - err = fmt.Errorf("%w: %s", err, stderrStr) - } - return stdout.String(), stderrStr, exitCode, err - } - - // Success - if cmd.ProcessState != nil { - exitCode = cmd.ProcessState.ExitCode() - } - return stdout.String(), stderr.String(), exitCode, nil - } - - // Should not reach here, but return last error if we do - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("max retries (%d) exceeded: %w", maxRetries, lastErr) -} - -// RunWithCert implements SSHRunner.RunWithCert using the local ssh client with certificate auth. -// Includes retry logic with exponential backoff for transient connection failures. 
-func (r *DefaultSSHRunner) RunWithCert(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, _ map[string]string) (string, string, int, error) { - // Pre-flight check: verify the private key file exists and has correct permissions - keyInfo, err := os.Stat(privateKeyPath) - if err != nil { - if os.IsNotExist(err) { - return "", "", 255, fmt.Errorf("ssh key file not found: %s", privateKeyPath) - } - return "", "", 255, fmt.Errorf("ssh key file error: %w", err) - } - if keyInfo.Mode().Perm()&0o077 != 0 { - return "", "", 255, fmt.Errorf("ssh key file %s has insecure permissions %o (should be 0600 or stricter)", privateKeyPath, keyInfo.Mode().Perm()) - } - - // Check certificate file exists - if _, err := os.Stat(certPath); err != nil { - if os.IsNotExist(err) { - return "", "", 255, fmt.Errorf("ssh certificate file not found: %s", certPath) - } - return "", "", 255, fmt.Errorf("ssh certificate file error: %w", err) - } - - if _, ok := ctx.Deadline(); !ok && timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - args := []string{ - "-i", privateKeyPath, - "-o", fmt.Sprintf("CertificateFile=%s", certPath), - "-o", "StrictHostKeyChecking=no", - "-o", "UserKnownHostsFile=/dev/null", - "-o", "ConnectTimeout=15", - } - // Add ProxyJump if configured - if r.ProxyJump != "" { - args = append(args, "-J", r.ProxyJump) - } - args = append(args, - fmt.Sprintf("%s@%s", user, addr), - "--", - command, - ) - - maxRetries, initialDelay, maxDelay := r.sshRetryConfig() - var lastStdout, lastStderr string - var lastExitCode int - var lastErr error - - for attempt := 0; attempt <= maxRetries; attempt++ { - // Check context before each attempt - if ctx.Err() != nil { - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("context cancelled after %d attempts: %w", attempt, ctx.Err()) - } - - var stdout, stderr bytes.Buffer - cmd := exec.CommandContext(ctx, "ssh", args...) 
- cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err = cmd.Run() - exitCode := 0 - if err != nil { - var ee *exec.ExitError - if errors.As(err, &ee) { - exitCode = ee.ExitCode() - } else { - exitCode = 255 - } - stderrStr := stderr.String() - - // Check if this is a retryable error - if attempt < maxRetries && isRetryableSSHError(stderrStr, exitCode) { - // Calculate backoff delay: 2s, 4s, 8s, 16s, 30s (capped) - delay := initialDelay * time.Duration(1< maxDelay { - delay = maxDelay - } - slog.Default().Warn("SSH connection failed (cert auth), retrying", - "attempt", attempt+1, - "max_retries", maxRetries, - "delay", delay, - "addr", addr, - "stderr", stderrStr, - ) - select { - case <-time.After(delay): - // Continue to next attempt - case <-ctx.Done(): - return stdout.String(), stderrStr, exitCode, fmt.Errorf("context cancelled during retry backoff: %w", ctx.Err()) - } - lastStdout, lastStderr, lastExitCode, lastErr = stdout.String(), stderrStr, exitCode, err - continue - } - - // Not retryable or max retries exceeded - if stderrStr != "" { - err = fmt.Errorf("%w: %s", err, stderrStr) - } - return stdout.String(), stderrStr, exitCode, err - } - - // Success - if cmd.ProcessState != nil { - exitCode = cmd.ProcessState.ExitCode() - } - return stdout.String(), stderr.String(), exitCode, nil - } - - // Should not reach here, but return last error if we do - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("max retries (%d) exceeded: %w", maxRetries, lastErr) -} - -// Helpers - -func snapshotKindFromString(k string) store.SnapshotKind { - switch strings.ToUpper(k) { - case "EXTERNAL": - return store.SnapshotKindExternal - default: - return store.SnapshotKindInternal - } -} - -func shortID() string { - id := uuid.NewString() - if i := strings.IndexByte(id, '-'); i > 0 { - return id[:i] - } - return id -} - -func commandWithEnv(cmd string, env map[string]string) string { - if len(env) == 0 { - // Execute in login shell to emulate typical interactive environment 
- return fmt.Sprintf("bash -lc %q", cmd) - } - var exports []string - for k, v := range env { - exports = append(exports, fmt.Sprintf(`export %s=%s`, safeShellIdent(k), shellQuote(v))) - } - preamble := strings.Join(exports, "; ") + "; " - return fmt.Sprintf("bash -lc %q", preamble+cmd) -} - -func shellQuote(s string) string { - // Basic single-quote shell escaping - return "'" + strings.ReplaceAll(s, "'", `'\''`) + "'" -} - -func safeShellIdent(s string) string { - // Allow alnum and underscore, replace others with underscore - var b strings.Builder - for _, r := range s { - if (r >= 'a' && r <= 'z') || - (r >= 'A' && r <= 'Z') || - (r >= '0' && r <= '9') || - r == '_' { - b.WriteRune(r) - } else { - b.WriteRune('_') - } - } - out := b.String() - if out == "" { - return "VAR" - } - return out -} diff --git a/fluid-remote/internal/vm/service_test.go b/fluid-remote/internal/vm/service_test.go deleted file mode 100755 index 53431a19..00000000 --- a/fluid-remote/internal/vm/service_test.go +++ /dev/null @@ -1,866 +0,0 @@ -package vm - -import ( - "context" - "errors" - "log/slog" - "strings" - "testing" - "time" - - "github.com/aspectrr/fluid.sh/fluid-remote/internal/libvirt" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/store" - "github.com/aspectrr/fluid.sh/fluid-remote/internal/telemetry" -) - -// mockStore implements store.Store for testing -type mockStore struct { - getSandboxFn func(ctx context.Context, id string) (*store.Sandbox, error) - listCommandsFn func(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Command, error) - listSandboxesFn func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) -} - -func (m *mockStore) Config() store.Config { return store.Config{} } -func (m *mockStore) Ping(ctx context.Context) error { - return nil -} - -func (m *mockStore) WithTx(ctx context.Context, fn func(tx store.DataStore) error) error { - return fn(m) -} -func (m *mockStore) Close() error { 
return nil } - -func (m *mockStore) CreateSandbox(ctx context.Context, sb *store.Sandbox) error { - return nil -} - -func (m *mockStore) GetSandbox(ctx context.Context, id string) (*store.Sandbox, error) { - if m.getSandboxFn != nil { - return m.getSandboxFn(ctx, id) - } - return nil, store.ErrNotFound -} - -func (m *mockStore) GetSandboxByVMName(ctx context.Context, vmName string) (*store.Sandbox, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListSandboxes(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - if m.listSandboxesFn != nil { - return m.listSandboxesFn(ctx, filter, opt) - } - return nil, nil -} - -func (m *mockStore) UpdateSandbox(ctx context.Context, sb *store.Sandbox) error { - return nil -} - -func (m *mockStore) UpdateSandboxState(ctx context.Context, id string, newState store.SandboxState, ipAddr *string) error { - return nil -} - -func (m *mockStore) DeleteSandbox(ctx context.Context, id string) error { - return nil -} - -func (m *mockStore) ListExpiredSandboxes(ctx context.Context, defaultTTL time.Duration) ([]*store.Sandbox, error) { - return nil, nil -} - -func (m *mockStore) CreateSnapshot(ctx context.Context, sn *store.Snapshot) error { - return nil -} - -func (m *mockStore) GetSnapshot(ctx context.Context, id string) (*store.Snapshot, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetSnapshotByName(ctx context.Context, sandboxID, name string) (*store.Snapshot, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListSnapshots(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Snapshot, error) { - return nil, nil -} - -func (m *mockStore) SaveCommand(ctx context.Context, cmd *store.Command) error { - return nil -} - -func (m *mockStore) GetCommand(ctx context.Context, id string) (*store.Command, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListCommands(ctx context.Context, sandboxID string, opt 
*store.ListOptions) ([]*store.Command, error) { - if m.listCommandsFn != nil { - return m.listCommandsFn(ctx, sandboxID, opt) - } - return nil, nil -} - -func (m *mockStore) SaveDiff(ctx context.Context, d *store.Diff) error { - return nil -} - -func (m *mockStore) GetDiff(ctx context.Context, id string) (*store.Diff, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetDiffBySnapshots(ctx context.Context, sandboxID, fromSnapshot, toSnapshot string) (*store.Diff, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) CreateChangeSet(ctx context.Context, cs *store.ChangeSet) error { - return nil -} - -func (m *mockStore) GetChangeSet(ctx context.Context, id string) (*store.ChangeSet, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetChangeSetByJob(ctx context.Context, jobID string) (*store.ChangeSet, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) CreatePublication(ctx context.Context, p *store.Publication) error { - return nil -} - -func (m *mockStore) UpdatePublicationStatus(ctx context.Context, id string, status store.PublicationStatus, commitSHA, prURL, errMsg *string) error { - return nil -} - -func (m *mockStore) GetPublication(ctx context.Context, id string) (*store.Publication, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) CreatePlaybook(ctx context.Context, pb *store.Playbook) error { - return nil -} - -func (m *mockStore) GetPlaybook(ctx context.Context, id string) (*store.Playbook, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetPlaybookByName(ctx context.Context, name string) (*store.Playbook, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListPlaybooks(ctx context.Context, opt *store.ListOptions) ([]*store.Playbook, error) { - return nil, nil -} - -func (m *mockStore) UpdatePlaybook(ctx context.Context, pb *store.Playbook) error { - return nil -} - -func (m *mockStore) DeletePlaybook(ctx context.Context, id string) error { - 
return nil -} - -func (m *mockStore) CreatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { - return nil -} - -func (m *mockStore) GetPlaybookTask(ctx context.Context, id string) (*store.PlaybookTask, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListPlaybookTasks(ctx context.Context, playbookID string, opt *store.ListOptions) ([]*store.PlaybookTask, error) { - return nil, nil -} - -func (m *mockStore) UpdatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { - return nil -} - -func (m *mockStore) DeletePlaybookTask(ctx context.Context, id string) error { - return nil -} - -func (m *mockStore) ReorderPlaybookTasks(ctx context.Context, playbookID string, taskIDs []string) error { - return nil -} - -func (m *mockStore) GetNextTaskPosition(ctx context.Context, playbookID string) (int, error) { - return 0, nil -} - -func TestGetSandbox_Success(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - JobID: "JOB-123", - AgentID: "agent-456", - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - } - - sb, err := svc.GetSandbox(context.Background(), "SBX-123") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if sb == nil { - t.Fatal("expected sandbox, got nil") - } - if sb.ID != "SBX-123" { - t.Errorf("expected ID %q, got %q", "SBX-123", sb.ID) - } - if sb.State != store.SandboxStateRunning { - t.Errorf("expected state %s, got %s", store.SandboxStateRunning, sb.State) - } -} - -func TestGetSandbox_NotFound(t *testing.T) { - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return nil, store.ErrNotFound - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - 
store: mockSt, - timeNowFn: time.Now, - } - - _, err := svc.GetSandbox(context.Background(), "nonexistent-id") - if err == nil { - t.Fatal("expected error, got nil") - } - if !errors.Is(err, store.ErrNotFound) { - t.Errorf("expected ErrNotFound, got %v", err) - } -} - -func TestGetSandbox_EmptyID(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.GetSandbox(context.Background(), "") - if err == nil { - t.Fatal("expected error for empty ID, got nil") - } -} - -func TestGetSandbox_WhitespaceID(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.GetSandbox(context.Background(), " ") - if err == nil { - t.Fatal("expected error for whitespace ID, got nil") - } -} - -func TestGetSandboxCommands_Success(t *testing.T) { - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ID: id}, nil - }, - listCommandsFn: func(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Command, error) { - return []*store.Command{ - { - ID: "CMD-001", - SandboxID: sandboxID, - Command: "ls -la", - Stdout: "total 0\n", - ExitCode: 0, - }, - { - ID: "CMD-002", - SandboxID: sandboxID, - Command: "pwd", - Stdout: "/home/user\n", - ExitCode: 0, - }, - }, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - } - - cmds, err := svc.GetSandboxCommands(context.Background(), "SBX-123", nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if len(cmds) != 2 { - t.Errorf("expected 2 commands, got %d", len(cmds)) - } - if cmds[0].Command != "ls -la" { - t.Errorf("expected command %q, got %q", "ls -la", cmds[0].Command) - } -} - -func TestGetSandboxCommands_SandboxNotFound(t *testing.T) { - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id 
string) (*store.Sandbox, error) { - return nil, store.ErrNotFound - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - } - - _, err := svc.GetSandboxCommands(context.Background(), "nonexistent-id", nil) - if err == nil { - t.Fatal("expected error, got nil") - } - if !errors.Is(err, store.ErrNotFound) { - t.Errorf("expected ErrNotFound, got %v", err) - } -} - -func TestGetSandboxCommands_EmptyID(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.GetSandboxCommands(context.Background(), "", nil) - if err == nil { - t.Fatal("expected error for empty ID, got nil") - } -} - -func TestGetSandboxCommands_WithListOptions(t *testing.T) { - var capturedOpts *store.ListOptions - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ID: id}, nil - }, - listCommandsFn: func(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Command, error) { - capturedOpts = opt - return []*store.Command{}, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - } - - opts := &store.ListOptions{Limit: 10, Offset: 5} - _, err := svc.GetSandboxCommands(context.Background(), "SBX-123", opts) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if capturedOpts == nil { - t.Fatal("expected list options to be passed") - } - if capturedOpts.Limit != 10 { - t.Errorf("expected limit %d, got %d", 10, capturedOpts.Limit) - } - if capturedOpts.Offset != 5 { - t.Errorf("expected offset %d, got %d", 5, capturedOpts.Offset) - } -} - -// mockSSHRunner is a mock implementation of SSHRunner for testing -type mockSSHRunner struct { - runFn func(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string) (stdout, stderr string, exitCode int, err 
error) - runWithCertFn func(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string) (stdout, stderr string, exitCode int, err error) -} - -func (m *mockSSHRunner) Run(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string) (string, string, int, error) { - if m.runFn != nil { - return m.runFn(ctx, addr, user, privateKeyPath, command, timeout, env) - } - return "", "", 0, nil -} - -func (m *mockSSHRunner) RunWithCert(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string) (string, string, int, error) { - if m.runWithCertFn != nil { - return m.runWithCertFn(ctx, addr, user, privateKeyPath, certPath, command, timeout, env) - } - // Fall back to runFn if runWithCertFn is not set - if m.runFn != nil { - return m.runFn(ctx, addr, user, privateKeyPath, command, timeout, env) - } - return "", "", 0, nil -} - -// mockManager is a mock implementation of libvirt.Manager for testing -type mockManager struct { - getIPAddressFn func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) -} - -func (m *mockManager) CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (libvirt.DomainRef, error) { - return libvirt.DomainRef{}, nil -} - -func (m *mockManager) CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (libvirt.DomainRef, error) { - return libvirt.DomainRef{}, nil -} - -func (m *mockManager) InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error { - return nil -} - -func (m *mockManager) StartVM(ctx context.Context, vmName string) error { - return nil -} - -func (m *mockManager) StopVM(ctx context.Context, vmName string, force bool) error { - return nil -} - -func (m *mockManager) DestroyVM(ctx context.Context, vmName string) error { - return nil -} - 
-func (m *mockManager) CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (libvirt.SnapshotRef, error) { - return libvirt.SnapshotRef{}, nil -} - -func (m *mockManager) DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*libvirt.FSComparePlan, error) { - return nil, nil -} - -func (m *mockManager) GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - if m.getIPAddressFn != nil { - return m.getIPAddressFn(ctx, vmName, timeout) - } - return "192.168.1.100", "52:54:00:12:34:56", nil -} - -func (m *mockManager) GetVMState(ctx context.Context, vmName string) (libvirt.VMState, error) { - return libvirt.VMState("running"), nil -} - -func (m *mockManager) ValidateSourceVM(ctx context.Context, vmName string) (*libvirt.VMValidationResult, error) { - return &libvirt.VMValidationResult{Valid: true}, nil -} - -func (m *mockManager) CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) (*libvirt.ResourceCheckResult, error) { - return &libvirt.ResourceCheckResult{Valid: true}, nil -} - -func TestRunCommand_Success(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - // Return empty list - no other sandboxes with this IP - return []*store.Sandbox{}, nil - }, - } - - mockSSH := &mockSSHRunner{ - runFn: func(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string) (string, string, int, error) { - return "file1.txt\nfile2.txt\n", "", 0, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - ssh: mockSSH, - mgr: &mockManager{}, - 
timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - cmd, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "ls -l", 60*time.Second, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if cmd == nil { - t.Fatal("expected command result, got nil") - } - if cmd.Stdout != "file1.txt\nfile2.txt\n" { - t.Errorf("expected stdout %q, got %q", "file1.txt\nfile2.txt\n", cmd.Stdout) - } - if cmd.ExitCode != 0 { - t.Errorf("expected exit code 0, got %d", cmd.ExitCode) - } -} - -func TestRunCommand_SSHConnectionFailed(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - return []*store.Sandbox{}, nil - }, - } - - mockSSH := &mockSSHRunner{ - runFn: func(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string) (string, string, int, error) { - return "", "ssh: connect to host 192.168.1.100 port 22: Connection refused", 255, errors.New("exit status 255: ssh: connect to host 192.168.1.100 port 22: Connection refused") - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - ssh: mockSSH, - mgr: &mockManager{}, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - cmd, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "ls -l", 60*time.Second, nil) - - // Should return error but also the command with stderr - if err == nil { - t.Fatal("expected error for SSH connection failure") - } - if cmd == nil { - t.Fatal("expected command result with stderr 
even on SSH failure") - } - if cmd.ExitCode != 255 { - t.Errorf("expected exit code 255, got %d", cmd.ExitCode) - } - if cmd.Stderr != "ssh: connect to host 192.168.1.100 port 22: Connection refused" { - t.Errorf("expected stderr to contain SSH error, got %q", cmd.Stderr) - } -} - -func TestRunCommand_CommandFailed(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - return []*store.Sandbox{}, nil - }, - } - - mockSSH := &mockSSHRunner{ - runFn: func(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string) (string, string, int, error) { - // Command ran but returned non-zero exit code (not an SSH error) - return "", "ls: cannot access '/nonexistent': No such file or directory", 2, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - ssh: mockSSH, - mgr: &mockManager{}, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - cmd, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "ls /nonexistent", 60*time.Second, nil) - if err != nil { - t.Fatalf("unexpected error for command with non-zero exit: %v", err) - } - - if cmd == nil { - t.Fatal("expected command result, got nil") - } - if cmd.ExitCode != 2 { - t.Errorf("expected exit code 2, got %d", cmd.ExitCode) - } - if cmd.Stderr == "" { - t.Error("expected stderr to contain error message") - } -} - -func TestRunCommand_EmptySandboxID(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := 
svc.RunCommand(context.Background(), "", "ubuntu", "/path/to/key", "ls", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for empty sandbox ID") - } -} - -func TestRunCommand_EmptyUsername(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.RunCommand(context.Background(), "SBX-123", "", "/path/to/key", "ls", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for empty username") - } -} - -func TestRunCommand_EmptyPrivateKeyPath(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "", "ls", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for empty private key path") - } -} - -func TestRunCommand_EmptyCommand(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for empty command") - } -} - -func TestRunCommand_SandboxNotFound(t *testing.T) { - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return nil, store.ErrNotFound - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - } - - _, err := svc.RunCommand(context.Background(), "nonexistent", "ubuntu", "/path/to/key", "ls", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for sandbox not found") - } - if !errors.Is(err, store.ErrNotFound) { - t.Errorf("expected ErrNotFound, got %v", err) - } -} - -func TestRunCommand_WithEnvironmentVariables(t *testing.T) { - ip := "192.168.1.100" - var capturedEnv map[string]string - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id 
string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - return []*store.Sandbox{}, nil - }, - } - - mockSSH := &mockSSHRunner{ - runFn: func(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string) (string, string, int, error) { - capturedEnv = env - return "test\n", "", 0, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - ssh: mockSSH, - mgr: &mockManager{}, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - env := map[string]string{"MY_VAR": "test_value", "OTHER_VAR": "other"} - _, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "echo $MY_VAR", 60*time.Second, env) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if capturedEnv == nil { - t.Fatal("expected environment variables to be passed") - } - if capturedEnv["MY_VAR"] != "test_value" { - t.Errorf("expected MY_VAR=%q, got %q", "test_value", capturedEnv["MY_VAR"]) - } -} - -func TestRunCommand_IPConflictDetected(t *testing.T) { - ip := "192.168.1.100" - otherIP := "192.168.1.100" // Same IP as another sandbox - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - // Return another sandbox with the same IP - simulating a conflict - return []*store.Sandbox{ - { - ID: "SBX-OTHER", - SandboxName: "other-sandbox", - State: store.SandboxStateRunning, - IPAddress: 
&otherIP, - }, - }, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - mgr: &mockManager{}, - timeNowFn: time.Now, - logger: slog.Default(), - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - _, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "ls -l", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for IP conflict, got nil") - } - if !strings.Contains(err.Error(), "ip conflict") { - t.Errorf("expected error to contain 'ip conflict', got: %v", err) - } -} - -func TestValidateIPUniqueness_NoConflict(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - // Return a sandbox with a different IP - otherIP := "192.168.1.200" - return []*store.Sandbox{ - { - ID: "SBX-OTHER", - SandboxName: "other-sandbox", - State: store.SandboxStateRunning, - IPAddress: &otherIP, - }, - }, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - logger: slog.Default(), - } - - err := svc.validateIPUniqueness(context.Background(), "SBX-123", ip) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestValidateIPUniqueness_Conflict(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - // Return another sandbox with the same IP - return []*store.Sandbox{ - { - ID: "SBX-OTHER", - SandboxName: "other-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, - }, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - logger: slog.Default(), - } - - err := svc.validateIPUniqueness(context.Background(), "SBX-123", ip) - if err == nil { - 
t.Fatal("expected error for IP conflict, got nil") - } - if !strings.Contains(err.Error(), "already assigned") { - t.Errorf("expected error to contain 'already assigned', got: %v", err) - } -} - -func TestValidateIPUniqueness_SameSandboxIgnored(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - // Return the same sandbox - should be ignored - return []*store.Sandbox{ - { - ID: "SBX-123", // Same ID as the one being validated - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, - }, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - logger: slog.Default(), - } - - err := svc.validateIPUniqueness(context.Background(), "SBX-123", ip) - if err != nil { - t.Fatalf("unexpected error (same sandbox should be ignored): %v", err) - } -} diff --git a/fluid/AGENTS.md b/fluid/AGENTS.md deleted file mode 100644 index e8b974d1..00000000 --- a/fluid/AGENTS.md +++ /dev/null @@ -1,360 +0,0 @@ -# Fluid - Development Guide - -Fluid is an embedded CLI tool that lets AI agents create and manage VM sandboxes directly - no HTTP server required. Local SQLite for state, direct libvirt access via local socket or SSH. - -## Architecture - -``` -AI Agent (Claude Code, etc.) 
- | - v (subprocess/tool calls) -fluid CLI - | - +-- SQLite store (~/.fluid/state.db) - +-- Libvirt manager - +-- VM service - | - v -libvirt (qemu:///system or qemu+ssh://host/system) -``` - -## Quick Start - -```bash -# Build the CLI -make build - -# Initialize configuration (creates ~/.fluid/config.yaml) -./bin/fluid init - -# List available VMs to clone from -./bin/fluid vms - -# Create a sandbox from a source VM -./bin/fluid create --source-vm=ubuntu-base - -# Run commands in the sandbox -./bin/fluid run "whoami" - -# Destroy when done -./bin/fluid destroy -``` - -## CLI Commands - -| Command | Description | -|---------|-------------| -| `fluid init` | Initialize configuration | -| `fluid create` | Create a new sandbox | -| `fluid list` | List sandboxes | -| `fluid get ` | Get sandbox details | -| `fluid destroy ` | Destroy a sandbox | -| `fluid start ` | Start a sandbox | -| `fluid stop ` | Stop a sandbox | -| `fluid ip ` | Discover IP address | -| `fluid run ` | Run a command | -| `fluid ssh-inject ` | Inject SSH public key | -| `fluid snapshot ` | Create a snapshot | -| `fluid diff ` | Compare snapshots | -| `fluid vms` | List available VMs | -| `fluid version` | Print version | -| `fluid tui` | Launch interactive TUI | -| `fluid mcp` | Start MCP server on stdio | - -All commands output JSON by default for easy agent parsing. - -## Interactive TUI - -Fluid includes an interactive terminal UI for human operators, built with Bubble Tea, Bubbles, and Lipgloss. 
- -```bash -# Launch the TUI -./bin/fluid tui -``` - -### TUI Features - -- **Real-time feedback**: See tool calls and their results as they happen -- **Conversation view**: Scrollable history with markdown rendering -- **Thinking indicator**: Animated spinner while processing -- **Tool result display**: Success/failure indicators with result summaries - -### TUI Commands - -The TUI accepts natural commands: - -| Command | Description | -|---------|-------------| -| `list` (ls) | List all sandboxes | -| `create ` | Create a new sandbox | -| `destroy ` | Destroy a sandbox | -| `get ` | Get sandbox details | -| `start ` | Start a stopped sandbox | -| `stop ` | Stop a running sandbox | -| `run ` | Run a command in a sandbox | -| `snapshot [name]` | Create a snapshot | -| `vms` | List available VMs for cloning | -| `help` | Show help message | - -### TUI Keyboard Shortcuts - -| Key | Action | -|-----|--------| -| `Enter` | Send message | -| `/settings` | Open settings editor | -| `PgUp/PgDn` | Scroll conversation history | -| `Ctrl+R` | Reset conversation | -| `Ctrl+C` | Quit | - -### Settings Editor - -Type `/settings` or `settings` to open the configuration editor. 
The settings screen allows you to edit: - -**Host Configuration:** -- Host name and address - -**Libvirt Configuration:** -- Libvirt URI (e.g., `qemu:///system` or `qemu+ssh://user@host/system`) -- Network name -- Base image directory -- Work directory -- SSH key injection method - -**VM Defaults:** -- Default vCPUs -- Default memory (MB) -- Command timeout -- IP discovery timeout - -**SSH Configuration:** -- Default SSH user -- SSH proxy jump (for isolated networks) - -Settings editor shortcuts: -| Key | Action | -|-----|--------| -| `Tab/Down` | Next field | -| `Shift+Tab/Up` | Previous field | -| `Ctrl+S` | Save and exit | -| `Esc` | Cancel and exit | - -### Example TUI Session - -``` -> list - v list_sandboxes - -> {"count":1,"sandboxes":[{"id":"SBX-abc123",...}]} - -Found 1 sandbox(es): -- sbx-test (SBX-abc123) - State: RUNNING | IP: 192.168.122.45 - -> run SBX-abc123 whoami - v run_command - -> {"exit_code":0,"stdout":"root\n",...} - -Command completed (exit code: 0) -**stdout:** -root -``` - -## MCP Server - -Fluid exposes all sandbox management tools via the [Model Context Protocol](https://modelcontextprotocol.io/) for use with Claude Code, Cursor, and other MCP clients. - -### Starting the MCP Server - -```bash -./bin/fluid mcp -``` - -This starts a JSON-RPC server on stdio. All logging goes to `~/.fluid/fluid-mcp.log` since stdout is the MCP transport. 
- -### Client Configuration - -**Claude Code** (`~/.claude.json`): - -```json -{ - "mcpServers": { - "fluid": { - "command": "/path/to/fluid", - "args": ["mcp"] - } - } -} -``` - -**Cursor** (`.cursor/mcp.json`): - -```json -{ - "mcpServers": { - "fluid": { - "command": "/path/to/fluid", - "args": ["mcp"] - } - } -} -``` - -### Available Tools - -| Tool | Parameters | Description | -|------|-----------|-------------| -| `list_sandboxes` | (none) | List all sandboxes with state and IPs | -| `create_sandbox` | `source_vm` (required), `host`, `cpu`, `memory_mb` | Create a sandbox by cloning a source VM | -| `destroy_sandbox` | `sandbox_id` (required) | Destroy a sandbox and remove storage | -| `run_command` | `sandbox_id` (required), `command` (required) | Execute a shell command via SSH | -| `start_sandbox` | `sandbox_id` (required) | Start a stopped sandbox | -| `stop_sandbox` | `sandbox_id` (required) | Stop a running sandbox | -| `get_sandbox` | `sandbox_id` (required) | Get detailed sandbox info | -| `list_vms` | (none) | List available VMs for cloning | -| `create_snapshot` | `sandbox_id` (required), `name` | Snapshot current sandbox state | -| `create_playbook` | `name` (required), `hosts`, `become` | Create an Ansible playbook | -| `add_playbook_task` | `playbook_id` (required), `name` (required), `module` (required), `params` | Add a task to a playbook | -| `edit_file` | `sandbox_id` (required), `path` (required), `new_str` (required), `old_str` | Edit or create a file in a sandbox | -| `read_file` | `sandbox_id` (required), `path` (required) | Read a file from a sandbox | -| `list_playbooks` | (none) | List all created playbooks | -| `get_playbook` | `playbook_id` (required) | Get playbook definition and YAML | -| `run_source_command` | `source_vm` (required), `command` (required) | Run read-only command on a source VM | -| `read_source_file` | `source_vm` (required), `path` (required) | Read a file from a source VM | - -### Differences from TUI Agent - -- 
**No approval flows**: MCP clients handle their own approval. Sandbox creation and command execution proceed without interactive confirmation. -- **No streaming**: Commands return complete results. Use `run_command` not the streaming variant. -- **No source VM auto-preparation**: If a source VM isn't prepared for read-only access, the error propagates. Run `fluid source prepare ` separately. -- **Agent ID**: MCP-created sandboxes use `agent_id: "mcp-agent"` to distinguish from TUI-created ones. - -## Configuration - -Default config location: `~/.fluid/config.yaml` - -```yaml -libvirt: - uri: qemu:///system # or qemu+ssh://user@host/system - network: default - base_image_dir: /var/lib/libvirt/images/base - work_dir: /var/lib/libvirt/images/sandboxes - ssh_key_inject_method: virt-customize - -vm: - default_vcpus: 2 - default_memory_mb: 2048 - command_timeout: 5m - ip_discovery_timeout: 2m - -ssh: - proxy_jump: "" # Optional: user@jumphost for isolated networks - default_user: sandbox -``` - -## Development - -### Prerequisites - -- Go 1.22+ -- libvirt/KVM installed and running -- virsh command available - -### Build - -```bash -# Build the fluid CLI -make build -# Output: bin/fluid - -# Clean build artifacts -make clean -``` - -### Testing - -```bash -# Run all tests -make test - -# Run tests with coverage -make test-coverage -# Generates: coverage.out, coverage.html -``` - -### Code Quality - -```bash -# Format code -make fmt - -# Run go vet -make vet - -# Run all checks -make check -``` - -### Dependencies - -```bash -# Download dependencies -make deps - -# Tidy and verify dependencies -make tidy -``` - -## Makefile Targets - -Run `make help` to see all available targets: - -| Target | Description | -|--------|-------------| -| `all` | Run fmt, vet, test, and build (default) | -| `build` | Build the fluid CLI binary | -| `run` | Build and run the CLI | -| `clean` | Clean build artifacts | -| `fmt` | Format code | -| `vet` | Run go vet | -| `test` | Run tests | -| 
`test-coverage` | Run tests with coverage | -| `check` | Run all code quality checks | -| `deps` | Download dependencies | -| `tidy` | Tidy and verify dependencies | -| `install` | Install fluid to GOPATH/bin | -| `help` | Show help message | - -## Example Agent Usage - -```bash -# Agent creates sandbox -$ fluid create --source-vm=ubuntu-base -{"sandbox_id": "SBX-abc123", "name": "sbx-xyz", "state": "RUNNING", "ip": "192.168.122.45"} - -# Agent runs commands -$ fluid run SBX-abc123 "apt update && apt install -y nginx" -{"sandbox_id": "SBX-abc123", "exit_code": 0, "stdout": "...", "stderr": ""} - -# Agent takes snapshot -$ fluid snapshot SBX-abc123 --name=after-nginx -{"snapshot_id": "SNP-xyz", "sandbox_id": "SBX-abc123", "name": "after-nginx"} - -# Agent checks diff -$ fluid diff SBX-abc123 --from=initial --to=after-nginx -{"diff_id": "DIF-xyz", "files_added": ["/etc/nginx/..."], "files_modified": [...]} - -# Agent destroys sandbox -$ fluid destroy SBX-abc123 -{"destroyed": true, "sandbox_id": "SBX-abc123"} -``` - -## Data Storage - -State is stored in SQLite at `~/.fluid/state.db`: -- Sandboxes -- Snapshots -- Commands -- Diffs - -The database is auto-migrated on first run. - -If you remove a parameter from a function, don't just pass in nil/null/empty string in a different layer, make sure to remove the extra parameter from every place. 
diff --git a/fluid/CLAUDE.md b/fluid/CLAUDE.md deleted file mode 100644 index 43c994c2..00000000 --- a/fluid/CLAUDE.md +++ /dev/null @@ -1 +0,0 @@ -@AGENTS.md diff --git a/fluid/cmd/fluid/main.go b/fluid/cmd/fluid/main.go deleted file mode 100644 index e98f7e78..00000000 --- a/fluid/cmd/fluid/main.go +++ /dev/null @@ -1,1539 +0,0 @@ -package main - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "log/slog" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - "github.com/spf13/cobra" - "gopkg.in/yaml.v3" - - "github.com/aspectrr/fluid.sh/fluid/internal/config" - "github.com/aspectrr/fluid.sh/fluid/internal/libvirt" - fluidmcp "github.com/aspectrr/fluid.sh/fluid/internal/mcp" - "github.com/aspectrr/fluid.sh/fluid/internal/provider" - "github.com/aspectrr/fluid.sh/fluid/internal/proxmox" - "github.com/aspectrr/fluid.sh/fluid/internal/readonly" - "github.com/aspectrr/fluid.sh/fluid/internal/sshca" - "github.com/aspectrr/fluid.sh/fluid/internal/sshkeys" - "github.com/aspectrr/fluid.sh/fluid/internal/store" - "github.com/aspectrr/fluid.sh/fluid/internal/store/sqlite" - "github.com/aspectrr/fluid.sh/fluid/internal/telemetry" - "github.com/aspectrr/fluid.sh/fluid/internal/tui" - "github.com/aspectrr/fluid.sh/fluid/internal/vm" -) - -var ( - cfgFile string - outputJSON bool - cfg *config.Config - dataStore store.Store - vmService *vm.Service - providerMgr provider.Manager - telemetryService telemetry.Service -) - -func main() { - if err := rootCmd.Execute(); err != nil { - outputError(err) - os.Exit(1) - } -} - -var rootCmd = &cobra.Command{ - Use: "fluid", - Short: "Fluid - Make Infrastructure Safe for AI", - Long: "Fluid is a terminal agent that AI manage infrastructure via sandboxed resources, audit trails and human approval.", - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - // Skip init for these commands (they handle their own init) - if cmd.Name() == "init" || cmd.Name() == "version" || cmd.Name() == "help" || 
cmd.Name() == "tui" || cmd.Name() == "mcp" || cmd.Name() == "fluid" { - return nil - } - return initServices() - }, - PersistentPostRunE: func(cmd *cobra.Command, args []string) error { - if telemetryService != nil { - telemetryService.Close() - } - if dataStore != nil { - return dataStore.Close() - } - return nil - }, - // Default to TUI when no subcommand is provided - RunE: func(cmd *cobra.Command, args []string) error { - return runTUI() - }, -} - -func init() { - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default ~/.fluid/config.yaml)") - rootCmd.PersistentFlags().BoolVar(&outputJSON, "json", true, "output JSON (default true)") - - rootCmd.AddCommand(initCmd) - rootCmd.AddCommand(createCmd) - rootCmd.AddCommand(listCmd) - rootCmd.AddCommand(getCmd) - rootCmd.AddCommand(destroyCmd) - rootCmd.AddCommand(startCmd) - rootCmd.AddCommand(stopCmd) - rootCmd.AddCommand(ipCmd) - rootCmd.AddCommand(runCmd) - rootCmd.AddCommand(sshInjectCmd) - rootCmd.AddCommand(snapshotCmd) - rootCmd.AddCommand(diffCmd) - rootCmd.AddCommand(vmsCmd) - rootCmd.AddCommand(hostsCmd) - rootCmd.AddCommand(validateCmd) - rootCmd.AddCommand(versionCmd) - rootCmd.AddCommand(playbooksCmd) - rootCmd.AddCommand(tuiCmd) - rootCmd.AddCommand(mcpCmd) - - sourceCmd.AddCommand(sourcePrepareCmd) - rootCmd.AddCommand(sourceCmd) -} - -func initServices() error { - var err error - - // Determine config path - configPath := cfgFile - if configPath == "" { - home, _ := os.UserHomeDir() - configPath = filepath.Join(home, ".fluid", "config.yaml") - } - - // Ensure config directory and file exist with defaults - if err := ensureConfigFile(configPath); err != nil { - return fmt.Errorf("ensure config: %w", err) - } - - // Load config - var warnings []string - cfg, warnings, err = config.LoadWithEnvOverride(configPath) - if err != nil { - return fmt.Errorf("load config: %w", err) - } - for _, w := range warnings { - fmt.Fprintf(os.Stderr, "Warning: %s\n", w) - } - - // Ensure SSH CA 
exists - generate if missing - created, err := sshca.EnsureSSHCA(cfg.SSH.CAKeyPath, cfg.SSH.CAPubPath, "fluid-ssh-ca") - if err != nil { - return fmt.Errorf("ensure SSH CA: %w", err) - } - if created { - // Log that we created the CA (in JSON format for agent consumption) - fmt.Fprintf(os.Stderr, `{"event":"ssh_ca_created","ca_key":"%s","ca_pub":"%s"}`+"\n", - cfg.SSH.CAKeyPath, cfg.SSH.CAPubPath) - } - - // Open SQLite store - ctx := context.Background() - dataStore, err = sqlite.New(ctx, store.Config{ - AutoMigrate: true, - }) - if err != nil { - return fmt.Errorf("open store: %w", err) - } - - // Create and initialize SSH CA for key management - ca, err := sshca.NewCA(sshca.Config{ - CAKeyPath: cfg.SSH.CAKeyPath, - CAPubKeyPath: cfg.SSH.CAPubPath, - WorkDir: cfg.SSH.WorkDir, - DefaultTTL: cfg.SSH.CertTTL, - MaxTTL: cfg.SSH.MaxTTL, - DefaultPrincipals: []string{cfg.SSH.DefaultUser}, - EnforceKeyPermissions: true, - }) - if err != nil { - return fmt.Errorf("create SSH CA: %w", err) - } - if err := ca.Initialize(ctx); err != nil { - return fmt.Errorf("initialize SSH CA: %w", err) - } - - // Create key manager for managed SSH credentials - keyMgr, err := sshkeys.NewKeyManager(ca, sshkeys.Config{ - KeyDir: cfg.SSH.KeyDir, - CertificateTTL: cfg.SSH.CertTTL, - DefaultUsername: cfg.SSH.DefaultUser, - }, slog.Default()) - if err != nil { - return fmt.Errorf("create key manager: %w", err) - } - - // Read SSH CA public key for injection into VMs via cloud-init - sshCAPubKey := "" - if pubKeyBytes, err := os.ReadFile(cfg.SSH.CAPubPath); err == nil { - sshCAPubKey = strings.TrimSpace(string(pubKeyBytes)) - } - - // Create provider manager based on configured provider - var remoteFactory vm.RemoteManagerFactory - switch cfg.Provider { - case "proxmox": - proxmoxCfg := proxmox.Config{ - Host: cfg.Proxmox.Host, - TokenID: cfg.Proxmox.TokenID, - Secret: cfg.Proxmox.Secret, - Node: cfg.Proxmox.Node, - VerifySSL: cfg.Proxmox.VerifySSL, - Storage: cfg.Proxmox.Storage, - Bridge: 
cfg.Proxmox.Bridge, - CloneMode: cfg.Proxmox.CloneMode, - VMIDStart: cfg.Proxmox.VMIDStart, - VMIDEnd: cfg.Proxmox.VMIDEnd, - } - mgr, mgrErr := proxmox.NewProxmoxManager(proxmoxCfg, slog.Default()) - if mgrErr != nil { - return fmt.Errorf("create proxmox manager: %w", mgrErr) - } - providerMgr = mgr - default: - virshCfg := libvirt.Config{ - LibvirtURI: cfg.Libvirt.URI, - BaseImageDir: cfg.Libvirt.BaseImageDir, - WorkDir: cfg.Libvirt.WorkDir, - SSHKeyInjectMethod: cfg.Libvirt.SSHKeyInjectMethod, - SocketVMNetWrapper: cfg.Libvirt.SocketVMNetWrapper, - SSHCAPubKey: sshCAPubKey, - } - providerMgr = libvirt.NewVirshManager(virshCfg, slog.Default()) - remoteFactory = func(host config.HostConfig) provider.Manager { - return libvirt.NewRemoteVirshManager(host, virshCfg, slog.Default()) - } - } - - // Initialize telemetry - telemetryService, err = telemetry.NewService(cfg.Telemetry) - if err != nil { - // Fallback to no-op if telemetry fails - telemetryService = telemetry.NewNoopService() - } - - // Create VM service with key manager and remote factory - var serviceOpts []vm.Option - serviceOpts = append(serviceOpts, vm.WithKeyManager(keyMgr), vm.WithTelemetry(telemetryService)) - if remoteFactory != nil { - serviceOpts = append(serviceOpts, vm.WithRemoteManagerFactory(remoteFactory)) - } - vmService = vm.NewService(providerMgr, dataStore, vm.Config{ - Network: cfg.Libvirt.Network, - DefaultVCPUs: cfg.VM.DefaultVCPUs, - DefaultMemoryMB: cfg.VM.DefaultMemoryMB, - CommandTimeout: cfg.VM.CommandTimeout, - IPDiscoveryTimeout: cfg.VM.IPDiscoveryTimeout, - SSHProxyJump: cfg.SSH.ProxyJump, - }, serviceOpts...) - - return nil -} - -// ensureConfigFile creates a default config file if it doesn't exist. 
-func ensureConfigFile(configPath string) error { - // Check if config file already exists - if _, err := os.Stat(configPath); err == nil { - return nil // File exists - } - - // Create config directory - configDir := filepath.Dir(configPath) - if err := os.MkdirAll(configDir, 0o755); err != nil { - return fmt.Errorf("create config dir: %w", err) - } - - // Write default config - defaultCfg := `# Fluid CLI Configuration -# Auto-generated on first run - -libvirt: - uri: qemu:///system # or qemu+ssh://user@host/system - network: default - base_image_dir: /var/lib/libvirt/images/base - work_dir: /var/lib/libvirt/images/sandboxes - ssh_key_inject_method: virt-customize - -vm: - default_vcpus: 2 - default_memory_mb: 4096 - command_timeout: 5m - ip_discovery_timeout: 2m - -ssh: - proxy_jump: "" # Optional: user@jumphost for isolated networks - default_user: sandbox - # SSH CA paths are auto-configured to ~/.fluid/ssh-ca/ -` - - if err := os.WriteFile(configPath, []byte(defaultCfg), 0o644); err != nil { - return fmt.Errorf("write config: %w", err) - } - - return nil -} - -// --- Init Command --- - -var initCmd = &cobra.Command{ - Use: "init", - Short: "Initialize fluid configuration", - Long: `Creates default config file at ~/.fluid/config.yaml`, - RunE: func(cmd *cobra.Command, args []string) error { - home, err := os.UserHomeDir() - if err != nil { - return err - } - - configDir := filepath.Join(home, ".fluid") - configPath := filepath.Join(configDir, "config.yaml") - - // Check if config already exists - if _, err := os.Stat(configPath); err == nil { - output(map[string]any{ - "status": "exists", - "path": configPath, - "message": "Config file already exists", - }) - return nil - } - - // Create directory - if err := os.MkdirAll(configDir, 0o755); err != nil { - return fmt.Errorf("create config dir: %w", err) - } - - // Write default config - defaultCfg := `# Fluid CLI Configuration - -libvirt: - uri: qemu:///system # or qemu+ssh://user@host/system - network: default - 
base_image_dir: /var/lib/libvirt/images/base - work_dir: /var/lib/libvirt/images/sandboxes - ssh_key_inject_method: virt-customize - -vm: - default_vcpus: 2 - default_memory_mb: 4096 - command_timeout: 5m - ip_discovery_timeout: 2m - -ssh: - proxy_jump: "" # Optional: user@jumphost for isolated networks - default_user: sandbox -` - - if err := os.WriteFile(configPath, []byte(defaultCfg), 0o644); err != nil { - return fmt.Errorf("write config: %w", err) - } - - output(map[string]any{ - "status": "created", - "path": configPath, - }) - return nil - }, -} - -// --- Create Command --- - -var createCmd = &cobra.Command{ - Use: "create", - Short: "Create a new sandbox", - Long: `Create a new sandbox VM by cloning from a source VM`, - RunE: func(cmd *cobra.Command, args []string) error { - sourceVM, _ := cmd.Flags().GetString("source-vm") - agentID, _ := cmd.Flags().GetString("agent-id") - cpu, _ := cmd.Flags().GetInt("cpu") - memory, _ := cmd.Flags().GetInt("memory") - autoStart, _ := cmd.Flags().GetBool("auto-start") - waitIP, _ := cmd.Flags().GetBool("wait-ip") - - if sourceVM == "" { - return fmt.Errorf("--source-vm is required") - } - if agentID == "" { - agentID = "cli-agent" - } - - ctx := context.Background() - - // Check resources first - if insufficient memory, prompt for approval - mgr := vmService.GetManager() - memoryMB := memory - if memoryMB <= 0 { - memoryMB = vmService.GetDefaultMemory() - } - cpuCount := cpu - if cpuCount <= 0 { - cpuCount = vmService.GetDefaultCPUs() - } - - validation := vmService.CheckResourcesForSandbox(ctx, mgr, sourceVM, cpuCount, memoryMB) - if !validation.SourceVMValid { - return fmt.Errorf("source VM validation failed: %s", strings.Join(validation.VMErrors, "; ")) - } - - // If resources are insufficient, require interactive approval via TUI - if validation.NeedsApproval { - request := tui.MemoryApprovalRequest{ - SourceVM: sourceVM, - RequiredMemoryMB: validation.ResourceCheck.RequiredMemoryMB, - AvailableMemoryMB: 
validation.ResourceCheck.AvailableMemoryMB, - TotalMemoryMB: validation.ResourceCheck.TotalMemoryMB, - Warnings: validation.ResourceCheck.Warnings, - Errors: validation.ResourceCheck.Errors, - } - - approved, err := tui.RunConfirmDialog(request) - if err != nil { - return fmt.Errorf("approval dialog: %w", err) - } - if !approved { - return fmt.Errorf("sandbox creation cancelled: insufficient memory (need %d MB, have %d MB available)", - validation.ResourceCheck.RequiredMemoryMB, validation.ResourceCheck.AvailableMemoryMB) - } - } - - sb, ip, err := vmService.CreateSandbox(ctx, sourceVM, agentID, "", cpu, memory, nil, autoStart, waitIP) - if err != nil { - return err - } - - result := map[string]any{ - "sandbox_id": sb.ID, - "name": sb.SandboxName, - "state": sb.State, - } - if ip != "" { - result["ip"] = ip - } - - output(result) - return nil - }, -} - -func init() { - createCmd.Flags().String("source-vm", "", "Source VM to clone from (required)") - createCmd.Flags().String("agent-id", "", "Agent ID (default: cli-agent)") - createCmd.Flags().Int("cpu", 0, "Number of vCPUs (default from config)") - createCmd.Flags().Int("memory", 0, "Memory in MB (default from config)") - createCmd.Flags().Bool("auto-start", true, "Auto-start the VM after creation") - createCmd.Flags().Bool("wait-ip", true, "Wait for IP address discovery") -} - -// --- List Command --- - -var listCmd = &cobra.Command{ - Use: "list", - Short: "List sandboxes", - Long: `List all sandboxes with optional filtering`, - RunE: func(cmd *cobra.Command, args []string) error { - state, _ := cmd.Flags().GetString("state") - - ctx := context.Background() - filter := store.SandboxFilter{} - if state != "" { - s := store.SandboxState(strings.ToUpper(state)) - filter.State = &s - } - - sandboxes, err := vmService.GetSandboxes(ctx, filter, nil) - if err != nil { - return err - } - - result := make([]map[string]any, 0, len(sandboxes)) - for _, sb := range sandboxes { - item := map[string]any{ - "sandbox_id": sb.ID, 
- "name": sb.SandboxName, - "state": sb.State, - "base_image": sb.BaseImage, - "created_at": sb.CreatedAt.Format(time.RFC3339), - } - if sb.IPAddress != nil { - item["ip"] = *sb.IPAddress - } - result = append(result, item) - } - - output(map[string]any{ - "sandboxes": result, - "count": len(result), - }) - return nil - }, -} - -func init() { - listCmd.Flags().String("state", "", "Filter by state (CREATED, RUNNING, STOPPED, etc.)") -} - -// --- Get Command --- - -var getCmd = &cobra.Command{ - Use: "get ", - Short: "Get sandbox details", - Long: `Get detailed information about a specific sandbox`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - ctx := context.Background() - sb, err := vmService.GetSandbox(ctx, args[0]) - if err != nil { - return err - } - - result := map[string]any{ - "sandbox_id": sb.ID, - "name": sb.SandboxName, - "state": sb.State, - "base_image": sb.BaseImage, - "network": sb.Network, - "agent_id": sb.AgentID, - "job_id": sb.JobID, - "created_at": sb.CreatedAt.Format(time.RFC3339), - "updated_at": sb.UpdatedAt.Format(time.RFC3339), - } - if sb.IPAddress != nil { - result["ip"] = *sb.IPAddress - } - - output(result) - return nil - }, -} - -// --- Destroy Command --- - -var destroyCmd = &cobra.Command{ - Use: "destroy ", - Short: "Destroy a sandbox", - Long: `Completely destroy a sandbox VM and remove its storage`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - ctx := context.Background() - _, err := vmService.DestroySandbox(ctx, args[0]) - if err != nil { - return err - } - - output(map[string]any{ - "destroyed": true, - "sandbox_id": args[0], - }) - return nil - }, -} - -// --- Start Command --- - -var startCmd = &cobra.Command{ - Use: "start ", - Short: "Start a sandbox", - Long: `Start a stopped sandbox VM`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - waitIP, _ := cmd.Flags().GetBool("wait-ip") - - ctx := context.Background() 
- ip, err := vmService.StartSandbox(ctx, args[0], waitIP) - if err != nil { - return err - } - - result := map[string]any{ - "started": true, - "sandbox_id": args[0], - } - if ip != "" { - result["ip"] = ip - } - - output(result) - return nil - }, -} - -func init() { - startCmd.Flags().Bool("wait-ip", true, "Wait for IP address discovery") -} - -// --- Stop Command --- - -var stopCmd = &cobra.Command{ - Use: "stop ", - Short: "Stop a sandbox", - Long: `Stop a running sandbox VM`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - force, _ := cmd.Flags().GetBool("force") - - ctx := context.Background() - err := vmService.StopSandbox(ctx, args[0], force) - if err != nil { - return err - } - - output(map[string]any{ - "stopped": true, - "sandbox_id": args[0], - }) - return nil - }, -} - -func init() { - stopCmd.Flags().Bool("force", false, "Force stop (hard shutdown)") -} - -// --- IP Command --- - -var ipCmd = &cobra.Command{ - Use: "ip ", - Short: "Discover IP address", - Long: `Discover or rediscover the IP address for a sandbox`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - ctx := context.Background() - ip, err := vmService.DiscoverIP(ctx, args[0]) - if err != nil { - return err - } - - output(map[string]any{ - "sandbox_id": args[0], - "ip": ip, - }) - return nil - }, -} - -// --- Run Command --- - -var runCmd = &cobra.Command{ - Use: "run ", - Short: "Run a command in a sandbox", - Long: `Execute a command inside the sandbox via SSH`, - Args: cobra.MinimumNArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - sandboxID := args[0] - command := strings.Join(args[1:], " ") - - user, _ := cmd.Flags().GetString("user") - key, _ := cmd.Flags().GetString("key") - timeout, _ := cmd.Flags().GetDuration("timeout") - - if user == "" { - user = cfg.SSH.DefaultUser - } - - ctx := context.Background() - result, err := vmService.RunCommand(ctx, sandboxID, user, key, command, timeout, nil) - 
if err != nil { - // Still return partial result if available - if result != nil { - output(map[string]any{ - "sandbox_id": sandboxID, - "exit_code": result.ExitCode, - "stdout": result.Stdout, - "stderr": result.Stderr, - "error": err.Error(), - }) - return nil - } - return err - } - - output(map[string]any{ - "sandbox_id": sandboxID, - "exit_code": result.ExitCode, - "stdout": result.Stdout, - "stderr": result.Stderr, - }) - return nil - }, -} - -func init() { - runCmd.Flags().String("user", "", "SSH user (default from config)") - runCmd.Flags().String("key", "", "SSH private key path") - runCmd.Flags().Duration("timeout", 0, "Command timeout (default from config)") -} - -// --- SSH Inject Command --- - -var sshInjectCmd = &cobra.Command{ - Use: "ssh-inject ", - Short: "Inject SSH public key", - Long: `Inject an SSH public key into the sandbox for the specified user`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - sandboxID := args[0] - pubkey, _ := cmd.Flags().GetString("pubkey") - user, _ := cmd.Flags().GetString("user") - - if pubkey == "" { - return fmt.Errorf("--pubkey is required") - } - if user == "" { - user = cfg.SSH.DefaultUser - } - - ctx := context.Background() - err := vmService.InjectSSHKey(ctx, sandboxID, user, pubkey) - if err != nil { - return err - } - - output(map[string]any{ - "sandbox_id": sandboxID, - "user": user, - "injected": true, - }) - return nil - }, -} - -func init() { - sshInjectCmd.Flags().String("pubkey", "", "SSH public key to inject (required)") - sshInjectCmd.Flags().String("user", "", "Target user (default from config)") -} - -// --- Snapshot Command --- - -var snapshotCmd = &cobra.Command{ - Use: "snapshot ", - Short: "Create a snapshot", - Long: `Create a snapshot of the current sandbox state`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - sandboxID := args[0] - name, _ := cmd.Flags().GetString("name") - external, _ := 
cmd.Flags().GetBool("external") - - if name == "" { - name = fmt.Sprintf("snap-%d", time.Now().Unix()) - } - - ctx := context.Background() - snap, err := vmService.CreateSnapshot(ctx, sandboxID, name, external) - if err != nil { - return err - } - - output(map[string]any{ - "snapshot_id": snap.ID, - "sandbox_id": sandboxID, - "name": snap.Name, - "kind": snap.Kind, - }) - return nil - }, -} - -func init() { - snapshotCmd.Flags().String("name", "", "Snapshot name (auto-generated if empty)") - snapshotCmd.Flags().Bool("external", false, "Create external snapshot") -} - -// --- Diff Command --- - -var diffCmd = &cobra.Command{ - Use: "diff ", - Short: "Compare snapshots", - Long: `Compute differences between two snapshots`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - sandboxID := args[0] - from, _ := cmd.Flags().GetString("from") - to, _ := cmd.Flags().GetString("to") - - if from == "" || to == "" { - return fmt.Errorf("--from and --to are required") - } - - ctx := context.Background() - diff, err := vmService.DiffSnapshots(ctx, sandboxID, from, to) - if err != nil { - return err - } - - output(map[string]any{ - "diff_id": diff.ID, - "sandbox_id": sandboxID, - "from_snapshot": diff.FromSnapshot, - "to_snapshot": diff.ToSnapshot, - "files_added": diff.DiffJSON.FilesAdded, - "files_modified": diff.DiffJSON.FilesModified, - "files_removed": diff.DiffJSON.FilesRemoved, - }) - return nil - }, -} - -func init() { - diffCmd.Flags().String("from", "", "Source snapshot name (required)") - diffCmd.Flags().String("to", "", "Target snapshot name (required)") -} - -// --- VMs Command --- - -var vmsCmd = &cobra.Command{ - Use: "vms", - Short: "List available VMs", - Long: `List all VMs available for cloning (libvirt or Proxmox depending on provider)`, - RunE: func(cmd *cobra.Command, args []string) error { - ctx := context.Background() - - // Load config to check for provider and hosts - configPath := cfgFile - if configPath == "" { - home, 
_ := os.UserHomeDir() - configPath = filepath.Join(home, ".fluid", "config.yaml") - } - - loadedCfg, _, err := config.LoadWithEnvOverride(configPath) - if err != nil { - // If no config, fall back to local virsh - vms, err := listVMsViaVirsh(ctx) - if err != nil { - return err - } - output(map[string]any{ - "vms": vms, - "count": len(vms), - }) - return nil - } - - // Proxmox provider: list VMs via API - if loadedCfg.Provider == "proxmox" { - proxmoxCfg := proxmox.Config{ - Host: loadedCfg.Proxmox.Host, - TokenID: loadedCfg.Proxmox.TokenID, - Secret: loadedCfg.Proxmox.Secret, - Node: loadedCfg.Proxmox.Node, - VerifySSL: loadedCfg.Proxmox.VerifySSL, - VMIDStart: loadedCfg.Proxmox.VMIDStart, - VMIDEnd: loadedCfg.Proxmox.VMIDEnd, - } - mnm := proxmox.NewMultiNodeManager(proxmoxCfg, slog.Default()) - result, err := mnm.ListVMs(ctx) - if err != nil { - return fmt.Errorf("query proxmox: %w", err) - } - - vms := make([]map[string]any, 0, len(result.VMs)) - for _, vm := range result.VMs { - vms = append(vms, map[string]any{ - "name": vm.Name, - "vmid": vm.UUID, - "state": vm.State, - "host": vm.HostName, - }) - } - - resp := map[string]any{ - "vms": vms, - "count": len(vms), - "provider": "proxmox", - } - - if len(result.HostErrors) > 0 { - errors := make([]map[string]any, 0, len(result.HostErrors)) - for _, e := range result.HostErrors { - errors = append(errors, map[string]any{ - "host": e.HostName, - "error": e.Error, - }) - } - resp["host_errors"] = errors - } - - output(resp) - return nil - } - - // Libvirt provider: if hosts are configured, query remote hosts - if len(loadedCfg.Hosts) > 0 { - multiHostMgr := libvirt.NewMultiHostDomainManager(loadedCfg.Hosts, slog.Default()) - result, err := multiHostMgr.ListDomains(ctx) - if err != nil { - return fmt.Errorf("query hosts: %w", err) - } - - vms := make([]map[string]any, 0, len(result.Domains)) - for _, d := range result.Domains { - vms = append(vms, map[string]any{ - "name": d.Name, - "state": d.State.String(), - 
"host": d.HostName, - }) - } - - resp := map[string]any{ - "vms": vms, - "count": len(vms), - } - - // Include host errors if any - if len(result.HostErrors) > 0 { - errors := make([]map[string]any, 0, len(result.HostErrors)) - for _, e := range result.HostErrors { - errors = append(errors, map[string]any{ - "host": e.HostName, - "error": e.Error, - }) - } - resp["host_errors"] = errors - } - - output(resp) - return nil - } - - // No hosts configured, use local virsh - vms, err := listVMsViaVirsh(ctx) - if err != nil { - return err - } - - output(map[string]any{ - "vms": vms, - "count": len(vms), - }) - return nil - }, -} - -func listVMsViaVirsh(ctx context.Context) ([]map[string]any, error) { - var stdout, stderr bytes.Buffer - cmd := exec.CommandContext(ctx, "virsh", "list", "--all", "--name") - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("virsh list: %w: %s", err, stderr.String()) - } - - result := make([]map[string]any, 0) - for _, name := range strings.Split(stdout.String(), "\n") { - name = strings.TrimSpace(name) - if name == "" { - continue - } - vmInfo := map[string]any{ - "name": name, - } - - // Get additional info about each VM if providerMgr is available - if providerMgr != nil { - // Get VM state - state, err := providerMgr.GetVMState(ctx, name) - if err == nil { - vmInfo["state"] = string(state) - } - - // Get IP address (only for running VMs) - if state == libvirt.VMStateRunning { - ip, mac, err := providerMgr.GetIPAddress(ctx, name, 1*time.Second) - if err == nil && ip != "" { - vmInfo["ip"] = ip - vmInfo["mac"] = mac - } - } - } - - result = append(result, vmInfo) - } - - return result, nil -} - -// --- Validate Command --- - -var validateCmd = &cobra.Command{ - Use: "validate ", - Short: "Validate a source VM and host resources", - Long: `Run pre-flight validation checks on a source VM before creating sandboxes. 
- -This command checks: -- Source VM state (running/shut off) -- Network interface configuration -- MAC address assignment -- IP address (if VM is running) -- Host memory availability -- Host disk space`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - sourceVM := args[0] - memory, _ := cmd.Flags().GetInt("memory") - - ctx := context.Background() - - result := map[string]any{ - "source_vm": sourceVM, - "valid": true, - "warnings": []string{}, - "errors": []string{}, - } - - var allWarnings []string - var allErrors []string - - // Validate source VM - vmValidation, err := providerMgr.ValidateSourceVM(ctx, sourceVM) - if err != nil { - allErrors = append(allErrors, fmt.Sprintf("Failed to validate source VM: %v", err)) - result["valid"] = false - } else { - result["vm_state"] = vmValidation.State - result["has_network"] = vmValidation.HasNetwork - if vmValidation.MACAddress != "" { - result["mac_address"] = vmValidation.MACAddress - } - if vmValidation.IPAddress != "" { - result["ip_address"] = vmValidation.IPAddress - } - if !vmValidation.Valid { - result["valid"] = false - } - allWarnings = append(allWarnings, vmValidation.Warnings...) - allErrors = append(allErrors, vmValidation.Errors...) 
- } - - // Check host resources - memoryToCheck := memory - if memoryToCheck <= 0 { - memoryToCheck = cfg.VM.DefaultMemoryMB - } - // Use default CPU count if not specified (for validation purposes) - cpuToCheck := cfg.VM.DefaultVCPUs - resourceCheck, err := providerMgr.CheckHostResources(ctx, cpuToCheck, memoryToCheck) - if err != nil { - allWarnings = append(allWarnings, fmt.Sprintf("Failed to check host resources: %v", err)) - } else { - result["host_memory_total_mb"] = resourceCheck.TotalMemoryMB - result["host_memory_available_mb"] = resourceCheck.AvailableMemoryMB - result["host_cpus_available"] = resourceCheck.AvailableCPUs - result["host_disk_available_mb"] = resourceCheck.AvailableDiskMB - result["required_memory_mb"] = resourceCheck.RequiredMemoryMB - result["required_cpus"] = resourceCheck.RequiredCPUs - if !resourceCheck.Valid { - result["valid"] = false - } - allWarnings = append(allWarnings, resourceCheck.Warnings...) - allErrors = append(allErrors, resourceCheck.Errors...) - } - - result["warnings"] = allWarnings - result["errors"] = allErrors - - output(result) - return nil - }, -} - -func init() { - validateCmd.Flags().Int("memory", 0, "Memory in MB to check for (default from config)") -} - -// --- Hosts Command --- - -var hostsCmd = &cobra.Command{ - Use: "hosts", - Short: "List configured remote hosts", - Long: `List all remote libvirt hosts configured in the config file`, - RunE: func(cmd *cobra.Command, args []string) error { - // Load config - configPath := cfgFile - if configPath == "" { - home, _ := os.UserHomeDir() - configPath = filepath.Join(home, ".fluid", "config.yaml") - } - - loadedCfg, _, err := config.LoadWithEnvOverride(configPath) - if err != nil { - return fmt.Errorf("load config: %w", err) - } - - if len(loadedCfg.Hosts) == 0 { - output(map[string]any{ - "hosts": []map[string]any{}, - "count": 0, - "message": "No remote hosts configured. 
Add hosts to your config file.", - }) - return nil - } - - hosts := make([]map[string]any, 0, len(loadedCfg.Hosts)) - for _, h := range loadedCfg.Hosts { - host := map[string]any{ - "name": h.Name, - "address": h.Address, - } - if h.SSHUser != "" { - host["ssh_user"] = h.SSHUser - } - if h.SSHPort != 0 { - host["ssh_port"] = h.SSHPort - } - hosts = append(hosts, host) - } - - output(map[string]any{ - "hosts": hosts, - "count": len(hosts), - }) - return nil - }, -} - -// --- Version Command --- - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print version information", - RunE: func(cmd *cobra.Command, args []string) error { - output(map[string]any{ - "version": tui.Version, - "name": "fluid", - }) - return nil - }, -} - -// --- Playbooks Command --- - -var playbooksCmd = &cobra.Command{ - Use: "playbooks", - Short: "List generated Ansible playbooks", - Long: `List all generated Ansible playbooks and provide links to open them.`, - RunE: func(cmd *cobra.Command, args []string) error { - ctx := context.Background() - playbooks, err := dataStore.ListPlaybooks(ctx, nil) - if err != nil { - return err - } - - if outputJSON { - type playbookOutput struct { - ID string `json:"id"` - Name string `json:"name"` - Path string `json:"path"` - CreatedAt string `json:"created_at"` - } - - results := make([]playbookOutput, 0, len(playbooks)) - for _, pb := range playbooks { - path := "" - if pb.FilePath != nil && *pb.FilePath != "" { - path = *pb.FilePath - } else { - path = filepath.Join(cfg.Ansible.PlaybooksDir, pb.Name+".yml") - } - results = append(results, playbookOutput{ - ID: pb.ID, - Name: pb.Name, - Path: path, - CreatedAt: pb.CreatedAt.Format(time.RFC3339), - }) - } - output(map[string]any{ - "playbooks": results, - "count": len(results), - }) - return nil - } - - if len(playbooks) == 0 { - fmt.Println("No playbooks found.") - return nil - } - - fmt.Printf("Found %d playbook(s):\n\n", len(playbooks)) - for _, pb := range playbooks { - path := "" - if 
pb.FilePath != nil && *pb.FilePath != "" { - path = *pb.FilePath - } else { - path = filepath.Join(cfg.Ansible.PlaybooksDir, pb.Name+".yml") - } - - absPath, _ := filepath.Abs(path) - // OSC 8 hyperlink - link := fmt.Sprintf("\033]8;;file://%s\033\\%s\033]8;;\033\\", absPath, path) - - fmt.Printf("- %s: %s\n", pb.Name, link) - } - return nil - }, -} - -// --- TUI Command --- - -var tuiCmd = &cobra.Command{ - Use: "tui", - Short: "Launch the interactive TUI", - Long: `Launch an interactive terminal UI for managing sandboxes`, - RunE: func(cmd *cobra.Command, args []string) error { - return runTUI() - }, -} - -// --- MCP Command --- - -var mcpCmd = &cobra.Command{ - Use: "mcp", - Short: "Start MCP server on stdio", - Long: `Start an MCP (Model Context Protocol) server that exposes fluid tools over stdio for use with Claude Code, Cursor, and other MCP clients.`, - RunE: func(cmd *cobra.Command, args []string) error { - return runMCP() - }, -} - -// runMCP launches the MCP server on stdio -func runMCP() error { - configPath := cfgFile - if configPath == "" { - home, _ := os.UserHomeDir() - configPath = filepath.Join(home, ".fluid", "config.yaml") - } - - var err error - cfg, err = tui.EnsureConfigExists(configPath) - if err != nil { - return fmt.Errorf("ensure config: %w", err) - } - - // Log to file - stdout is the MCP transport - logPath := filepath.Join(filepath.Dir(configPath), "fluid-mcp.log") - logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644) - if err != nil { - logFile = nil - } - var logger *slog.Logger - if logFile != nil { - defer func() { _ = logFile.Close() }() - logger = slog.New(slog.NewTextHandler(logFile, &slog.HandlerOptions{Level: slog.LevelDebug})) - } else { - logger = slog.New(slog.NewTextHandler(io.Discard, nil)) - } - - if err := initServicesWithConfigAndLogger(cfg, logger); err != nil { - return fmt.Errorf("init services: %w", err) - } - - srv := fluidmcp.NewServer(cfg, dataStore, vmService, telemetryService, 
logger) - return srv.Serve() -} - -// runTUI launches the interactive TUI -func runTUI() error { - // Get config path - configPath := cfgFile - if configPath == "" { - home, _ := os.UserHomeDir() - configPath = filepath.Join(home, ".fluid", "config.yaml") - } - - // Load config directly here to ensure hosts are loaded - var err error - cfg, err = tui.EnsureConfigExists(configPath) - if err != nil { - return fmt.Errorf("ensure config: %w", err) - } - - // Check if onboarding is needed (first run) - if !cfg.OnboardingComplete { - // Run onboarding wizard - updatedCfg, err := tui.RunOnboarding(cfg, configPath) - if err != nil { - return fmt.Errorf("onboarding: %w", err) - } - cfg = updatedCfg - - // Mark onboarding as complete and save config - cfg.OnboardingComplete = true - if err := cfg.Save(configPath); err != nil { - // Non-fatal: continue even if we can't save the flag - fmt.Fprintf(os.Stderr, "Warning: could not save onboarding status: %v\n", err) - } - } - - // Log to ~/.fluid/fluid.log instead of stdout to avoid corrupting the TUI - logPath := filepath.Join(filepath.Dir(configPath), "fluid.log") - logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644) - if err != nil { - fmt.Fprintf(os.Stderr, "Warning: could not open log file %s: %v\n", logPath, err) - logFile = nil - } - var fileLogger *slog.Logger - if logFile != nil { - defer func() { _ = logFile.Close() }() - fileLogger = slog.New(slog.NewTextHandler(logFile, &slog.HandlerOptions{Level: slog.LevelDebug})) - } else { - fileLogger = slog.New(slog.NewTextHandler(io.Discard, nil)) - } - - // Initialize services with the loaded config and file logger - if err := initServicesWithConfigAndLogger(cfg, fileLogger); err != nil { - return fmt.Errorf("init services: %w", err) - } - - agent := tui.NewFluidAgent(cfg, dataStore, vmService, providerMgr, telemetryService, fileLogger) - - // Cleanup now happens in the TUI cleanup page when user presses Ctrl+C twice - - model := 
tui.NewModel("fluid", "local", "vm-agent", agent, cfg, configPath) - return tui.Run(model) -} - -// initServicesWithConfigAndLogger initializes services with a pre-loaded config and custom logger -func initServicesWithConfigAndLogger(loadedCfg *config.Config, logger *slog.Logger) error { - var err error - - cfg = loadedCfg - - // Ensure SSH CA exists - generate if missing - // For TUI mode we don't log to stderr to avoid corrupting the display - _, err = sshca.EnsureSSHCA(cfg.SSH.CAKeyPath, cfg.SSH.CAPubPath, "fluid-ssh-ca") - if err != nil { - return fmt.Errorf("ensure SSH CA: %w", err) - } - - // Open SQLite store - ctx := context.Background() - dataStore, err = sqlite.New(ctx, store.Config{ - AutoMigrate: true, - }) - if err != nil { - return fmt.Errorf("open store: %w", err) - } - - // Create and initialize SSH CA for key management - ca, err := sshca.NewCA(sshca.Config{ - CAKeyPath: cfg.SSH.CAKeyPath, - CAPubKeyPath: cfg.SSH.CAPubPath, - WorkDir: cfg.SSH.WorkDir, - DefaultTTL: cfg.SSH.CertTTL, - MaxTTL: cfg.SSH.MaxTTL, - DefaultPrincipals: []string{cfg.SSH.DefaultUser}, - EnforceKeyPermissions: true, - }) - if err != nil { - return fmt.Errorf("create SSH CA: %w", err) - } - if err := ca.Initialize(ctx); err != nil { - return fmt.Errorf("initialize SSH CA: %w", err) - } - - // Create key manager for managed SSH credentials - keyMgr, err := sshkeys.NewKeyManager(ca, sshkeys.Config{ - KeyDir: cfg.SSH.KeyDir, - CertificateTTL: cfg.SSH.CertTTL, - DefaultUsername: cfg.SSH.DefaultUser, - }, logger) - if err != nil { - return fmt.Errorf("create key manager: %w", err) - } - - // Read SSH CA public key for injection into VMs via cloud-init - sshCAPubKey := "" - if pubKeyBytes, err := os.ReadFile(cfg.SSH.CAPubPath); err == nil { - sshCAPubKey = strings.TrimSpace(string(pubKeyBytes)) - } - - // Create provider manager based on configured provider - var remoteFactory vm.RemoteManagerFactory - switch cfg.Provider { - case "proxmox": - proxmoxCfg := proxmox.Config{ - Host: 
cfg.Proxmox.Host, - TokenID: cfg.Proxmox.TokenID, - Secret: cfg.Proxmox.Secret, - Node: cfg.Proxmox.Node, - VerifySSL: cfg.Proxmox.VerifySSL, - Storage: cfg.Proxmox.Storage, - Bridge: cfg.Proxmox.Bridge, - CloneMode: cfg.Proxmox.CloneMode, - VMIDStart: cfg.Proxmox.VMIDStart, - VMIDEnd: cfg.Proxmox.VMIDEnd, - } - mgr, mgrErr := proxmox.NewProxmoxManager(proxmoxCfg, logger) - if mgrErr != nil { - return fmt.Errorf("create proxmox manager: %w", mgrErr) - } - providerMgr = mgr - default: - virshCfg := libvirt.Config{ - LibvirtURI: cfg.Libvirt.URI, - BaseImageDir: cfg.Libvirt.BaseImageDir, - WorkDir: cfg.Libvirt.WorkDir, - SSHKeyInjectMethod: cfg.Libvirt.SSHKeyInjectMethod, - SocketVMNetWrapper: cfg.Libvirt.SocketVMNetWrapper, - DefaultNetwork: cfg.Libvirt.Network, - DefaultVCPUs: cfg.VM.DefaultVCPUs, - DefaultMemoryMB: cfg.VM.DefaultMemoryMB, - SSHCAPubKey: sshCAPubKey, - } - providerMgr = libvirt.NewVirshManager(virshCfg, logger) - remoteFactory = func(host config.HostConfig) provider.Manager { - return libvirt.NewRemoteVirshManager(host, virshCfg, logger) - } - } - - // Initialize telemetry - telemetryService, err = telemetry.NewService(cfg.Telemetry) - if err != nil { - // Fallback to no-op if telemetry fails - telemetryService = telemetry.NewNoopService() - } - - // Create VM service with remote factory for multi-host support - var serviceOpts []vm.Option - serviceOpts = append(serviceOpts, vm.WithLogger(logger), vm.WithKeyManager(keyMgr), vm.WithTelemetry(telemetryService)) - if remoteFactory != nil { - serviceOpts = append(serviceOpts, vm.WithRemoteManagerFactory(remoteFactory)) - } - vmService = vm.NewService(providerMgr, dataStore, vm.Config{ - Network: cfg.Libvirt.Network, - DefaultVCPUs: cfg.VM.DefaultVCPUs, - DefaultMemoryMB: cfg.VM.DefaultMemoryMB, - CommandTimeout: cfg.VM.CommandTimeout, - IPDiscoveryTimeout: cfg.VM.IPDiscoveryTimeout, - SSHProxyJump: cfg.SSH.ProxyJump, - }, serviceOpts...) 
- - return nil -} - -// --- Output Helpers --- - -func output(v any) { - if outputJSON { - enc := json.NewEncoder(os.Stdout) - enc.SetIndent("", " ") - _ = enc.Encode(v) - } else { - // Human-readable output - data, _ := yaml.Marshal(v) - fmt.Print(string(data)) - } -} - -func outputError(err error) { - v := map[string]any{ - "error": err.Error(), - } - if outputJSON { - enc := json.NewEncoder(os.Stderr) - _ = enc.Encode(v) - } else { - fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error()) - } -} - -// sourceCmd is the parent command for source VM operations. -var sourceCmd = &cobra.Command{ - Use: "source", - Short: "Manage source/golden VMs", - Long: "Commands for managing and preparing source/golden VMs for read-only access.", -} - -// sourcePrepareCmd prepares a golden VM for read-only SSH access. -var sourcePrepareCmd = &cobra.Command{ - Use: "prepare ", - Short: "Prepare a golden VM for read-only SSH access", - Long: "Sets up the fluid-readonly user, restricted shell, and SSH CA trust on a golden VM.", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - vmName := args[0] - - if err := initServices(); err != nil { - return err - } - - sshUser, _ := cmd.Flags().GetString("user") - sshKey, _ := cmd.Flags().GetString("key") - - if sshUser == "" { - sshUser = "root" - } - - // Discover VM IP. - ctx := context.Background() - ip, _, err := providerMgr.GetIPAddress(ctx, vmName, 2*time.Minute) - if err != nil { - return fmt.Errorf("discover IP for VM %s: %w", vmName, err) - } - - // Read CA public key. - caPubPath := cfg.SSH.CAPubPath - if caPubPath == "" { - return fmt.Errorf("SSH CA public key path not configured (ssh.ca_pub_path)") - } - caPubKey, err := os.ReadFile(caPubPath) - if err != nil { - return fmt.Errorf("read CA public key %s: %w", caPubPath, err) - } - - // Create SSH run function. 
- sshRunFunc := func(ctx context.Context, command string) (string, string, int, error) { - sshArgs := []string{ - "-i", sshKey, - "-o", "StrictHostKeyChecking=no", - "-o", "UserKnownHostsFile=/dev/null", - "-o", "ConnectTimeout=15", - fmt.Sprintf("%s@%s", sshUser, ip), - "--", - command, - } - sshCmd := exec.CommandContext(ctx, "ssh", sshArgs...) - var stdout, stderr bytes.Buffer - sshCmd.Stdout = &stdout - sshCmd.Stderr = &stderr - if err := sshCmd.Run(); err != nil { - return stdout.String(), stderr.String(), 1, err - } - return stdout.String(), stderr.String(), 0, nil - } - - result, err := readonly.Prepare(ctx, sshRunFunc, string(caPubKey), nil, slog.Default()) - if err != nil { - return fmt.Errorf("prepare VM %s: %w", vmName, err) - } - - output(map[string]any{ - "vm": vmName, - "ip": ip, - "user_created": result.UserCreated, - "shell_installed": result.ShellInstalled, - "ca_key_installed": result.CAKeyInstalled, - "sshd_configured": result.SSHDConfigured, - "principals_created": result.PrincipalsCreated, - "sshd_restarted": result.SSHDRestarted, - }) - return nil - }, -} - -func init() { - sourcePrepareCmd.Flags().String("user", "root", "SSH user for connecting to the VM") - sourcePrepareCmd.Flags().String("key", "", "Path to SSH private key") - _ = sourcePrepareCmd.MarkFlagRequired("key") -} diff --git a/fluid/internal/extract/archive.go b/fluid/internal/extract/archive.go deleted file mode 100755 index 825e69ba..00000000 --- a/fluid/internal/extract/archive.go +++ /dev/null @@ -1,247 +0,0 @@ -package extract - -import ( - "bytes" - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "time" - - "github.com/aspectrr/fluid.sh/fluid/internal/workflow" -) - -// Archiver handles creation of root filesystem archives. -type Archiver struct { - // tarPath is the path to the tar binary. - tarPath string -} - -// ArchiverConfig configures the archiver. -type ArchiverConfig struct { - // TarPath is the path to the tar binary. 
- // If empty, "tar" is looked up in PATH. - TarPath string -} - -// NewArchiver creates a new Archiver with the given configuration. -func NewArchiver(cfg ArchiverConfig) *Archiver { - tarPath := cfg.TarPath - if tarPath == "" { - tarPath = "tar" - } - return &Archiver{ - tarPath: tarPath, - } -} - -// ArchiveResult contains the result of creating an archive. -type ArchiveResult struct { - // ArchivePath is the path to the created tar archive. - ArchivePath string - - // Size is the size of the archive in bytes. - Size int64 - - // Cleanup is a function to remove the archive. - Cleanup workflow.CleanupFunc -} - -// CreateRootFSArchive creates a tar archive of the sanitized root filesystem. -// The archive preserves numeric ownership and extended attributes. -func (a *Archiver) CreateRootFSArchive(ctx context.Context, sourcePath string, workDir string) (*ArchiveResult, error) { - // Generate archive filename with timestamp - timestamp := time.Now().UTC().Format("20060102T150405Z") - archiveName := fmt.Sprintf("rootfs-%s.tar", timestamp) - archivePath := filepath.Join(workDir, archiveName) - - // Create the archive - if err := a.createTarArchive(ctx, sourcePath, archivePath); err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageCreateArchive, - workflow.ErrArchiveFailed, - fmt.Sprintf("failed to create archive: %v", err), - ) - } - - // Get archive size - info, err := os.Stat(archivePath) - if err != nil { - _ = os.Remove(archivePath) - return nil, workflow.NewWorkflowError( - workflow.StageCreateArchive, - workflow.ErrArchiveFailed, - fmt.Sprintf("failed to stat archive: %v", err), - ) - } - - return &ArchiveResult{ - ArchivePath: archivePath, - Size: info.Size(), - Cleanup: func() error { - return os.Remove(archivePath) - }, - }, nil -} - -// createTarArchive creates a tar archive of the source directory. 
-func (a *Archiver) createTarArchive(ctx context.Context, sourcePath, archivePath string) error { - // Build tar command with options: - // -c: create archive - // -f: output file - // --numeric-owner: preserve numeric UID/GID (important for container images) - // --xattrs: preserve extended attributes - // --xattrs-include=*: include all xattrs - // --acls: preserve ACLs (if supported) - // --selinux: preserve SELinux contexts (if applicable) - // -C: change to directory before archiving - // .: archive current directory contents - - args := []string{ - "-cf", archivePath, - "--numeric-owner", - } - - // Check if tar supports xattrs - if a.supportsXattrs(ctx) { - args = append(args, "--xattrs", "--xattrs-include=*") - } - - // Check if tar supports ACLs - if a.supportsACLs(ctx) { - args = append(args, "--acls") - } - - // Check if tar supports SELinux - if a.supportsSELinux(ctx) { - args = append(args, "--selinux") - } - - // Add source directory - args = append(args, "-C", sourcePath, ".") - - cmd := exec.CommandContext(ctx, a.tarPath, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("tar failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// supportsXattrs checks if tar supports --xattrs option. -func (a *Archiver) supportsXattrs(ctx context.Context) bool { - cmd := exec.CommandContext(ctx, a.tarPath, "--xattrs", "--help") - return cmd.Run() == nil -} - -// supportsACLs checks if tar supports --acls option. -func (a *Archiver) supportsACLs(ctx context.Context) bool { - cmd := exec.CommandContext(ctx, a.tarPath, "--acls", "--help") - return cmd.Run() == nil -} - -// supportsSELinux checks if tar supports --selinux option. -func (a *Archiver) supportsSELinux(ctx context.Context) bool { - cmd := exec.CommandContext(ctx, a.tarPath, "--selinux", "--help") - return cmd.Run() == nil -} - -// ExtractArchive extracts a tar archive to the specified destination. 
-// This is useful for testing or container image import operations. -func (a *Archiver) ExtractArchive(ctx context.Context, archivePath, destPath string) error { - // Ensure destination exists - if err := os.MkdirAll(destPath, 0o755); err != nil { - return fmt.Errorf("failed to create destination directory: %w", err) - } - - args := []string{ - "-xf", archivePath, - "--numeric-owner", - "-C", destPath, - } - - // Add xattrs support if available - if a.supportsXattrs(ctx) { - args = append(args[:2], append([]string{"--xattrs", "--xattrs-include=*"}, args[2:]...)...) - } - - cmd := exec.CommandContext(ctx, a.tarPath, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("tar extraction failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// GetArchiveSize returns the size of an archive file. -func (a *Archiver) GetArchiveSize(archivePath string) (int64, error) { - info, err := os.Stat(archivePath) - if err != nil { - return 0, err - } - return info.Size(), nil -} - -// ListArchiveContents lists the contents of an archive. -// This is useful for verification and debugging. -func (a *Archiver) ListArchiveContents(ctx context.Context, archivePath string) ([]string, error) { - args := []string{"-tf", archivePath} - - cmd := exec.CommandContext(ctx, a.tarPath, args...) - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("tar list failed: %w: %s", err, stderr.String()) - } - - // Parse output into list of files - output := stdout.String() - if output == "" { - return []string{}, nil - } - - lines := bytes.Split(stdout.Bytes(), []byte("\n")) - files := make([]string, 0, len(lines)) - for _, line := range lines { - if len(line) > 0 { - files = append(files, string(line)) - } - } - - return files, nil -} - -// VerifyArchive performs basic verification of an archive. 
-func (a *Archiver) VerifyArchive(ctx context.Context, archivePath string) error { - // Check file exists - info, err := os.Stat(archivePath) - if err != nil { - return fmt.Errorf("archive not found: %w", err) - } - - // Check file is not empty - if info.Size() == 0 { - return fmt.Errorf("archive is empty") - } - - // Try to list contents to verify integrity - args := []string{"-tf", archivePath} - cmd := exec.CommandContext(ctx, a.tarPath, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("archive verification failed: %w: %s", err, stderr.String()) - } - - return nil -} diff --git a/fluid/internal/extract/mount.go b/fluid/internal/extract/mount.go deleted file mode 100755 index 52f2238b..00000000 --- a/fluid/internal/extract/mount.go +++ /dev/null @@ -1,443 +0,0 @@ -package extract - -import ( - "bytes" - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/aspectrr/fluid.sh/fluid/internal/workflow" -) - -// MountManager handles qemu-nbd attachment and filesystem mounting. -type MountManager struct { - // qemuNbdPath is the path to the qemu-nbd binary. - qemuNbdPath string - - // nbdDeviceMu protects NBD device allocation. - nbdDeviceMu sync.Mutex - - // usedNbdDevices tracks which NBD devices are currently in use. - usedNbdDevices map[string]bool -} - -// MountConfig configures the mount manager. -type MountConfig struct { - // QemuNbdPath is the path to the qemu-nbd binary. - // If empty, "qemu-nbd" is looked up in PATH. - QemuNbdPath string -} - -// MountResult contains the result of mounting a disk image. -type MountResult struct { - // NBDDevice is the /dev/nbdX device the image is attached to. - NBDDevice string - - // Partition is the partition device (e.g., /dev/nbd0p1). - Partition string - - // MountPoint is the path where the filesystem is mounted. 
- MountPoint string - - // Cleanup is a function that unmounts and disconnects everything. - Cleanup workflow.CleanupFunc -} - -// NewMountManager creates a new MountManager with the given configuration. -func NewMountManager(cfg MountConfig) *MountManager { - qemuNbdPath := cfg.QemuNbdPath - if qemuNbdPath == "" { - qemuNbdPath = "qemu-nbd" - } - return &MountManager{ - qemuNbdPath: qemuNbdPath, - usedNbdDevices: make(map[string]bool), - } -} - -// MountDisk attaches a disk image via qemu-nbd and mounts the root filesystem. -// The returned MountResult contains a cleanup function that must be called -// to unmount and disconnect the NBD device. -func (m *MountManager) MountDisk(ctx context.Context, diskPath string, workDir string) (*MountResult, error) { - // Verify disk exists - if _, err := os.Stat(diskPath); err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrMountFailed, - fmt.Sprintf("disk image not found: %s", diskPath), - ) - } - - // Find an available NBD device - nbdDevice, err := m.findAvailableNBDDevice() - if err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrNBDAttachFailed, - fmt.Sprintf("no available NBD device: %v", err), - ) - } - - result := &MountResult{ - NBDDevice: nbdDevice, - } - - // Track cleanup steps for rollback - cleanups := workflow.NewCleanupStack() - - // Attach the disk to NBD device - if err := m.attachNBD(ctx, diskPath, nbdDevice); err != nil { - m.releaseNBDDevice(nbdDevice) - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrNBDAttachFailed, - fmt.Sprintf("failed to attach %s to %s: %v", diskPath, nbdDevice, err), - ) - } - cleanups.Push(func() error { - return m.detachNBD(context.Background(), nbdDevice) - }) - - // Run partprobe to detect partitions - if err := m.runPartprobe(ctx, nbdDevice); err != nil { - _ = cleanups.ExecuteAll() - m.releaseNBDDevice(nbdDevice) - return nil, workflow.NewWorkflowError( - 
workflow.StageMountDisk, - workflow.ErrMountFailed, - fmt.Sprintf("partprobe failed: %v", err), - ) - } - - // Find the root partition - partition, err := m.findRootPartition(ctx, nbdDevice) - if err != nil { - _ = cleanups.ExecuteAll() - m.releaseNBDDevice(nbdDevice) - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrMountFailed, - fmt.Sprintf("failed to find root partition: %v", err), - ) - } - result.Partition = partition - - // Create mount point - mountPoint := filepath.Join(workDir, "rootfs") - if err := os.MkdirAll(mountPoint, 0o755); err != nil { - _ = cleanups.ExecuteAll() - m.releaseNBDDevice(nbdDevice) - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrMountFailed, - fmt.Sprintf("failed to create mount point: %v", err), - ) - } - cleanups.Push(func() error { - return os.RemoveAll(mountPoint) - }) - - // Mount the partition read-only - if err := m.mountPartition(ctx, partition, mountPoint); err != nil { - _ = cleanups.ExecuteAll() - m.releaseNBDDevice(nbdDevice) - return nil, workflow.NewWorkflowError( - workflow.StageMountDisk, - workflow.ErrMountFailed, - fmt.Sprintf("failed to mount %s at %s: %v", partition, mountPoint, err), - ) - } - result.MountPoint = mountPoint - - // Build the final cleanup function that does everything in reverse order - result.Cleanup = func() error { - var errs []error - - // Unmount filesystem - if err := m.unmount(context.Background(), mountPoint); err != nil { - errs = append(errs, fmt.Errorf("unmount %s: %w", mountPoint, err)) - } - - // Remove mount point directory - if err := os.RemoveAll(mountPoint); err != nil { - errs = append(errs, fmt.Errorf("remove mount point: %w", err)) - } - - // Detach NBD device - if err := m.detachNBD(context.Background(), nbdDevice); err != nil { - errs = append(errs, fmt.Errorf("detach NBD: %w", err)) - } - - // Release the NBD device for reuse - m.releaseNBDDevice(nbdDevice) - - if len(errs) > 0 { - return fmt.Errorf("cleanup 
errors: %v", errs) - } - return nil - } - - return result, nil -} - -// findAvailableNBDDevice finds an available /dev/nbdX device. -func (m *MountManager) findAvailableNBDDevice() (string, error) { - m.nbdDeviceMu.Lock() - defer m.nbdDeviceMu.Unlock() - - // Check for nbd module - if _, err := os.Stat("/sys/module/nbd"); os.IsNotExist(err) { - return "", fmt.Errorf("nbd kernel module not loaded; run 'modprobe nbd max_part=16'") - } - - // Try to find an available NBD device (typically nbd0 through nbd15) - for i := 0; i < 16; i++ { - device := fmt.Sprintf("/dev/nbd%d", i) - - // Skip if we're already using it - if m.usedNbdDevices[device] { - continue - } - - // Check if device exists - if _, err := os.Stat(device); os.IsNotExist(err) { - continue - } - - // Check if device is in use by examining its size - sizePath := fmt.Sprintf("/sys/block/nbd%d/size", i) - data, err := os.ReadFile(sizePath) - if err != nil { - continue - } - - size, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) - if err != nil { - continue - } - - // Size of 0 means the device is not in use - if size == 0 { - m.usedNbdDevices[device] = true - return device, nil - } - } - - return "", fmt.Errorf("all NBD devices are in use") -} - -// releaseNBDDevice marks an NBD device as available for reuse. -func (m *MountManager) releaseNBDDevice(device string) { - m.nbdDeviceMu.Lock() - defer m.nbdDeviceMu.Unlock() - delete(m.usedNbdDevices, device) -} - -// attachNBD attaches a disk image to an NBD device using qemu-nbd. -func (m *MountManager) attachNBD(ctx context.Context, diskPath, nbdDevice string) error { - // Connect the image to the NBD device - // --read-only for safety, --connect to specify the device - args := []string{ - "--read-only", - "--connect", nbdDevice, - "--format", "qcow2", - diskPath, - } - - cmd := exec.CommandContext(ctx, m.qemuNbdPath, args...) 
- var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("qemu-nbd failed: %w: %s", err, stderr.String()) - } - - // Wait a bit for the device to be ready - time.Sleep(500 * time.Millisecond) - - return nil -} - -// detachNBD disconnects an NBD device. -func (m *MountManager) detachNBD(ctx context.Context, nbdDevice string) error { - args := []string{"--disconnect", nbdDevice} - - cmd := exec.CommandContext(ctx, m.qemuNbdPath, args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("qemu-nbd disconnect failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// runPartprobe runs partprobe to detect partitions on the NBD device. -func (m *MountManager) runPartprobe(ctx context.Context, nbdDevice string) error { - cmd := exec.CommandContext(ctx, "partprobe", nbdDevice) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("partprobe failed: %w: %s", err, stderr.String()) - } - - // Wait for partition devices to appear - time.Sleep(500 * time.Millisecond) - - return nil -} - -// findRootPartition attempts to find the root partition on the NBD device. -// It looks for common partition layouts and returns the likely root partition. 
-func (m *MountManager) findRootPartition(ctx context.Context, nbdDevice string) (string, error) { - // Get the device name without /dev/ prefix - devName := filepath.Base(nbdDevice) - - // Check for partitions in /sys/block// - sysPath := fmt.Sprintf("/sys/block/%s", devName) - - entries, err := os.ReadDir(sysPath) - if err != nil { - return "", fmt.Errorf("failed to read %s: %w", sysPath, err) - } - - var partitions []string - for _, entry := range entries { - name := entry.Name() - // Partition entries start with the device name - if strings.HasPrefix(name, devName+"p") { - partitions = append(partitions, "/dev/"+name) - } - } - - if len(partitions) == 0 { - // No partitions found, might be a whole-disk filesystem - // Try to mount the device directly - return nbdDevice, nil - } - - // Sort partitions and try to find the root partition - // Typically: - // - p1 is often /boot or EFI on modern systems - // - p2 or p3 is often root - // We'll try to identify by checking for common root filesystem indicators - - for _, partition := range partitions { - // Use blkid to check filesystem type - cmd := exec.CommandContext(ctx, "blkid", "-o", "value", "-s", "TYPE", partition) - output, err := cmd.Output() - if err != nil { - continue - } - - fsType := strings.TrimSpace(string(output)) - // Look for ext4, xfs, btrfs which are common root filesystems - if fsType == "ext4" || fsType == "xfs" || fsType == "btrfs" || fsType == "ext3" { - // This is likely the root partition - // We could do more checks (mount and look for /etc, /bin, etc.) 
- // but for now we'll use the first Linux filesystem we find - // that isn't obviously a boot partition - if !m.isBootPartition(ctx, partition) { - return partition, nil - } - } - } - - // If we couldn't find a definitive root, try the largest partition - if len(partitions) > 0 { - largest := partitions[0] - var largestSize int64 - - for _, partition := range partitions { - partName := filepath.Base(partition) - sizePath := fmt.Sprintf("/sys/block/%s/%s/size", devName, partName) - data, err := os.ReadFile(sizePath) - if err != nil { - continue - } - size, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) - if err != nil { - continue - } - if size > largestSize { - largestSize = size - largest = partition - } - } - return largest, nil - } - - return "", fmt.Errorf("no suitable partition found") -} - -// isBootPartition checks if a partition appears to be a boot partition. -func (m *MountManager) isBootPartition(ctx context.Context, partition string) bool { - // Check partition label or flags - cmd := exec.CommandContext(ctx, "blkid", "-o", "value", "-s", "LABEL", partition) - output, err := cmd.Output() - if err == nil { - label := strings.ToLower(strings.TrimSpace(string(output))) - if strings.Contains(label, "boot") || strings.Contains(label, "efi") { - return true - } - } - - // Also check PARTLABEL for GPT partitions - cmd = exec.CommandContext(ctx, "blkid", "-o", "value", "-s", "PARTLABEL", partition) - output, err = cmd.Output() - if err == nil { - label := strings.ToLower(strings.TrimSpace(string(output))) - if strings.Contains(label, "boot") || strings.Contains(label, "efi") { - return true - } - } - - return false -} - -// mountPartition mounts a partition read-only at the specified mount point. 
-func (m *MountManager) mountPartition(ctx context.Context, partition, mountPoint string) error { - // Mount read-only with common options - args := []string{ - "-o", "ro,noatime,noexec", - partition, - mountPoint, - } - - cmd := exec.CommandContext(ctx, "mount", args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("mount failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// unmount unmounts a filesystem. -func (m *MountManager) unmount(ctx context.Context, mountPoint string) error { - // First try a regular unmount - cmd := exec.CommandContext(ctx, "umount", mountPoint) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - // If regular unmount fails, try lazy unmount - cmd = exec.CommandContext(ctx, "umount", "-l", mountPoint) - cmd.Stderr = &stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("unmount failed: %w: %s", err, stderr.String()) - } - } - - return nil -} diff --git a/fluid/internal/extract/sanitize.go b/fluid/internal/extract/sanitize.go deleted file mode 100755 index bb045167..00000000 --- a/fluid/internal/extract/sanitize.go +++ /dev/null @@ -1,438 +0,0 @@ -package extract - -import ( - "bytes" - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/aspectrr/fluid.sh/fluid/internal/workflow" -) - -// Sanitizer handles filesystem sanitization for container usage. -type Sanitizer struct { - // verbose enables detailed logging of sanitization steps. - verbose bool -} - -// SanitizerConfig configures the sanitizer. -type SanitizerConfig struct { - // Verbose enables detailed logging. - Verbose bool -} - -// NewSanitizer creates a new Sanitizer with the given configuration. -func NewSanitizer(cfg SanitizerConfig) *Sanitizer { - return &Sanitizer{ - verbose: cfg.Verbose, - } -} - -// SanitizeResult contains the result of filesystem sanitization. 
-type SanitizeResult struct { - // SanitizedPath is the path to the sanitized filesystem copy. - SanitizedPath string - - // RemovedPaths lists paths that were removed or neutralized. - RemovedPaths []string - - // ModifiedPaths lists paths that were modified. - ModifiedPaths []string - - // Cleanup is a function to remove the sanitized copy. - Cleanup workflow.CleanupFunc -} - -// SanitizeFilesystem creates a sanitized copy of the mounted filesystem -// suitable for container usage. It removes or neutralizes: -// - /boot directory -// - kernel modules (/lib/modules) -// - device nodes under /dev -// - fstab contents -// - swap references -// - systemd services that block container execution -func (s *Sanitizer) SanitizeFilesystem(ctx context.Context, sourcePath string, workDir string) (*SanitizeResult, error) { - // Create a working directory for the sanitized copy - sanitizedPath := filepath.Join(workDir, "sanitized") - if err := os.MkdirAll(sanitizedPath, 0o755); err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageSanitizeFS, - workflow.ErrSanitizeFailed, - fmt.Sprintf("failed to create sanitized directory: %v", err), - ) - } - - result := &SanitizeResult{ - SanitizedPath: sanitizedPath, - RemovedPaths: make([]string, 0), - ModifiedPaths: make([]string, 0), - } - - // Copy the filesystem using rsync for efficiency - // We exclude certain paths during copy rather than copying then deleting - if err := s.copyFilesystem(ctx, sourcePath, sanitizedPath); err != nil { - _ = os.RemoveAll(sanitizedPath) - return nil, workflow.NewWorkflowError( - workflow.StageSanitizeFS, - workflow.ErrSanitizeFailed, - fmt.Sprintf("failed to copy filesystem: %v", err), - ) - } - - // Apply sanitization steps - sanitizers := []struct { - name string - fn func(ctx context.Context, rootPath string, result *SanitizeResult) error - }{ - {"remove boot directory", s.removeBoot}, - {"remove kernel modules", s.removeKernelModules}, - {"clear device nodes", s.clearDeviceNodes}, 
- {"sanitize fstab", s.sanitizeFstab}, - {"remove swap references", s.removeSwapReferences}, - {"disable blocking systemd services", s.disableBlockingServices}, - {"set container environment marker", s.setContainerMarker}, - } - - for _, sanitizer := range sanitizers { - if err := sanitizer.fn(ctx, sanitizedPath, result); err != nil { - _ = os.RemoveAll(sanitizedPath) - return nil, workflow.NewWorkflowError( - workflow.StageSanitizeFS, - workflow.ErrSanitizeFailed, - fmt.Sprintf("%s failed: %v", sanitizer.name, err), - ) - } - } - - result.Cleanup = func() error { - return os.RemoveAll(sanitizedPath) - } - - return result, nil -} - -// copyFilesystem copies the source filesystem to the destination, -// excluding paths that will be removed anyway. -func (s *Sanitizer) copyFilesystem(ctx context.Context, src, dst string) error { - // Use rsync for efficient copying with exclusions - // Exclude paths we're going to remove anyway to save time and space - excludes := []string{ - "--exclude=/boot/*", - "--exclude=/lib/modules/*", - "--exclude=/dev/*", - "--exclude=/proc/*", - "--exclude=/sys/*", - "--exclude=/run/*", - "--exclude=/tmp/*", - "--exclude=/var/tmp/*", - "--exclude=/var/cache/*", - "--exclude=/var/log/*", - "--exclude=*.swap", - "--exclude=/swapfile", - } - - args := []string{ - "-a", // archive mode (preserves permissions, ownership, etc.) - "--hard-links", // preserve hard links - "--acls", // preserve ACLs - "--xattrs", // preserve extended attributes - "--sparse", // handle sparse files efficiently - } - args = append(args, excludes...) - args = append(args, src+"/", dst+"/") - - cmd := exec.CommandContext(ctx, "rsync", args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - // Try with cp if rsync is not available - return s.copyFilesystemFallback(ctx, src, dst) - } - - return nil -} - -// copyFilesystemFallback uses cp when rsync is not available. 
-func (s *Sanitizer) copyFilesystemFallback(ctx context.Context, src, dst string) error { - args := []string{ - "-a", // archive mode - "--reflink=auto", // use copy-on-write if available - src + "/.", - dst + "/", - } - - cmd := exec.CommandContext(ctx, "cp", args...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("cp failed: %w: %s", err, stderr.String()) - } - - return nil -} - -// removeBoot removes the /boot directory contents. -func (s *Sanitizer) removeBoot(ctx context.Context, rootPath string, result *SanitizeResult) error { - bootPath := filepath.Join(rootPath, "boot") - - // Remove contents but keep the directory - if err := s.clearDirectory(bootPath); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - result.RemovedPaths = append(result.RemovedPaths, "/boot/*") - return nil -} - -// removeKernelModules removes kernel modules from /lib/modules. -func (s *Sanitizer) removeKernelModules(ctx context.Context, rootPath string, result *SanitizeResult) error { - modulesPath := filepath.Join(rootPath, "lib", "modules") - - if err := s.clearDirectory(modulesPath); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - result.RemovedPaths = append(result.RemovedPaths, "/lib/modules/*") - return nil -} - -// clearDeviceNodes removes all device nodes under /dev. 
-func (s *Sanitizer) clearDeviceNodes(ctx context.Context, rootPath string, result *SanitizeResult) error { - devPath := filepath.Join(rootPath, "dev") - - // Remove contents but keep the directory - if err := s.clearDirectory(devPath); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - // Create minimal /dev entries that containers expect - // The container runtime will populate /dev properly - if err := os.MkdirAll(devPath, 0o755); err != nil { - return err - } - - // Create /dev/null, /dev/zero, /dev/random placeholders - // These are symlinks that the container runtime will handle - devEntries := []string{"null", "zero", "random", "urandom", "tty", "console"} - for _, entry := range devEntries { - placeholder := filepath.Join(devPath, entry) - // Create empty placeholder files - f, err := os.Create(placeholder) - if err != nil { - continue // Non-fatal, container runtime will create these - } - _ = f.Close() - } - - result.RemovedPaths = append(result.RemovedPaths, "/dev/*") - return nil -} - -// sanitizeFstab clears or comments out /etc/fstab entries. 
-func (s *Sanitizer) sanitizeFstab(ctx context.Context, rootPath string, result *SanitizeResult) error { - fstabPath := filepath.Join(rootPath, "etc", "fstab") - - content, err := os.ReadFile(fstabPath) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - // Comment out all mount entries, keeping only comments - lines := strings.Split(string(content), "\n") - var newLines []string - newLines = append(newLines, "# fstab sanitized for container usage") - newLines = append(newLines, "# Original entries commented out:") - newLines = append(newLines, "") - - for _, line := range lines { - trimmed := strings.TrimSpace(line) - if trimmed == "" || strings.HasPrefix(trimmed, "#") { - newLines = append(newLines, line) - } else { - newLines = append(newLines, "# "+line) - } - } - - if err := os.WriteFile(fstabPath, []byte(strings.Join(newLines, "\n")), 0o644); err != nil { - return err - } - - result.ModifiedPaths = append(result.ModifiedPaths, "/etc/fstab") - return nil -} - -// removeSwapReferences removes or disables swap configuration. 
-func (s *Sanitizer) removeSwapReferences(ctx context.Context, rootPath string, result *SanitizeResult) error { - // Remove swapfile if it exists - swapfile := filepath.Join(rootPath, "swapfile") - if _, err := os.Stat(swapfile); err == nil { - if err := os.Remove(swapfile); err != nil { - return err - } - result.RemovedPaths = append(result.RemovedPaths, "/swapfile") - } - - // Remove any .swap files in root - entries, err := os.ReadDir(rootPath) - if err != nil { - return err - } - for _, entry := range entries { - if strings.HasSuffix(entry.Name(), ".swap") { - swapPath := filepath.Join(rootPath, entry.Name()) - if err := os.Remove(swapPath); err != nil { - continue // Non-fatal - } - result.RemovedPaths = append(result.RemovedPaths, "/"+entry.Name()) - } - } - - // Disable swap-related systemd units - swapUnits := []string{ - "swap.target", - "dev-*.swap", - } - - // systemdPath := filepath.Join(rootPath, "etc", "systemd", "system") - for _, unit := range swapUnits { - // unitPath := filepath.Join(systemdPath, unit) - // Create a masked symlink to /dev/null - if err := s.maskSystemdUnit(rootPath, unit); err != nil { - continue // Non-fatal - } - } - - return nil -} - -// disableBlockingServices disables systemd services that block container execution. 
-func (s *Sanitizer) disableBlockingServices(ctx context.Context, rootPath string, result *SanitizeResult) error { - // Services that commonly block container startup or are inappropriate - blockingServices := []string{ - // Hardware/kernel related - "systemd-modules-load.service", - "systemd-sysctl.service", - "systemd-udevd.service", - "systemd-udev-trigger.service", - "systemd-udev-settle.service", - "kmod-static-nodes.service", - "systemd-tmpfiles-setup-dev.service", - - // Filesystem related - "systemd-remount-fs.service", - "systemd-fsck@.service", - "systemd-fsck-root.service", - "local-fs.target", - "local-fs-pre.target", - - // Network hardware related - "NetworkManager-wait-online.service", - "systemd-networkd-wait-online.service", - - // Other blocking services - "plymouth-start.service", - "plymouth-quit.service", - "plymouth-quit-wait.service", - "systemd-machine-id-commit.service", - "systemd-firstboot.service", - "systemd-random-seed.service", - - // Console/TTY related - "getty@.service", - "serial-getty@.service", - "console-getty.service", - "container-getty@.service", - "systemd-ask-password-wall.service", - "systemd-ask-password-console.service", - } - - for _, service := range blockingServices { - if err := s.maskSystemdUnit(rootPath, service); err != nil { - continue // Non-fatal, service may not exist - } - result.ModifiedPaths = append(result.ModifiedPaths, "/etc/systemd/system/"+service) - } - - return nil -} - -// maskSystemdUnit masks a systemd unit by creating a symlink to /dev/null. 
-func (s *Sanitizer) maskSystemdUnit(rootPath, unitName string) error { - systemdPath := filepath.Join(rootPath, "etc", "systemd", "system") - if err := os.MkdirAll(systemdPath, 0o755); err != nil { - return err - } - - unitPath := filepath.Join(systemdPath, unitName) - - // Remove existing unit/symlink if present - _ = os.Remove(unitPath) - - // Create symlink to /dev/null to mask the unit - return os.Symlink("/dev/null", unitPath) -} - -// setContainerMarker creates markers indicating container environment. -func (s *Sanitizer) setContainerMarker(ctx context.Context, rootPath string, result *SanitizeResult) error { - // Create /run/container marker directory - runPath := filepath.Join(rootPath, "run") - if err := os.MkdirAll(runPath, 0o755); err != nil { - return err - } - - // Create /.dockerenv equivalent for container detection - dockerenvPath := filepath.Join(rootPath, ".dockerenv") - f, err := os.Create(dockerenvPath) - if err != nil { - return err - } - _ = f.Close() - result.ModifiedPaths = append(result.ModifiedPaths, "/.dockerenv") - - // Create /run/.containerenv for Podman detection - containerenvPath := filepath.Join(runPath, ".containerenv") - containerenvContent := `engine="podman" -name="vmclone" -` - if err := os.WriteFile(containerenvPath, []byte(containerenvContent), 0o644); err != nil { - return err - } - result.ModifiedPaths = append(result.ModifiedPaths, "/run/.containerenv") - - return nil -} - -// clearDirectory removes all contents of a directory but keeps the directory itself. 
-func (s *Sanitizer) clearDirectory(dirPath string) error { - entries, err := os.ReadDir(dirPath) - if err != nil { - return err - } - - for _, entry := range entries { - entryPath := filepath.Join(dirPath, entry.Name()) - if err := os.RemoveAll(entryPath); err != nil { - // Try to continue with other entries - continue - } - } - - return nil -} diff --git a/fluid/internal/extract/snapshot.go b/fluid/internal/extract/snapshot.go deleted file mode 100755 index 32402357..00000000 --- a/fluid/internal/extract/snapshot.go +++ /dev/null @@ -1,129 +0,0 @@ -package extract - -import ( - "context" - "fmt" - "time" - - "github.com/aspectrr/fluid.sh/fluid/internal/libvirt" - "github.com/aspectrr/fluid.sh/fluid/internal/model" - "github.com/aspectrr/fluid.sh/fluid/internal/workflow" -) - -// SnapshotManager handles snapshot creation and extraction mode detection. -type SnapshotManager struct { - domainMgr *libvirt.DomainManager -} - -// NewSnapshotManager creates a new SnapshotManager. -func NewSnapshotManager(domainMgr *libvirt.DomainManager) *SnapshotManager { - return &SnapshotManager{ - domainMgr: domainMgr, - } -} - -// ExtractionPlan describes how to extract a VM's filesystem. -type ExtractionPlan struct { - // VMName is the name of the source VM. - VMName string - - // Mode is the extraction mode: "snapshot" for running VMs, "offline" for stopped VMs. - Mode string - - // DiskPath is the path to the disk image to extract from. - // For snapshot mode, this is the backing file of the snapshot. - // For offline mode, this is the VM's primary disk. - DiskPath string - - // SnapshotName is the name of the created snapshot (empty for offline mode). - SnapshotName string - - // Cleanup is a function to call to clean up the snapshot (nil for offline mode). - Cleanup workflow.CleanupFunc -} - -// DetermineExtractionMode determines whether to use snapshot or offline mode -// based on the VM's current state. 
-func (m *SnapshotManager) DetermineExtractionMode(ctx context.Context, vmName string) (string, error) { - state, err := m.domainMgr.GetDomainState(ctx, vmName) - if err != nil { - return "", fmt.Errorf("failed to get domain state: %w", err) - } - - if state.IsRunning() { - return model.ModeSnapshot, nil - } - return model.ModeOffline, nil -} - -// PrepareExtraction prepares the extraction plan for a VM. -// For running VMs, it creates a disk-only snapshot. -// For stopped VMs, it returns the disk path directly. -func (m *SnapshotManager) PrepareExtraction(ctx context.Context, vmName string) (*ExtractionPlan, error) { - // Get domain info - domainInfo, err := m.domainMgr.LookupDomain(ctx, vmName) - if err != nil { - return nil, fmt.Errorf("failed to lookup domain: %w", err) - } - - // Determine extraction mode - mode, err := m.DetermineExtractionMode(ctx, vmName) - if err != nil { - return nil, err - } - - plan := &ExtractionPlan{ - VMName: vmName, - Mode: mode, - } - - if mode == model.ModeOffline { - // For offline mode, use the disk directly - plan.DiskPath = domainInfo.DiskPath - return plan, nil - } - - // For snapshot mode, create a disk-only snapshot - snapshotName := generateSnapshotName(vmName) - - snapshotInfo, err := m.domainMgr.CreateDiskOnlySnapshot(ctx, vmName, snapshotName) - if err != nil { - return nil, workflow.NewWorkflowError( - workflow.StageCreateSnapshot, - workflow.ErrSnapshotFailed, - fmt.Sprintf("unable to create disk-only snapshot: %v", err), - ) - } - - plan.SnapshotName = snapshotName - plan.DiskPath = snapshotInfo.BackingFile - - // Create cleanup function that commits the snapshot back - plan.Cleanup = func() error { - commitCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - return m.domainMgr.BlockCommit(commitCtx, vmName, "vda", 5*time.Minute) - } - - return plan, nil -} - -// generateSnapshotName generates a unique snapshot name based on VM name and timestamp. 
-func generateSnapshotName(vmName string) string { - return fmt.Sprintf("clone-%s-%d", vmName, time.Now().UnixNano()) -} - -// CleanupSnapshot removes a snapshot created during extraction. -// This is typically called on successful completion to clean up resources. -func (m *SnapshotManager) CleanupSnapshot(ctx context.Context, vmName string, plan *ExtractionPlan) error { - if plan == nil || plan.Mode == model.ModeOffline { - // Nothing to clean up for offline mode - return nil - } - - if plan.Cleanup != nil { - return plan.Cleanup() - } - - return nil -} diff --git a/fluid/internal/libvirt/domain-stub.go b/fluid/internal/libvirt/domain-stub.go deleted file mode 100755 index 47f3fe0f..00000000 --- a/fluid/internal/libvirt/domain-stub.go +++ /dev/null @@ -1,126 +0,0 @@ -//go:build !libvirt - -package libvirt - -import ( - "context" - "errors" - "time" -) - -// Sentinel errors for domain operations. -var ( - ErrDomainNotFound = errors.New("domain not found") - ErrDomainTransient = errors.New("transient domains are not supported") - ErrDomainUnsupported = errors.New("domain configuration not supported") -) - -// DomainManager provides libvirt domain operations using libvirt-go bindings. -// This is a stub implementation that returns errors when libvirt is not available. -type DomainManager struct { - uri string -} - -// DomainInfo contains information about a libvirt domain. -type DomainInfo struct { - Name string - UUID string - State DomainState - Persistent bool - DiskPath string -} - -// DomainState represents the state of a domain. -type DomainState int - -const ( - DomainStateUnknown DomainState = iota - DomainStateRunning - DomainStatePaused - DomainStateShutdown - DomainStateStopped - DomainStateCrashed - DomainStateSuspended -) - -// String returns a human-readable domain state. 
-func (s DomainState) String() string { - switch s { - case DomainStateRunning: - return "running" - case DomainStatePaused: - return "paused" - case DomainStateShutdown: - return "shutdown" - case DomainStateStopped: - return "stopped" - case DomainStateCrashed: - return "crashed" - case DomainStateSuspended: - return "suspended" - default: - return "unknown" - } -} - -// IsRunning returns true if the domain is in a running state. -func (s DomainState) IsRunning() bool { - return s == DomainStateRunning || s == DomainStatePaused -} - -// SnapshotInfo contains information about a created snapshot. -type SnapshotInfo struct { - Name string - BackingFile string -} - -// NewDomainManager creates a new DomainManager with the given libvirt URI. -// Note: This stub implementation will return errors for all operations. -func NewDomainManager(uri string) *DomainManager { - if uri == "" { - uri = "qemu:///system" - } - return &DomainManager{ - uri: uri, - } -} - -// Connect is a stub that returns an error when libvirt is not available. -func (m *DomainManager) Connect() error { - return ErrLibvirtNotAvailable -} - -// Close is a stub that does nothing when libvirt is not available. -func (m *DomainManager) Close() error { - return nil -} - -// LookupDomain is a stub that returns an error when libvirt is not available. -func (m *DomainManager) LookupDomain(ctx context.Context, name string) (*DomainInfo, error) { - return nil, ErrLibvirtNotAvailable -} - -// GetDomainState is a stub that returns an error when libvirt is not available. -func (m *DomainManager) GetDomainState(ctx context.Context, name string) (DomainState, error) { - return DomainStateUnknown, ErrLibvirtNotAvailable -} - -// CreateDiskOnlySnapshot is a stub that returns an error when libvirt is not available. 
-func (m *DomainManager) CreateDiskOnlySnapshot(ctx context.Context, domainName, snapshotName string) (*SnapshotInfo, error) { - return nil, ErrLibvirtNotAvailable -} - -// BlockCommit is a stub that returns an error when libvirt is not available. -func (m *DomainManager) BlockCommit(ctx context.Context, domainName, diskTarget string, timeout time.Duration) error { - return ErrLibvirtNotAvailable -} - -// ListDomains is a stub that returns an error when libvirt is not available. -func (m *DomainManager) ListDomains(ctx context.Context) ([]*DomainInfo, error) { - return nil, ErrLibvirtNotAvailable -} - -// GetDiskPath is a stub that returns an error when libvirt is not available. -func (m *DomainManager) GetDiskPath(ctx context.Context, domainName string) (string, error) { - return "", ErrLibvirtNotAvailable -} diff --git a/fluid/internal/libvirt/domain.go b/fluid/internal/libvirt/domain.go deleted file mode 100755 index 3a53c147..00000000 --- a/fluid/internal/libvirt/domain.go +++ /dev/null @@ -1,498 +0,0 @@ -//go:build libvirt - -package libvirt - -import ( - "context" - "encoding/xml" - "errors" - "fmt" - "sync" - "time" - - libvirtgo "libvirt.org/go/libvirt" -) - -// DomainManager provides libvirt domain operations using libvirt-go bindings. -type DomainManager struct { - uri string - conn *libvirtgo.Connect - mu sync.Mutex -} - -// DomainInfo contains information about a libvirt domain. -type DomainInfo struct { - Name string - UUID string - State DomainState - Persistent bool - DiskPath string -} - -// DomainState represents the state of a domain. -type DomainState int - -const ( - DomainStateUnknown DomainState = iota - DomainStateRunning - DomainStatePaused - DomainStateShutdown - DomainStateStopped - DomainStateCrashed - DomainStateSuspended -) - -// String returns a human-readable domain state. 
-func (s DomainState) String() string { - switch s { - case DomainStateRunning: - return "running" - case DomainStatePaused: - return "paused" - case DomainStateShutdown: - return "shutdown" - case DomainStateStopped: - return "stopped" - case DomainStateCrashed: - return "crashed" - case DomainStateSuspended: - return "suspended" - default: - return "unknown" - } -} - -// IsRunning returns true if the domain is in a running state. -func (s DomainState) IsRunning() bool { - return s == DomainStateRunning || s == DomainStatePaused -} - -// SnapshotInfo contains information about a created snapshot. -type SnapshotInfo struct { - Name string - BackingFile string -} - -// Domain XML structures for parsing disk information. -type domainXML struct { - XMLName xml.Name `xml:"domain"` - Name string `xml:"name"` - UUID string `xml:"uuid"` - Devices domainDevices `xml:"devices"` -} - -type domainDevices struct { - Disks []domainDisk `xml:"disk"` -} - -type domainDisk struct { - Type string `xml:"type,attr"` - Device string `xml:"device,attr"` - Driver domainDiskDriver `xml:"driver"` - Source domainDiskSource `xml:"source"` - Target domainDiskTarget `xml:"target"` -} - -type domainDiskDriver struct { - Name string `xml:"name,attr"` - Type string `xml:"type,attr"` -} - -type domainDiskSource struct { - File string `xml:"file,attr"` - Dev string `xml:"dev,attr"` -} - -type domainDiskTarget struct { - Dev string `xml:"dev,attr"` - Bus string `xml:"bus,attr"` -} - -// NewDomainManager creates a new DomainManager with the given libvirt URI. -func NewDomainManager(uri string) *DomainManager { - if uri == "" { - uri = "qemu:///system" - } - return &DomainManager{ - uri: uri, - } -} - -// Connect establishes a connection to libvirt. 
-func (m *DomainManager) Connect() error { - m.mu.Lock() - defer m.mu.Unlock() - - if m.conn != nil { - // Check if connection is still alive - if alive, _ := m.conn.IsAlive(); alive { - return nil - } - // Connection dead, close and reconnect - _, _ = m.conn.Close() - m.conn = nil - } - - conn, err := libvirtgo.NewConnect(m.uri) - if err != nil { - return fmt.Errorf("failed to connect to libvirt at %s: %w", m.uri, err) - } - m.conn = conn - return nil -} - -// Close closes the libvirt connection. -func (m *DomainManager) Close() error { - m.mu.Lock() - defer m.mu.Unlock() - - if m.conn != nil { - _, err := m.conn.Close() - m.conn = nil - return err - } - return nil -} - -// ensureConnected ensures we have an active connection. -func (m *DomainManager) ensureConnected() error { - return m.Connect() -} - -// LookupDomain looks up a domain by name and returns its information. -// Returns an error if the domain is transient or not found. -func (m *DomainManager) LookupDomain(ctx context.Context, name string) (*DomainInfo, error) { - if err := m.ensureConnected(); err != nil { - return nil, err - } - - m.mu.Lock() - dom, err := m.conn.LookupDomainByName(name) - m.mu.Unlock() - - if err != nil { - var libvirtErr libvirtgo.Error - if errors.As(err, &libvirtErr) { - if libvirtErr.Code == libvirtgo.ERR_NO_DOMAIN { - return nil, fmt.Errorf("domain %q not found: %w", name, ErrDomainNotFound) - } - } - return nil, fmt.Errorf("failed to lookup domain %q: %w", name, err) - } - defer func() { _ = dom.Free() }() - - // Check if domain is persistent (not transient) - persistent, err := dom.IsPersistent() - if err != nil { - return nil, fmt.Errorf("failed to check if domain is persistent: %w", err) - } - if !persistent { - return nil, fmt.Errorf("domain %q is transient: %w", name, ErrDomainTransient) - } - - // Get domain UUID - uuid, err := dom.GetUUIDString() - if err != nil { - return nil, fmt.Errorf("failed to get domain UUID: %w", err) - } - - // Get domain state - state, _, 
err := dom.GetState() - if err != nil { - return nil, fmt.Errorf("failed to get domain state: %w", err) - } - - // Get domain XML to extract disk path - xmlDesc, err := dom.GetXMLDesc(0) - if err != nil { - return nil, fmt.Errorf("failed to get domain XML: %w", err) - } - - diskPath, err := extractDiskPath(xmlDesc) - if err != nil { - return nil, fmt.Errorf("failed to extract disk path: %w", err) - } - - return &DomainInfo{ - Name: name, - UUID: uuid, - State: mapLibvirtState(state), - Persistent: persistent, - DiskPath: diskPath, - }, nil -} - -// GetDomainState returns the current state of a domain. -func (m *DomainManager) GetDomainState(ctx context.Context, name string) (DomainState, error) { - if err := m.ensureConnected(); err != nil { - return DomainStateUnknown, err - } - - m.mu.Lock() - dom, err := m.conn.LookupDomainByName(name) - m.mu.Unlock() - - if err != nil { - return DomainStateUnknown, fmt.Errorf("failed to lookup domain %q: %w", name, err) - } - defer func() { _ = dom.Free() }() - - state, _, err := dom.GetState() - if err != nil { - return DomainStateUnknown, fmt.Errorf("failed to get domain state: %w", err) - } - - return mapLibvirtState(state), nil -} - -// CreateDiskOnlySnapshot creates an external, disk-only snapshot without metadata. -// This is safe for running VMs and does not pause or stop the VM. -// Returns the snapshot info including the path to the new overlay file. 
-func (m *DomainManager) CreateDiskOnlySnapshot(ctx context.Context, domainName, snapshotName string) (*SnapshotInfo, error) { - if err := m.ensureConnected(); err != nil { - return nil, err - } - - m.mu.Lock() - dom, err := m.conn.LookupDomainByName(domainName) - m.mu.Unlock() - - if err != nil { - return nil, fmt.Errorf("failed to lookup domain %q: %w", domainName, err) - } - defer func() { _ = dom.Free() }() - - // Get current disk path from domain XML - xmlDesc, err := dom.GetXMLDesc(0) - if err != nil { - return nil, fmt.Errorf("failed to get domain XML: %w", err) - } - - currentDiskPath, err := extractDiskPath(xmlDesc) - if err != nil { - return nil, fmt.Errorf("failed to extract current disk path: %w", err) - } - - // The backing file for the container will be the current disk - // After snapshot, libvirt creates a new overlay and the current disk becomes backing - backingFile := currentDiskPath - - // Build snapshot XML for disk-only, external snapshot - // The snapshot file path will be auto-generated by libvirt - snapshotXML := fmt.Sprintf(` - - %s - Disk-only snapshot for container cloning - - - -`, snapshotName) - - // Create the snapshot with flags: - // - DISK_ONLY: Only snapshot the disk, not memory - // - ATOMIC: All-or-nothing operation - // - NO_METADATA: Don't store snapshot metadata in libvirt - flags := libvirtgo.DomainSnapshotCreateFlags(libvirtgo.DOMAIN_SNAPSHOT_CREATE_DISK_ONLY) | - libvirtgo.DomainSnapshotCreateFlags(libvirtgo.DOMAIN_SNAPSHOT_CREATE_ATOMIC) | - libvirtgo.DomainSnapshotCreateFlags(libvirtgo.DOMAIN_SNAPSHOT_CREATE_NO_METADATA) - - m.mu.Lock() - _, err = dom.CreateSnapshotXML(snapshotXML, flags) - m.mu.Unlock() - - if err != nil { - return nil, fmt.Errorf("failed to create disk-only snapshot: %w", err) - } - - return &SnapshotInfo{ - Name: snapshotName, - BackingFile: backingFile, - }, nil -} - -// BlockCommit merges a snapshot overlay back into its backing file and removes the overlay. 
-// This is used for cleanup after cloning or on rollback. -func (m *DomainManager) BlockCommit(ctx context.Context, domainName, diskTarget string, timeout time.Duration) error { - if err := m.ensureConnected(); err != nil { - return err - } - - m.mu.Lock() - dom, err := m.conn.LookupDomainByName(domainName) - m.mu.Unlock() - - if err != nil { - return fmt.Errorf("failed to lookup domain %q: %w", domainName, err) - } - defer func() { _ = dom.Free() }() - - // Start block commit - merge active layer into backing file - flags := libvirtgo.DomainBlockCommitFlags(libvirtgo.DOMAIN_BLOCK_COMMIT_ACTIVE) | libvirtgo.DomainBlockCommitFlags(libvirtgo.DOMAIN_BLOCK_COMMIT_DELETE) - - m.mu.Lock() - err = dom.BlockCommit(diskTarget, "", "", 0, flags) - m.mu.Unlock() - - if err != nil { - return fmt.Errorf("failed to start block commit: %w", err) - } - - // Wait for block commit to complete - deadline := time.Now().Add(timeout) - for time.Now().Before(deadline) { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - m.mu.Lock() - info, err := dom.GetBlockJobInfo(diskTarget, 0) - m.mu.Unlock() - - if err != nil { - // Job may have completed - break - } - - if info.Type == 0 { - // No job running, commit completed - break - } - - time.Sleep(500 * time.Millisecond) - } - - // Pivot to the base image if needed - m.mu.Lock() - _ = dom.BlockJobAbort(diskTarget, libvirtgo.DomainBlockJobAbortFlags(libvirtgo.DOMAIN_BLOCK_JOB_ABORT_PIVOT)) - m.mu.Unlock() - - return nil -} - -// ListDomains returns information about all domains (VMs) in libvirt. -// It lists both running and stopped persistent domains. 
-func (m *DomainManager) ListDomains(ctx context.Context) ([]*DomainInfo, error) { - if err := m.ensureConnected(); err != nil { - return nil, err - } - - m.mu.Lock() - // List all persistent domains (both active and inactive) - domains, err := m.conn.ListAllDomains(libvirtgo.ConnectListAllDomainsFlags(libvirtgo.CONNECT_LIST_DOMAINS_PERSISTENT)) - m.mu.Unlock() - - if err != nil { - return nil, fmt.Errorf("failed to list domains: %w", err) - } - - var result []*DomainInfo - for _, dom := range domains { - name, err := dom.GetName() - if err != nil { - _ = dom.Free() - continue - } - - uuid, err := dom.GetUUIDString() - if err != nil { - _ = dom.Free() - continue - } - - state, _, err := dom.GetState() - if err != nil { - _ = dom.Free() - continue - } - - persistent, _ := dom.IsPersistent() - - // Get disk path from domain XML - var diskPath string - xmlDesc, err := dom.GetXMLDesc(0) - if err == nil { - diskPath, _ = extractDiskPath(xmlDesc) - } - - result = append(result, &DomainInfo{ - Name: name, - UUID: uuid, - State: mapLibvirtState(state), - Persistent: persistent, - DiskPath: diskPath, - }) - - _ = dom.Free() - } - - return result, nil -} - -// GetDiskPath returns the primary disk path for a domain. -func (m *DomainManager) GetDiskPath(ctx context.Context, domainName string) (string, error) { - if err := m.ensureConnected(); err != nil { - return "", err - } - - m.mu.Lock() - dom, err := m.conn.LookupDomainByName(domainName) - m.mu.Unlock() - - if err != nil { - return "", fmt.Errorf("failed to lookup domain %q: %w", domainName, err) - } - defer func() { _ = dom.Free() }() - - xmlDesc, err := dom.GetXMLDesc(0) - if err != nil { - return "", fmt.Errorf("failed to get domain XML: %w", err) - } - - return extractDiskPath(xmlDesc) -} - -// extractDiskPath parses domain XML and extracts the primary disk file path. 
-func extractDiskPath(xmlDesc string) (string, error) { - var domain domainXML - if err := xml.Unmarshal([]byte(xmlDesc), &domain); err != nil { - return "", fmt.Errorf("failed to parse domain XML: %w", err) - } - - // Find the first disk device (typically vda) - for _, disk := range domain.Devices.Disks { - if disk.Device == "disk" && disk.Source.File != "" { - return disk.Source.File, nil - } - } - - return "", fmt.Errorf("no disk device found in domain XML") -} - -// mapLibvirtState converts libvirt domain state to our DomainState type. -func mapLibvirtState(state libvirtgo.DomainState) DomainState { - switch state { - case libvirtgo.DOMAIN_RUNNING: - return DomainStateRunning - case libvirtgo.DOMAIN_PAUSED: - return DomainStatePaused - case libvirtgo.DOMAIN_SHUTDOWN: - return DomainStateShutdown - case libvirtgo.DOMAIN_SHUTOFF: - return DomainStateStopped - case libvirtgo.DOMAIN_CRASHED: - return DomainStateCrashed - case libvirtgo.DOMAIN_PMSUSPENDED: - return DomainStateSuspended - default: - return DomainStateUnknown - } -} - -// Sentinel errors for domain operations. -var ( - ErrDomainNotFound = errors.New("domain not found") - ErrDomainTransient = errors.New("transient domains are not supported") - ErrDomainUnsupported = errors.New("domain configuration not supported") -) diff --git a/fluid/internal/libvirt/helpers.go b/fluid/internal/libvirt/helpers.go deleted file mode 100644 index 280039cb..00000000 --- a/fluid/internal/libvirt/helpers.go +++ /dev/null @@ -1,269 +0,0 @@ -package libvirt - -import ( - "crypto/rand" - "fmt" - "strconv" - "strings" - - "github.com/beevik/etree" -) - -// generateMACAddressHelper generates a random MAC address with the locally administered bit set. -// Uses the 52:54:00 prefix which is commonly used by QEMU/KVM. 
-func generateMACAddressHelper() string { - buf := make([]byte, 3) - _, _ = rand.Read(buf) - return fmt.Sprintf("52:54:00:%02x:%02x:%02x", buf[0], buf[1], buf[2]) -} - -// modifyClonedXMLHelper takes the XML from a source domain and adapts it for a new cloned domain. -// It sets a new name, UUID, disk path, MAC address, and cloud-init ISO path. -// If cloudInitISO is provided, any existing CDROM device is updated to use it, ensuring the -// cloned VM gets a unique instance-id and fresh network configuration via cloud-init. -func modifyClonedXMLHelper(sourceXML, newName, newDiskPath, cloudInitISO string, cpu, memoryMB int, network string) (string, error) { - doc := etree.NewDocument() - if err := doc.ReadFromString(sourceXML); err != nil { - return "", fmt.Errorf("parse source XML: %w", err) - } - - root := doc.Root() - if root == nil { - return "", fmt.Errorf("invalid XML: no root element") - } - - // Update VM name - nameElem := root.SelectElement("name") - if nameElem == nil { - return "", fmt.Errorf("invalid XML: missing element") - } - nameElem.SetText(newName) - - // Remove UUID - if uuidElem := root.SelectElement("uuid"); uuidElem != nil { - root.RemoveChild(uuidElem) - } - - // Update CPU - if cpu > 0 { - if vcpuElem := root.SelectElement("vcpu"); vcpuElem != nil { - vcpuElem.SetText(strconv.Itoa(cpu)) - } - } - - // Update Memory - if memoryMB > 0 { - memKiB := strconv.Itoa(memoryMB * 1024) - if memElem := root.SelectElement("memory"); memElem != nil { - memElem.SetText(memKiB) - } - if currMemElem := root.SelectElement("currentMemory"); currMemElem != nil { - currMemElem.SetText(memKiB) - } - } - - // Update disk path for the main virtual disk (vda) - var diskReplaced bool - for _, disk := range root.FindElements("./devices/disk[@device='disk']") { - if target := disk.SelectElement("target"); target != nil { - if bus := target.SelectAttr("bus"); bus != nil && bus.Value == "virtio" { - if source := disk.SelectElement("source"); source != nil { - 
source.SelectAttr("file").Value = newDiskPath - diskReplaced = true - break - } - } - } - } - if !diskReplaced { - return "", fmt.Errorf("could not find a virtio disk in the source XML to replace") - } - - // Handle cloud-init CDROM: update existing or add new one - // This is critical for cloned VMs - they need a unique instance-id to trigger - // cloud-init re-initialization, including DHCP network configuration - if cloudInitISO != "" { - devices := root.SelectElement("devices") - if devices == nil { - return "", fmt.Errorf("invalid XML: missing element") - } - - // Find any existing CDROM device (not just ones with source files) - existingCDROMs := root.FindElements("./devices/disk[@device='cdrom']") - - if len(existingCDROMs) > 0 { - // Update first existing CDROM - cdrom := existingCDROMs[0] - if source := cdrom.SelectElement("source"); source != nil { - // Update existing source element - if fileAttr := source.SelectAttr("file"); fileAttr != nil { - fileAttr.Value = cloudInitISO - } else { - source.CreateAttr("file", cloudInitISO) - } - } else { - // Create source element if missing - source = cdrom.CreateElement("source") - source.CreateAttr("file", cloudInitISO) - } - } else { - // No existing CDROM - add new one with SCSI controller - hasScsiController := false - for _, ctrl := range root.FindElements("./devices/controller[@type='scsi']") { - if model := ctrl.SelectAttr("model"); model != nil && model.Value == "virtio-scsi" { - hasScsiController = true - break - } - } - if !hasScsiController { - scsiCtrl := devices.CreateElement("controller") - scsiCtrl.CreateAttr("type", "scsi") - scsiCtrl.CreateAttr("model", "virtio-scsi") - } - - cdrom := devices.CreateElement("disk") - cdrom.CreateAttr("type", "file") - cdrom.CreateAttr("device", "cdrom") - - driver := cdrom.CreateElement("driver") - driver.CreateAttr("name", "qemu") - driver.CreateAttr("type", "raw") - - source := cdrom.CreateElement("source") - source.CreateAttr("file", cloudInitISO) - - target := 
cdrom.CreateElement("target") - target.CreateAttr("dev", "sda") - target.CreateAttr("bus", "scsi") - - cdrom.CreateElement("readonly") - } - } - - // Update network interface: set new MAC and remove PCI address - if iface := root.FindElement("./devices/interface"); iface != nil { - macElem := iface.SelectElement("mac") - if macElem != nil { - if addrAttr := macElem.SelectAttr("address"); addrAttr != nil { - addrAttr.Value = generateMACAddressHelper() - } - } else { - macElem = iface.CreateElement("mac") - macElem.CreateAttr("address", generateMACAddressHelper()) - } - - if addrElem := iface.SelectElement("address"); addrElem != nil { - iface.RemoveChild(addrElem) - } - - // Update network source if provided - if network != "" && iface.SelectAttrValue("type", "") == "network" { - if source := iface.SelectElement("source"); source != nil { - if netAttr := source.SelectAttr("network"); netAttr != nil { - netAttr.Value = network - } else { - source.CreateAttr("network", network) - } - } else { - source := iface.CreateElement("source") - source.CreateAttr("network", network) - } - } - } else { - // Handle socket_vmnet case (qemu:commandline) - var cmdline *etree.Element - for _, child := range root.ChildElements() { - if child.Tag == "commandline" && child.Space == "qemu" { - cmdline = child - break - } - } - - if cmdline != nil { - for _, child := range cmdline.ChildElements() { - if child.Tag == "arg" && child.Space == "qemu" { - if valAttr := child.SelectAttr("value"); valAttr != nil { - if strings.HasPrefix(valAttr.Value, "virtio-net-pci") && strings.Contains(valAttr.Value, "mac=") { - parts := strings.Split(valAttr.Value, ",") - newParts := make([]string, 0, len(parts)) - macUpdated := false - for _, part := range parts { - if strings.HasPrefix(part, "mac=") { - newParts = append(newParts, "mac="+generateMACAddressHelper()) - macUpdated = true - } else { - newParts = append(newParts, part) - } - } - if macUpdated { - valAttr.Value = strings.Join(newParts, ",") - 
break - } - } - } - } - } - } - } - - // Remove existing graphics password - if graphics := root.FindElement("./devices/graphics"); graphics != nil { - graphics.RemoveAttr("passwd") - } - - // Remove existing sound devices - for _, sound := range root.FindElements("./devices/sound") { - root.SelectElement("devices").RemoveChild(sound) - } - - doc.Indent(2) - newXML, err := doc.WriteToString() - if err != nil { - return "", fmt.Errorf("failed to write modified XML: %w", err) - } - - return newXML, nil -} - -// parseDomIfAddrIPv4WithMACHelper parses virsh domifaddr output and returns both IP and MAC address. -func parseDomIfAddrIPv4WithMACHelper(s string) (ip string, mac string) { - lines := strings.Split(s, "\n") - for _, l := range lines { - l = strings.TrimSpace(l) - if l == "" || strings.HasPrefix(l, "Name") || strings.HasPrefix(l, "-") { - continue - } - parts := strings.Fields(l) - if len(parts) >= 4 && parts[2] == "ipv4" { - mac = parts[1] - addr := parts[3] - if i := strings.IndexByte(addr, '/'); i > 0 { - ip = addr[:i] - } else { - ip = addr - } - return ip, mac - } - } - return "", "" -} - -// parseVMStateHelper converts virsh domstate output to VMState. 
-// VMState is defined in virsh.go/virsh-stub.go -func parseVMStateHelper(output string) VMState { - state := strings.TrimSpace(output) - switch state { - case "running": - return VMStateRunning - case "paused": - return VMStatePaused - case "shut off": - return VMStateShutOff - case "crashed": - return VMStateCrashed - case "pmsuspended": - return VMStateSuspended - default: - return VMStateUnknown - } -} diff --git a/fluid/internal/libvirt/helpers_test.go b/fluid/internal/libvirt/helpers_test.go deleted file mode 100644 index 6b6fb4f9..00000000 --- a/fluid/internal/libvirt/helpers_test.go +++ /dev/null @@ -1,388 +0,0 @@ -package libvirt - -import ( - "strings" - "testing" -) - -func TestModifyClonedXMLHelper_UpdatesCloudInitISO(t *testing.T) { - // Test that modifyClonedXMLHelper updates existing CDROM device to use new cloud-init ISO - sourceXML := ` - test-vm - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - - - - - -
- - -` - - newXML, err := modifyClonedXMLHelper(sourceXML, "sbx-clone123", - "/var/lib/libvirt/images/jobs/sbx-clone123/disk-overlay.qcow2", - "/var/lib/libvirt/images/jobs/sbx-clone123/cloud-init.iso", - 2, 2048, "default") - if err != nil { - t.Fatalf("modifyClonedXMLHelper() error = %v", err) - } - - // Should have updated name - if !strings.Contains(newXML, "sbx-clone123") { - t.Error("modifyClonedXMLHelper() did not update VM name") - } - - // Should have updated disk path - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-clone123/disk-overlay.qcow2") { - t.Error("modifyClonedXMLHelper() did not update disk path") - } - - // CRITICAL: Should have updated cloud-init ISO path (not the old /tmp/test-vm-seed.img) - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-clone123/cloud-init.iso") { - t.Errorf("modifyClonedXMLHelper() did not update cloud-init ISO path in XML:\n%s", newXML) - } - - // Should NOT contain the old cloud-init ISO path - if strings.Contains(newXML, "/tmp/test-vm-seed.img") { - t.Errorf("modifyClonedXMLHelper() still contains old cloud-init ISO path in XML:\n%s", newXML) - } - - // UUID should be removed - if strings.Contains(newXML, "12345678-1234-1234-1234-123456789012") { - t.Error("modifyClonedXMLHelper() did not remove UUID") - } - - // MAC address should be different from source - if strings.Contains(newXML, "52:54:00:11:22:33") { - t.Error("modifyClonedXMLHelper() did not generate new MAC address") - } -} - -func TestModifyClonedXMLHelper_AddsCloudInitCDROM(t *testing.T) { - // Test that modifyClonedXMLHelper adds CDROM device when source VM has none - sourceXML := ` - test-vm-no-cdrom - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - -` - - newXML, err := modifyClonedXMLHelper(sourceXML, "sbx-new", - "/var/lib/libvirt/images/jobs/sbx-new/disk.qcow2", - "/var/lib/libvirt/images/jobs/sbx-new/cloud-init.iso", - 2, 2048, "default") - if err != nil { - 
t.Fatalf("modifyClonedXMLHelper() error = %v", err) - } - - // Should have added CDROM device with cloud-init ISO - if !strings.Contains(newXML, `device="cdrom"`) { - t.Errorf("modifyClonedXMLHelper() did not add CDROM device in XML:\n%s", newXML) - } - - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-new/cloud-init.iso") { - t.Errorf("modifyClonedXMLHelper() did not add cloud-init ISO path in XML:\n%s", newXML) - } - - // Should have added SCSI controller for the CDROM - if !strings.Contains(newXML, `type="scsi"`) { - t.Errorf("modifyClonedXMLHelper() did not add SCSI controller in XML:\n%s", newXML) - } -} - -func TestModifyClonedXMLHelper_NoCloudInitISO(t *testing.T) { - // Test that modifyClonedXMLHelper works without cloud-init ISO (empty string) - sourceXML := ` - test-vm - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - - - - - - -` - - // Empty cloudInitISO - should not modify CDROM - newXML, err := modifyClonedXMLHelper(sourceXML, "sbx-no-cloud", - "/var/lib/libvirt/images/jobs/sbx-no-cloud/disk.qcow2", - "", // empty cloud-init ISO - 2, 2048, "default") - if err != nil { - t.Fatalf("modifyClonedXMLHelper() error = %v", err) - } - - // Old CDROM path should still be there (unchanged) - if !strings.Contains(newXML, "/tmp/old-seed.img") { - t.Errorf("modifyClonedXMLHelper() modified CDROM when cloudInitISO was empty:\n%s", newXML) - } - - // Name and disk should still be updated - if !strings.Contains(newXML, "sbx-no-cloud") { - t.Error("modifyClonedXMLHelper() did not update VM name") - } - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-no-cloud/disk.qcow2") { - t.Error("modifyClonedXMLHelper() did not update disk path") - } -} - -func TestModifyClonedXMLHelper_UpdatesEmptyCDROM(t *testing.T) { - // Test that modifyClonedXMLHelper updates existing CDROM even when it has no source file (empty drive) - // This is the bug fix: source VMs may have a CDROM with sda but no source file - 
sourceXML := ` - test-vm - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - - - - - - - - - - - - - - - -` - - newXML, err := modifyClonedXMLHelper(sourceXML, "sbx-test", - "/images/sbx-test/disk.qcow2", - "/images/sbx-test/cloud-init.iso", - 2, 2048, "default") - if err != nil { - t.Fatalf("error = %v", err) - } - - // Should NOT have duplicate sda - this was the bug - count := strings.Count(newXML, `dev="sda"`) - if count != 1 { - t.Errorf("expected 1 sda device, got %d:\n%s", count, newXML) - } - - // Should have cloud-init ISO path - if !strings.Contains(newXML, "/images/sbx-test/cloud-init.iso") { - t.Errorf("missing cloud-init ISO path:\n%s", newXML) - } - - // Should only have one CDROM device - cdromCount := strings.Count(newXML, `device="cdrom"`) - if cdromCount != 1 { - t.Errorf("expected 1 cdrom device, got %d:\n%s", cdromCount, newXML) - } -} - -func TestModifyClonedXMLHelper_UpdatesCDROMWithEmptySourceElement(t *testing.T) { - // Test CDROM with empty element (no file attribute) - sourceXML := ` - test-vm - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - - - - - - - - - - - - - - - - -` - - newXML, err := modifyClonedXMLHelper(sourceXML, "sbx-test2", - "/images/sbx-test2/disk.qcow2", - "/images/sbx-test2/cloud-init.iso", - 2, 2048, "default") - if err != nil { - t.Fatalf("error = %v", err) - } - - // Should NOT have duplicate sda - count := strings.Count(newXML, `dev="sda"`) - if count != 1 { - t.Errorf("expected 1 sda device, got %d:\n%s", count, newXML) - } - - // Should have cloud-init ISO path - if !strings.Contains(newXML, "/images/sbx-test2/cloud-init.iso") { - t.Errorf("missing cloud-init ISO path:\n%s", newXML) - } -} - -func TestGenerateMACAddressHelper(t *testing.T) { - mac := generateMACAddressHelper() - - // Should start with QEMU prefix - if !strings.HasPrefix(mac, "52:54:00:") { - t.Errorf("generateMACAddressHelper() = %q, want prefix '52:54:00:'", mac) - } - - // Should be valid format (17 chars: xx:xx:xx:xx:xx:xx) - if 
len(mac) != 17 { - t.Errorf("generateMACAddressHelper() = %q, want 17 chars", mac) - } - - // Should have 5 colons - if strings.Count(mac, ":") != 5 { - t.Errorf("generateMACAddressHelper() = %q, want 5 colons", mac) - } - - // Generate another one - should be different (random) - mac2 := generateMACAddressHelper() - if mac == mac2 { - t.Errorf("generateMACAddressHelper() returned same MAC twice: %q", mac) - } -} - -func TestParseDomIfAddrIPv4WithMACHelper(t *testing.T) { - tests := []struct { - name string - input string - expectedIP string - expectedMAC string - }{ - { - name: "valid output with IP and MAC", - input: `Name MAC address Protocol Address -------------------------------------------------------------------------------- - vnet0 52:54:00:ab:cd:ef ipv4 192.168.122.100/24`, - expectedIP: "192.168.122.100", - expectedMAC: "52:54:00:ab:cd:ef", - }, - { - name: "output with multiple interfaces", - input: `Name MAC address Protocol Address -------------------------------------------------------------------------------- - vnet0 52:54:00:11:22:33 ipv4 192.168.122.50/24 - vnet1 52:54:00:aa:bb:cc ipv4 10.0.0.5/24`, - expectedIP: "192.168.122.50", - expectedMAC: "52:54:00:11:22:33", - }, - { - name: "empty output", - input: "", - expectedIP: "", - expectedMAC: "", - }, - { - name: "no IPv4 address", - input: `Name MAC address Protocol Address --------------------------------------------------------------------------------`, - expectedIP: "", - expectedMAC: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ip, mac := parseDomIfAddrIPv4WithMACHelper(tt.input) - if ip != tt.expectedIP { - t.Errorf("parseDomIfAddrIPv4WithMACHelper() IP = %q, want %q", ip, tt.expectedIP) - } - if mac != tt.expectedMAC { - t.Errorf("parseDomIfAddrIPv4WithMACHelper() MAC = %q, want %q", mac, tt.expectedMAC) - } - }) - } -} - -func TestParseVMStateHelper(t *testing.T) { - tests := []struct { - name string - output string - expected VMState - }{ - { - 
name: "running state", - output: "running\n", - expected: VMStateRunning, - }, - { - name: "shut off state", - output: "shut off\n", - expected: VMStateShutOff, - }, - { - name: "paused state", - output: "paused\n", - expected: VMStatePaused, - }, - { - name: "unknown state", - output: "weird-state\n", - expected: VMStateUnknown, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := parseVMStateHelper(tt.output) - if result != tt.expected { - t.Errorf("parseVMStateHelper(%q) = %v, want %v", tt.output, result, tt.expected) - } - }) - } -} diff --git a/fluid/internal/libvirt/multihost.go b/fluid/internal/libvirt/multihost.go deleted file mode 100644 index 2e16cfdf..00000000 --- a/fluid/internal/libvirt/multihost.go +++ /dev/null @@ -1,463 +0,0 @@ -package libvirt - -import ( - "bufio" - "context" - "errors" - "fmt" - "log/slog" - "os/exec" - "strings" - "sync" - "time" - "unicode" - - "github.com/aspectrr/fluid.sh/fluid/internal/config" -) - -const ( - // DefaultSSHUser is the default SSH user for remote hosts. - DefaultSSHUser = "root" - // DefaultSSHPort is the default SSH port. - DefaultSSHPort = 22 - // DefaultHostQueryTimeout is the default per-host query timeout. - DefaultHostQueryTimeout = 30 * time.Second - // MaxShellInputLength is the maximum allowed length for shell input. - MaxShellInputLength = 4096 -) - -// MultiHostDomainInfo extends DomainInfo with host identification. -type MultiHostDomainInfo struct { - Name string - UUID string - State DomainState - Persistent bool - DiskPath string - HostName string // Display name of the host - HostAddress string // IP or hostname of the host -} - -// HostError represents an error from querying a specific host. -type HostError struct { - HostName string `json:"host_name"` - HostAddress string `json:"host_address"` - Error string `json:"error"` -} - -// MultiHostListResult contains the aggregated result from querying all hosts. 
-type MultiHostListResult struct { - Domains []*MultiHostDomainInfo - HostErrors []HostError -} - -// SSHRunner executes commands on a remote host via SSH. -// This interface enables testing without actual SSH connections. -type SSHRunner interface { - Run(ctx context.Context, address, user string, port int, command string) (string, error) -} - -// defaultSSHRunner implements SSHRunner using actual SSH commands. -type defaultSSHRunner struct{} - -func (r *defaultSSHRunner) Run(ctx context.Context, address, user string, port int, command string) (string, error) { - args := []string{ - "-o", "BatchMode=yes", - "-o", "StrictHostKeyChecking=accept-new", - "-o", "ConnectTimeout=10", - "-p", fmt.Sprintf("%d", port), - fmt.Sprintf("%s@%s", user, address), - command, - } - - cmd := exec.CommandContext(ctx, "ssh", args...) - output, err := cmd.CombinedOutput() - if err != nil { - return "", fmt.Errorf("ssh command failed: %w (output: %s)", err, string(output)) - } - - return string(output), nil -} - -// MultiHostDomainManager queries multiple libvirt hosts via SSH. -type MultiHostDomainManager struct { - hosts []config.HostConfig - logger *slog.Logger - sshRunner SSHRunner -} - -// NewMultiHostDomainManager creates a new MultiHostDomainManager. -func NewMultiHostDomainManager(hosts []config.HostConfig, logger *slog.Logger) *MultiHostDomainManager { - return &MultiHostDomainManager{ - hosts: hosts, - logger: logger, - sshRunner: &defaultSSHRunner{}, - } -} - -// NewMultiHostDomainManagerWithRunner creates a MultiHostDomainManager with a custom SSH runner. -// This is primarily useful for testing. -func NewMultiHostDomainManagerWithRunner(hosts []config.HostConfig, logger *slog.Logger, runner SSHRunner) *MultiHostDomainManager { - return &MultiHostDomainManager{ - hosts: hosts, - logger: logger, - sshRunner: runner, - } -} - -// ListDomains queries all configured hosts in parallel and aggregates VM listings. -// Returns all VMs found along with any host errors encountered. 
-func (m *MultiHostDomainManager) ListDomains(ctx context.Context) (*MultiHostListResult, error) { - if len(m.hosts) == 0 { - return &MultiHostListResult{}, nil - } - - type hostResult struct { - domains []*MultiHostDomainInfo - err *HostError - } - - results := make(chan hostResult, len(m.hosts)) - var wg sync.WaitGroup - - for _, host := range m.hosts { - wg.Add(1) - go func(h config.HostConfig) { - defer wg.Done() - - domains, err := m.queryHost(ctx, h) - if err != nil { - m.logger.Warn("failed to query host", - "host_name", h.Name, - "host_address", h.Address, - "error", err, - ) - results <- hostResult{ - err: &HostError{ - HostName: h.Name, - HostAddress: h.Address, - Error: err.Error(), - }, - } - return - } - results <- hostResult{domains: domains} - }(host) - } - - // Close results channel when all goroutines complete - go func() { - wg.Wait() - close(results) - }() - - // Aggregate results - var allDomains []*MultiHostDomainInfo - var hostErrors []HostError - - for result := range results { - if result.err != nil { - hostErrors = append(hostErrors, *result.err) - } else { - allDomains = append(allDomains, result.domains...) - } - } - - return &MultiHostListResult{ - Domains: allDomains, - HostErrors: hostErrors, - }, nil -} - -// queryHost queries a single host for its VM list via SSH. 
-func (m *MultiHostDomainManager) queryHost(ctx context.Context, host config.HostConfig) ([]*MultiHostDomainInfo, error) { - // Apply defaults - sshUser := host.SSHUser - if sshUser == "" { - sshUser = DefaultSSHUser - } - sshPort := host.SSHPort - if sshPort == 0 { - sshPort = DefaultSSHPort - } - queryTimeout := host.QueryTimeout - if queryTimeout == 0 { - queryTimeout = DefaultHostQueryTimeout - } - - // Create context with timeout - queryCtx, cancel := context.WithTimeout(ctx, queryTimeout) - defer cancel() - - // Get list of VM names - vmNames, err := m.runSSHCommand(queryCtx, host.Address, sshUser, sshPort, - "virsh list --all --name") - if err != nil { - return nil, fmt.Errorf("list VMs: %w", err) - } - - // Parse VM names (one per line, skip empty lines) - var names []string - scanner := bufio.NewScanner(strings.NewReader(vmNames)) - for scanner.Scan() { - name := strings.TrimSpace(scanner.Text()) - if name != "" { - names = append(names, name) - } - } - - if len(names) == 0 { - return nil, nil - } - - // Get details for each VM - var domains []*MultiHostDomainInfo - for _, name := range names { - domain, err := m.getDomainInfo(queryCtx, host, sshUser, sshPort, name) - if err != nil { - m.logger.Debug("failed to get domain info", - "host", host.Name, - "domain", name, - "error", err, - ) - // Continue with other VMs even if one fails - continue - } - domains = append(domains, domain) - } - - return domains, nil -} - -// getDomainInfo gets detailed information for a single domain. 
-func (m *MultiHostDomainManager) getDomainInfo(ctx context.Context, host config.HostConfig, sshUser string, sshPort int, name string) (*MultiHostDomainInfo, error) { - escapedName, err := shellEscape(name) - if err != nil { - return nil, fmt.Errorf("invalid domain name: %w", err) - } - - // Get domain info using virsh dominfo - output, err := m.runSSHCommand(ctx, host.Address, sshUser, sshPort, - fmt.Sprintf("virsh dominfo %s", escapedName)) - if err != nil { - return nil, fmt.Errorf("dominfo: %w", err) - } - - domain := &MultiHostDomainInfo{ - Name: name, - HostName: host.Name, - HostAddress: host.Address, - } - - // Parse dominfo output - scanner := bufio.NewScanner(strings.NewReader(output)) - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, ":", 2) - if len(parts) != 2 { - continue - } - key := strings.TrimSpace(parts[0]) - value := strings.TrimSpace(parts[1]) - - switch key { - case "UUID": - domain.UUID = value - case "State": - domain.State = parseVirshState(value) - case "Persistent": - domain.Persistent = value == "yes" - } - } - - // Get disk path using virsh domblklist (reuse escapedName from above) - diskOutput, err := m.runSSHCommand(ctx, host.Address, sshUser, sshPort, - fmt.Sprintf("virsh domblklist %s --details", escapedName)) - if err == nil { - domain.DiskPath = parseDiskPath(diskOutput) - } - - return domain, nil -} - -// runSSHCommand executes a command on a remote host via SSH. -func (m *MultiHostDomainManager) runSSHCommand(ctx context.Context, address, user string, port int, command string) (string, error) { - return m.sshRunner.Run(ctx, address, user, port, command) -} - -// parseVirshState converts virsh state string to DomainState. 
-func parseVirshState(state string) DomainState { - switch strings.ToLower(state) { - case "running": - return DomainStateRunning - case "paused": - return DomainStatePaused - case "shut off": - return DomainStateStopped - case "shutdown": - return DomainStateShutdown - case "crashed": - return DomainStateCrashed - case "pmsuspended": - return DomainStateSuspended - default: - return DomainStateUnknown - } -} - -// parseDiskPath extracts the primary disk path from virsh domblklist output. -func parseDiskPath(output string) string { - // Output format: - // Type Device Target Source - // ------------------------------------------------ - // file disk vda /var/lib/libvirt/images/vm.qcow2 - scanner := bufio.NewScanner(strings.NewReader(output)) - lineNum := 0 - for scanner.Scan() { - lineNum++ - // Skip header lines - if lineNum <= 2 { - continue - } - line := scanner.Text() - fields := strings.Fields(line) - if len(fields) >= 4 && fields[1] == "disk" { - return fields[3] - } - } - return "" -} - -// ErrShellInputTooLong is returned when input exceeds MaxShellInputLength. -var ErrShellInputTooLong = errors.New("shell input exceeds maximum length") - -// ErrShellInputNullByte is returned when input contains null bytes. -var ErrShellInputNullByte = errors.New("shell input contains null byte") - -// ErrShellInputControlChar is returned when input contains control characters. -var ErrShellInputControlChar = errors.New("shell input contains control character") - -// validateShellInput checks input for dangerous characters before shell escaping. 
-func validateShellInput(s string) error { - if len(s) > MaxShellInputLength { - return ErrShellInputTooLong - } - for _, r := range s { - if r == 0 { - return ErrShellInputNullByte - } - // Reject control characters (0x00-0x1F) except tab (0x09) and newline (0x0A) - if unicode.IsControl(r) && r != '\t' && r != '\n' { - return ErrShellInputControlChar - } - } - return nil -} - -// shellEscape escapes a string for safe use in shell commands. -// Returns an error if the input contains dangerous characters. -func shellEscape(s string) (string, error) { - if err := validateShellInput(s); err != nil { - return "", err - } - // Wrap in single quotes and escape existing single quotes - return "'" + strings.ReplaceAll(s, "'", "'\"'\"'") + "'", nil -} - -// FindHostForVM searches all configured hosts to find which one has the given VM. -// Returns the host config if found, or an error if the VM is not found on any host. -func (m *MultiHostDomainManager) FindHostForVM(ctx context.Context, vmName string) (*config.HostConfig, error) { - if len(m.hosts) == 0 { - return nil, fmt.Errorf("no hosts configured") - } - - type findResult struct { - host *config.HostConfig - found bool - err error - } - - results := make(chan findResult, len(m.hosts)) - var wg sync.WaitGroup - - for i := range m.hosts { - wg.Add(1) - go func(h *config.HostConfig) { - defer wg.Done() - - found, err := m.hostHasVM(ctx, *h, vmName) - if err != nil { - m.logger.Debug("error checking host for VM", - "host", h.Name, - "vm_name", vmName, - "error", err, - ) - results <- findResult{err: err} - return - } - if found { - results <- findResult{host: h, found: true} - } else { - results <- findResult{found: false} - } - }(&m.hosts[i]) - } - - go func() { - wg.Wait() - close(results) - }() - - // Collect results - return first host that has the VM - var lastErr error - for result := range results { - if result.found { - return result.host, nil - } - if result.err != nil { - lastErr = result.err - } - } - - if 
lastErr != nil { - return nil, fmt.Errorf("VM %q not found on any host (last error: %w)", vmName, lastErr) - } - return nil, fmt.Errorf("VM %q not found on any configured host", vmName) -} - -// hostHasVM checks if a specific host has the given VM. -func (m *MultiHostDomainManager) hostHasVM(ctx context.Context, host config.HostConfig, vmName string) (bool, error) { - escapedName, err := shellEscape(vmName) - if err != nil { - return false, fmt.Errorf("invalid VM name: %w", err) - } - - sshUser := host.SSHUser - if sshUser == "" { - sshUser = DefaultSSHUser - } - sshPort := host.SSHPort - if sshPort == 0 { - sshPort = DefaultSSHPort - } - queryTimeout := host.QueryTimeout - if queryTimeout == 0 { - queryTimeout = DefaultHostQueryTimeout - } - - queryCtx, cancel := context.WithTimeout(ctx, queryTimeout) - defer cancel() - - // Check if VM exists using virsh dominfo - _, err = m.runSSHCommand(queryCtx, host.Address, sshUser, sshPort, - fmt.Sprintf("virsh dominfo %s", escapedName)) - if err != nil { - // If virsh dominfo fails, the VM doesn't exist on this host - return false, nil - } - return true, nil -} - -// GetHosts returns the configured hosts. -func (m *MultiHostDomainManager) GetHosts() []config.HostConfig { - return m.hosts -} diff --git a/fluid/internal/libvirt/multihost_test.go b/fluid/internal/libvirt/multihost_test.go deleted file mode 100644 index 748d3a16..00000000 --- a/fluid/internal/libvirt/multihost_test.go +++ /dev/null @@ -1,817 +0,0 @@ -package libvirt - -import ( - "context" - "errors" - "fmt" - "log/slog" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/aspectrr/fluid.sh/fluid/internal/config" -) - -// mockSSHRunner implements SSHRunner for testing. 
-type mockSSHRunner struct { - mu sync.Mutex - responses map[string]string // command -> response - errors map[string]error // command -> error - defaultError error - delay time.Duration - callCount atomic.Int64 - callLog []mockSSHCall -} - -type mockSSHCall struct { - Address string - User string - Port int - Command string -} - -func newMockSSHRunner() *mockSSHRunner { - return &mockSSHRunner{ - responses: make(map[string]string), - errors: make(map[string]error), - } -} - -func (m *mockSSHRunner) Run(ctx context.Context, address, user string, port int, command string) (string, error) { - m.callCount.Add(1) - - m.mu.Lock() - m.callLog = append(m.callLog, mockSSHCall{ - Address: address, - User: user, - Port: port, - Command: command, - }) - m.mu.Unlock() - - if m.delay > 0 { - select { - case <-time.After(m.delay): - case <-ctx.Done(): - return "", ctx.Err() - } - } - - // Check for address-specific errors first - if err, ok := m.errors[address]; ok { - return "", err - } - - // Check for command-specific responses - if resp, ok := m.responses[command]; ok { - return resp, nil - } - - if m.defaultError != nil { - return "", m.defaultError - } - - return "", nil -} - -func (m *mockSSHRunner) setResponse(command, response string) { - m.mu.Lock() - defer m.mu.Unlock() - m.responses[command] = response -} - -func (m *mockSSHRunner) setHostError(address string, err error) { - m.mu.Lock() - defer m.mu.Unlock() - m.errors[address] = err -} - -func (m *mockSSHRunner) getCalls() []mockSSHCall { - m.mu.Lock() - defer m.mu.Unlock() - result := make([]mockSSHCall, len(m.callLog)) - copy(result, m.callLog) - return result -} - -func TestParseVirshState(t *testing.T) { - tests := []struct { - input string - expected DomainState - }{ - {"running", DomainStateRunning}, - {"Running", DomainStateRunning}, - {"RUNNING", DomainStateRunning}, - {"paused", DomainStatePaused}, - {"shut off", DomainStateStopped}, - {"shutdown", DomainStateShutdown}, - {"crashed", DomainStateCrashed}, - 
{"pmsuspended", DomainStateSuspended}, - {"unknown", DomainStateUnknown}, - {"", DomainStateUnknown}, - } - - for _, tt := range tests { - t.Run(tt.input, func(t *testing.T) { - result := parseVirshState(tt.input) - if result != tt.expected { - t.Errorf("parseVirshState(%q) = %v, want %v", tt.input, result, tt.expected) - } - }) - } -} - -func TestParseDiskPath(t *testing.T) { - tests := []struct { - name string - input string - expected string - }{ - { - name: "standard output", - input: `Type Device Target Source ------------------------------------------------- -file disk vda /var/lib/libvirt/images/test.qcow2 -file cdrom sda -`, - expected: "/var/lib/libvirt/images/test.qcow2", - }, - { - name: "multiple disks", - input: `Type Device Target Source ------------------------------------------------- -file disk vda /var/lib/libvirt/images/root.qcow2 -file disk vdb /var/lib/libvirt/images/data.qcow2`, - expected: "/var/lib/libvirt/images/root.qcow2", - }, - { - name: "empty output", - input: "", - expected: "", - }, - { - name: "no disks", - input: `Type Device Target Source -------------------------------------------------`, - expected: "", - }, - { - name: "cdrom only", - input: `Type Device Target Source ------------------------------------------------- -file cdrom sda /path/to/iso.iso`, - expected: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := parseDiskPath(tt.input) - if result != tt.expected { - t.Errorf("parseDiskPath() = %q, want %q", result, tt.expected) - } - }) - } -} - -func TestShellEscape(t *testing.T) { - tests := []struct { - input string - expected string - wantErr bool - }{ - {"simple", "'simple'", false}, - {"with spaces", "'with spaces'", false}, - {"with'quote", "'with'\"'\"'quote'", false}, - {"", "''", false}, - {"test-vm-01", "'test-vm-01'", false}, - {"with\ttab", "'with\ttab'", false}, - {"with\nnewline", "'with\nnewline'", false}, - } - - for _, tt := range tests { - t.Run(tt.input, func(t 
*testing.T) { - result, err := shellEscape(tt.input) - if (err != nil) != tt.wantErr { - t.Errorf("shellEscape(%q) error = %v, wantErr %v", tt.input, err, tt.wantErr) - return - } - if result != tt.expected { - t.Errorf("shellEscape(%q) = %q, want %q", tt.input, result, tt.expected) - } - }) - } -} - -func TestShellEscapeValidation(t *testing.T) { - tests := []struct { - name string - input string - wantErr error - }{ - { - name: "null byte", - input: "test\x00value", - wantErr: ErrShellInputNullByte, - }, - { - name: "control character bell", - input: "test\x07value", - wantErr: ErrShellInputControlChar, - }, - { - name: "control character escape", - input: "test\x1bvalue", - wantErr: ErrShellInputControlChar, - }, - { - name: "control character carriage return", - input: "test\rvalue", - wantErr: ErrShellInputControlChar, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := shellEscape(tt.input) - if err == nil { - t.Errorf("shellEscape(%q) expected error, got nil", tt.input) - return - } - if err != tt.wantErr { - t.Errorf("shellEscape(%q) error = %v, want %v", tt.input, err, tt.wantErr) - } - }) - } -} - -func TestShellEscapeMaxLength(t *testing.T) { - // Test input at max length (should succeed) - atMax := make([]byte, MaxShellInputLength) - for i := range atMax { - atMax[i] = 'a' - } - _, err := shellEscape(string(atMax)) - if err != nil { - t.Errorf("shellEscape at max length should succeed, got error: %v", err) - } - - // Test input over max length (should fail) - overMax := make([]byte, MaxShellInputLength+1) - for i := range overMax { - overMax[i] = 'a' - } - _, err = shellEscape(string(overMax)) - if err != ErrShellInputTooLong { - t.Errorf("shellEscape over max length should return ErrShellInputTooLong, got: %v", err) - } -} - -func TestValidateShellInput(t *testing.T) { - tests := []struct { - name string - input string - wantErr error - }{ - {"valid simple", "simple", nil}, - {"valid with spaces", "with spaces", 
nil}, - {"valid with tab", "with\ttab", nil}, - {"valid with newline", "with\nnewline", nil}, - {"invalid null byte", "test\x00value", ErrShellInputNullByte}, - {"invalid bell", "test\x07value", ErrShellInputControlChar}, - {"invalid escape", "test\x1bvalue", ErrShellInputControlChar}, - {"invalid backspace", "test\x08value", ErrShellInputControlChar}, - {"invalid form feed", "test\x0cvalue", ErrShellInputControlChar}, - {"invalid carriage return", "test\rvalue", ErrShellInputControlChar}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := validateShellInput(tt.input) - if err != tt.wantErr { - t.Errorf("validateShellInput(%q) = %v, want %v", tt.input, err, tt.wantErr) - } - }) - } -} - -func TestNewMultiHostDomainManager(t *testing.T) { - manager := NewMultiHostDomainManager(nil, nil) - if manager == nil { - t.Fatal("NewMultiHostDomainManager returned nil") - } - if manager.hosts != nil { - t.Error("Expected nil hosts slice") - } -} - -func TestGetHosts(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - } - manager := NewMultiHostDomainManager(hosts, nil) - - result := manager.GetHosts() - if len(result) != 2 { - t.Errorf("Expected 2 hosts, got %d", len(result)) - } - if result[0].Name != "host1" { - t.Errorf("Expected first host name to be 'host1', got %s", result[0].Name) - } -} - -// TestListDomainsAllHostsUnreachable tests the case when all configured hosts fail. 
-func TestListDomainsAllHostsUnreachable(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - {Name: "host3", Address: "192.168.1.3"}, - } - - mock := newMockSSHRunner() - mock.defaultError = errors.New("connection refused") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - // ListDomains returns an error aggregation, not a top-level error - if err != nil { - t.Fatalf("ListDomains should not return top-level error: %v", err) - } - - if result == nil { - t.Fatal("Expected non-nil result") - } - - // All hosts should have failed - if len(result.HostErrors) != 3 { - t.Errorf("Expected 3 host errors, got %d", len(result.HostErrors)) - } - - // No domains should be returned - if len(result.Domains) != 0 { - t.Errorf("Expected 0 domains, got %d", len(result.Domains)) - } - - // Verify each host error is recorded - hostErrorMap := make(map[string]bool) - for _, he := range result.HostErrors { - hostErrorMap[he.HostName] = true - if he.Error == "" { - t.Errorf("Host error for %s should have error message", he.HostName) - } - } - - for _, h := range hosts { - if !hostErrorMap[h.Name] { - t.Errorf("Expected error for host %s", h.Name) - } - } -} - -// TestListDomainsPartialHostFailure tests when some hosts succeed and some fail. 
-func TestListDomainsPartialHostFailure(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - } - - mock := newMockSSHRunner() - // host1 fails - mock.setHostError("192.168.1.1", errors.New("connection refused")) - // host2 succeeds with VMs - mock.setResponse("virsh list --all --name", "vm1\nvm2\n") - mock.setResponse("virsh dominfo 'vm1'", "UUID: 1234\nState: running\nPersistent: yes\n") - mock.setResponse("virsh dominfo 'vm2'", "UUID: 5678\nState: shut off\nPersistent: yes\n") - mock.setResponse("virsh domblklist 'vm1' --details", "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm1.qcow2\n") - mock.setResponse("virsh domblklist 'vm2' --details", "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm2.qcow2\n") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains should not return top-level error: %v", err) - } - - // One host should have failed - if len(result.HostErrors) != 1 { - t.Errorf("Expected 1 host error, got %d", len(result.HostErrors)) - } - - if result.HostErrors[0].HostName != "host1" { - t.Errorf("Expected host1 to fail, got %s", result.HostErrors[0].HostName) - } - - // VMs from host2 should be returned - if len(result.Domains) != 2 { - t.Errorf("Expected 2 domains, got %d", len(result.Domains)) - } -} - -// TestSSHConnectionTimeout tests that SSH timeouts are handled correctly. 
-func TestSSHConnectionTimeout(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "slow-host", Address: "192.168.1.100"}, - } - - mock := newMockSSHRunner() - mock.delay = 5 * time.Second // Simulate slow response - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - // Use a context with short timeout - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains should not return top-level error: %v", err) - } - - // The host should have timed out - if len(result.HostErrors) != 1 { - t.Errorf("Expected 1 host error due to timeout, got %d", len(result.HostErrors)) - } - - if len(result.HostErrors) > 0 && result.HostErrors[0].HostName != "slow-host" { - t.Errorf("Expected slow-host to fail, got %s", result.HostErrors[0].HostName) - } -} - -// TestFindHostForVMAllHostsUnreachable tests FindHostForVM when all hosts fail. -func TestFindHostForVMAllHostsUnreachable(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - } - - mock := newMockSSHRunner() - mock.defaultError = errors.New("connection timed out") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - _, err := manager.FindHostForVM(ctx, "test-vm") - - if err == nil { - t.Fatal("FindHostForVM should return error when all hosts are unreachable") - } - - // Error should mention the VM name - if !errors.Is(err, nil) && err.Error() == "" { - t.Error("Expected non-empty error message") - } -} - -// TestFindHostForVMNotFoundOnAnyHost tests when VM doesn't exist on any host. 
-func TestFindHostForVMNotFoundOnAnyHost(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - } - - mock := newMockSSHRunner() - // All hosts respond but VM not found (dominfo fails) - mock.defaultError = errors.New("domain not found") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - _, err := manager.FindHostForVM(ctx, "nonexistent-vm") - - if err == nil { - t.Fatal("FindHostForVM should return error when VM not found") - } -} - -// TestConcurrentVMOperationsOnSameHost tests thread safety during concurrent queries. -func TestConcurrentVMOperationsOnSameHost(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - } - - mock := newMockSSHRunner() - mock.setResponse("virsh list --all --name", "vm1\nvm2\nvm3\n") - mock.setResponse("virsh dominfo 'vm1'", "UUID: 1111\nState: running\nPersistent: yes\n") - mock.setResponse("virsh dominfo 'vm2'", "UUID: 2222\nState: running\nPersistent: yes\n") - mock.setResponse("virsh dominfo 'vm3'", "UUID: 3333\nState: running\nPersistent: yes\n") - mock.setResponse("virsh domblklist 'vm1' --details", "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm1.qcow2\n") - mock.setResponse("virsh domblklist 'vm2' --details", "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm2.qcow2\n") - mock.setResponse("virsh domblklist 'vm3' --details", "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm3.qcow2\n") - mock.delay = 10 * time.Millisecond // Small delay to create overlap - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - // Run multiple concurrent ListDomains operations - const concurrency = 10 
- var wg sync.WaitGroup - errors := make(chan error, concurrency) - results := make(chan *MultiHostListResult, concurrency) - - ctx := context.Background() - for i := 0; i < concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - result, err := manager.ListDomains(ctx) - if err != nil { - errors <- err - return - } - results <- result - }() - } - - wg.Wait() - close(errors) - close(results) - - // Check for any errors - for err := range errors { - t.Errorf("Concurrent operation failed: %v", err) - } - - // Verify all results are consistent - var resultCount int - for result := range results { - resultCount++ - if len(result.Domains) != 3 { - t.Errorf("Expected 3 domains, got %d", len(result.Domains)) - } - } - - if resultCount != concurrency { - t.Errorf("Expected %d results, got %d", concurrency, resultCount) - } -} - -// TestConcurrentFindHostForVM tests concurrent FindHostForVM operations. -func TestConcurrentFindHostForVM(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - {Name: "host2", Address: "192.168.1.2"}, - {Name: "host3", Address: "192.168.1.3"}, - } - - mock := newMockSSHRunner() - // VM exists on host2 only - mock.setHostError("192.168.1.1", errors.New("domain not found")) - mock.setHostError("192.168.1.3", errors.New("domain not found")) - mock.setResponse("virsh dominfo 'target-vm'", "UUID: abc123\nState: running\n") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - // Run multiple concurrent FindHostForVM operations - const concurrency = 5 - var wg sync.WaitGroup - foundHosts := make(chan *config.HostConfig, concurrency) - foundErrors := make(chan error, concurrency) - - ctx := context.Background() - for i := 0; i < concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - host, err := manager.FindHostForVM(ctx, "target-vm") - if err != nil { - foundErrors <- err - return - } - foundHosts <- host - }() - } - - wg.Wait() - close(foundHosts) 
- close(foundErrors) - - // All should find host2 - for host := range foundHosts { - if host.Name != "host2" { - t.Errorf("Expected host2, got %s", host.Name) - } - } - - // Should have no errors - for err := range foundErrors { - t.Errorf("Unexpected error: %v", err) - } -} - -// TestListDomainsEmptyHosts tests behavior with no hosts configured. -func TestListDomainsEmptyHosts(t *testing.T) { - manager := NewMultiHostDomainManager(nil, nil) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains with empty hosts should not error: %v", err) - } - - if result == nil { - t.Fatal("Expected non-nil result") - } - - if len(result.Domains) != 0 { - t.Errorf("Expected 0 domains, got %d", len(result.Domains)) - } - - if len(result.HostErrors) != 0 { - t.Errorf("Expected 0 host errors, got %d", len(result.HostErrors)) - } -} - -// TestFindHostForVMNoHostsConfigured tests FindHostForVM with no hosts. -func TestFindHostForVMNoHostsConfigured(t *testing.T) { - manager := NewMultiHostDomainManager(nil, nil) - - ctx := context.Background() - _, err := manager.FindHostForVM(ctx, "any-vm") - - if err == nil { - t.Fatal("Expected error when no hosts configured") - } - - expectedMsg := "no hosts configured" - if err.Error() != expectedMsg { - t.Errorf("Expected error %q, got %q", expectedMsg, err.Error()) - } -} - -// TestSSHDefaultsApplied tests that SSH defaults are correctly applied. 
-func TestSSHDefaultsApplied(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, // No SSHUser or SSHPort set - } - - mock := newMockSSHRunner() - mock.setResponse("virsh list --all --name", "") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - _, _ = manager.ListDomains(ctx) - - calls := mock.getCalls() - if len(calls) == 0 { - t.Fatal("Expected at least one SSH call") - } - - // Verify defaults were applied - if calls[0].User != DefaultSSHUser { - t.Errorf("Expected default SSH user %q, got %q", DefaultSSHUser, calls[0].User) - } - - if calls[0].Port != DefaultSSHPort { - t.Errorf("Expected default SSH port %d, got %d", DefaultSSHPort, calls[0].Port) - } -} - -// TestSSHCustomPortAndUser tests that custom SSH settings are used. -func TestSSHCustomPortAndUser(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1", SSHUser: "admin", SSHPort: 2222}, - } - - mock := newMockSSHRunner() - mock.setResponse("virsh list --all --name", "") - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - _, _ = manager.ListDomains(ctx) - - calls := mock.getCalls() - if len(calls) == 0 { - t.Fatal("Expected at least one SSH call") - } - - if calls[0].User != "admin" { - t.Errorf("Expected SSH user 'admin', got %q", calls[0].User) - } - - if calls[0].Port != 2222 { - t.Errorf("Expected SSH port 2222, got %d", calls[0].Port) - } -} - -// TestListDomainsWithDomainInfoFailure tests graceful handling when dominfo fails for one VM. 
-func TestListDomainsWithDomainInfoFailure(t *testing.T) { - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, - } - - mock := &selectiveMockSSHRunner{ - responses: map[string]mockResponse{ - "virsh list --all --name": {output: "vm1\nvm2\nvm3\n"}, - "virsh dominfo 'vm1'": {output: "UUID: 1111\nState: running\nPersistent: yes\n"}, - "virsh dominfo 'vm2'": {err: errors.New("domain info failed")}, - "virsh dominfo 'vm3'": {output: "UUID: 3333\nState: running\nPersistent: yes\n"}, - "virsh domblklist 'vm1' --details": {output: "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm1.qcow2\n"}, - "virsh domblklist 'vm3' --details": {output: "Type Device Target Source\n------------------------------------------------\nfile disk vda /var/lib/libvirt/images/vm3.qcow2\n"}, - }, - } - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains should not return top-level error: %v", err) - } - - // Should get 2 VMs (vm1 and vm3), vm2 failed - if len(result.Domains) != 2 { - t.Errorf("Expected 2 domains (vm2 should be skipped), got %d", len(result.Domains)) - } - - // No host errors since the host itself is reachable - if len(result.HostErrors) != 0 { - t.Errorf("Expected 0 host errors, got %d", len(result.HostErrors)) - } -} - -// TestCustomQueryTimeout tests that per-host QueryTimeout is respected. 
-func TestCustomQueryTimeout(t *testing.T) { - // Host with short custom timeout - hosts := []config.HostConfig{ - {Name: "fast-host", Address: "192.168.1.1", QueryTimeout: 50 * time.Millisecond}, - } - - mock := newMockSSHRunner() - mock.delay = 200 * time.Millisecond // Exceeds the custom timeout - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains should not return top-level error: %v", err) - } - - // The host should have timed out due to custom timeout - if len(result.HostErrors) != 1 { - t.Errorf("Expected 1 host error due to custom timeout, got %d", len(result.HostErrors)) - } -} - -// TestDefaultQueryTimeoutUsedWhenNotSet tests that default timeout is used when QueryTimeout is 0. -func TestDefaultQueryTimeoutUsedWhenNotSet(t *testing.T) { - // Host without custom timeout (uses default) - hosts := []config.HostConfig{ - {Name: "host1", Address: "192.168.1.1"}, // QueryTimeout = 0, should use default - } - - mock := newMockSSHRunner() - mock.setResponse("virsh list --all --name", "") - // No delay - should complete within default timeout - - logger := slog.Default() - manager := NewMultiHostDomainManagerWithRunner(hosts, logger, mock) - - ctx := context.Background() - result, err := manager.ListDomains(ctx) - if err != nil { - t.Fatalf("ListDomains should not return error: %v", err) - } - - // Should succeed with no errors - if len(result.HostErrors) != 0 { - t.Errorf("Expected 0 host errors, got %d", len(result.HostErrors)) - } -} - -// selectiveMockSSHRunner allows command-specific responses. 
-type selectiveMockSSHRunner struct { - mu sync.Mutex - responses map[string]mockResponse -} - -type mockResponse struct { - output string - err error -} - -func (m *selectiveMockSSHRunner) Run(ctx context.Context, address, user string, port int, command string) (string, error) { - m.mu.Lock() - defer m.mu.Unlock() - - if resp, ok := m.responses[command]; ok { - return resp.output, resp.err - } - return "", fmt.Errorf("no mock response for command: %s", command) -} diff --git a/fluid/internal/libvirt/remote.go b/fluid/internal/libvirt/remote.go deleted file mode 100644 index c2d5ab5c..00000000 --- a/fluid/internal/libvirt/remote.go +++ /dev/null @@ -1,765 +0,0 @@ -package libvirt - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "log/slog" - "os/exec" - "strings" - "time" - - "github.com/aspectrr/fluid.sh/fluid/internal/config" -) - -// RemoteVirshManager implements Manager for remote libvirt hosts via SSH. -// It executes virsh and related commands on a remote host. -type RemoteVirshManager struct { - host config.HostConfig - cfg Config - logger *slog.Logger -} - -// NewRemoteVirshManager creates a new RemoteVirshManager for the given host. -func NewRemoteVirshManager(host config.HostConfig, cfg Config, logger *slog.Logger) *RemoteVirshManager { - if cfg.DefaultVCPUs == 0 { - cfg.DefaultVCPUs = 2 - } - if cfg.DefaultMemoryMB == 0 { - cfg.DefaultMemoryMB = 2048 - } - if logger == nil { - logger = slog.Default() - } - return &RemoteVirshManager{ - host: host, - cfg: cfg, - logger: logger, - } -} - -// CloneVM creates a linked-clone VM on the remote host. -func (m *RemoteVirshManager) CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) { - return DomainRef{}, fmt.Errorf("CloneVM not implemented for remote hosts - use CloneFromVM instead") -} - -// CloneFromVM creates a linked-clone VM from an existing VM on the remote host. 
-func (m *RemoteVirshManager) CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) { - if newVMName == "" { - return DomainRef{}, fmt.Errorf("new VM name is required") - } - if sourceVMName == "" { - return DomainRef{}, fmt.Errorf("source VM name is required") - } - - // Validate inputs for shell escaping - escapedSourceVM, err := shellEscape(sourceVMName) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid source VM name: %w", err) - } - escapedNewVM, err := shellEscape(newVMName) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid new VM name: %w", err) - } - - if cpu <= 0 { - cpu = m.cfg.DefaultVCPUs - } - if memoryMB <= 0 { - memoryMB = m.cfg.DefaultMemoryMB - } - if network == "" { - network = m.cfg.DefaultNetwork - } - - m.logger.Info("cloning VM on remote host", - "host", m.host.Name, - "source_vm", sourceVMName, - "new_vm", newVMName, - ) - - // Get source VM's disk path - out, err := m.runSSH(ctx, fmt.Sprintf("virsh domblklist %s --details", escapedSourceVM)) - if err != nil { - return DomainRef{}, fmt.Errorf("lookup source VM %q: %w", sourceVMName, err) - } - - basePath := "" - lines := strings.Split(out, "\n") - for _, line := range lines { - fields := strings.Fields(line) - if len(fields) >= 4 && fields[0] == "file" && fields[1] == "disk" { - basePath = fields[3] - break - } - } - if basePath == "" { - return DomainRef{}, fmt.Errorf("could not find disk path for source VM %q", sourceVMName) - } - - // Validate and escape paths - jobDir := fmt.Sprintf("%s/%s", m.cfg.WorkDir, newVMName) - escapedJobDir, err := shellEscape(jobDir) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid job directory path: %w", err) - } - - // Create job directory on remote host - if _, err := m.runSSH(ctx, fmt.Sprintf("mkdir -p %s", escapedJobDir)); err != nil { - return DomainRef{}, fmt.Errorf("create job dir: %w", err) - } - - // Create overlay disk - overlayPath := 
fmt.Sprintf("%s/disk-overlay.qcow2", jobDir) - escapedBasePath, err := shellEscape(basePath) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid base path: %w", err) - } - escapedOverlayPath, err := shellEscape(overlayPath) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid overlay path: %w", err) - } - if _, err := m.runSSH(ctx, fmt.Sprintf("qemu-img create -f qcow2 -F qcow2 -b %s %s", - escapedBasePath, escapedOverlayPath)); err != nil { - return DomainRef{}, fmt.Errorf("create overlay: %w", err) - } - - // Generate a unique cloud-init ISO for the cloned VM on the remote host - // This ensures the clone gets a new instance-id and DHCP network config - cloudInitISO := fmt.Sprintf("%s/cloud-init.iso", jobDir) - if err := m.buildCloudInitSeedOnRemote(ctx, newVMName, jobDir, cloudInitISO); err != nil { - // Log warning but don't fail - VM might still work if source didn't use cloud-init - m.logger.Warn("failed to build cloud-init seed for clone, continuing without it", - "vm", newVMName, - "error", err, - ) - cloudInitISO = "" // Don't try to attach a non-existent ISO - } - - // Dump source VM XML and modify it - sourceXML, err := m.runSSH(ctx, fmt.Sprintf("virsh dumpxml %s", escapedSourceVM)) - if err != nil { - return DomainRef{}, fmt.Errorf("dumpxml source vm: %w", err) - } - - newXML, err := modifyClonedXMLHelper(sourceXML, newVMName, overlayPath, cloudInitISO, cpu, memoryMB, network) - if err != nil { - return DomainRef{}, fmt.Errorf("modify cloned xml: %w", err) - } - - // Write domain XML to remote host using base64 to avoid shell escaping issues - xmlPath := fmt.Sprintf("%s/domain.xml", jobDir) - escapedXMLPath, err := shellEscape(xmlPath) - if err != nil { - return DomainRef{}, fmt.Errorf("invalid XML path: %w", err) - } - encodedXML := base64.StdEncoding.EncodeToString([]byte(newXML)) - if _, err := m.runSSH(ctx, fmt.Sprintf("echo %s | base64 -d > %s", encodedXML, escapedXMLPath)); err != nil { - return DomainRef{}, fmt.Errorf("write domain 
xml: %w", err) - } - - // Define the domain - if _, err := m.runSSH(ctx, fmt.Sprintf("virsh define %s", escapedXMLPath)); err != nil { - return DomainRef{}, fmt.Errorf("virsh define: %w", err) - } - - // Get UUID - out, err = m.runSSH(ctx, fmt.Sprintf("virsh domuuid %s", escapedNewVM)) - if err != nil { - return DomainRef{Name: newVMName}, nil - } - - return DomainRef{Name: newVMName, UUID: strings.TrimSpace(out)}, nil -} - -// InjectSSHKey injects an SSH public key on the remote host. -func (m *RemoteVirshManager) InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error { - if sandboxName == "" { - return fmt.Errorf("sandboxName is required") - } - if username == "" { - username = "sandbox" - } - if strings.TrimSpace(publicKey) == "" { - return fmt.Errorf("publicKey is required") - } - - jobDir := fmt.Sprintf("%s/%s", m.cfg.WorkDir, sandboxName) - overlay := fmt.Sprintf("%s/disk-overlay.qcow2", jobDir) - - // Validate inputs for shell escaping - escapedOverlay, err := shellEscape(overlay) - if err != nil { - return fmt.Errorf("invalid overlay path: %w", err) - } - escapedUsername, err := shellEscape(username) - if err != nil { - return fmt.Errorf("invalid username: %w", err) - } - escapedPublicKey, err := shellEscape(publicKey) - if err != nil { - return fmt.Errorf("invalid public key: %w", err) - } - - switch strings.ToLower(m.cfg.SSHKeyInjectMethod) { - case "virt-customize": - cmdArgs := fmt.Sprintf("virt-customize -a %s --run-command 'id -u %s >/dev/null 2>&1 || useradd -m -s /bin/bash %s' --ssh-inject '%s:string:%s'", - escapedOverlay, - escapedUsername, - escapedUsername, - escapedUsername, - escapedPublicKey, - ) - if _, err := m.runSSH(ctx, cmdArgs); err != nil { - return fmt.Errorf("virt-customize inject: %w", err) - } - default: - return fmt.Errorf("unsupported SSHKeyInjectMethod for remote: %s", m.cfg.SSHKeyInjectMethod) - } - return nil -} - -// StartVM starts a VM on the remote host. 
-func (m *RemoteVirshManager) StartVM(ctx context.Context, vmName string) error { - if vmName == "" { - return fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return fmt.Errorf("invalid VM name: %w", err) - } - - m.logger.Info("starting VM on remote host", - "host", m.host.Name, - "vm_name", vmName, - ) - - _, err = m.runSSH(ctx, fmt.Sprintf("virsh start %s", escapedName)) - if err != nil { - return fmt.Errorf("virsh start: %w", err) - } - return nil -} - -// StopVM stops a VM on the remote host. -func (m *RemoteVirshManager) StopVM(ctx context.Context, vmName string, force bool) error { - if vmName == "" { - return fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return fmt.Errorf("invalid VM name: %w", err) - } - - cmd := "shutdown" - if force { - cmd = "destroy" - } - - _, err = m.runSSH(ctx, fmt.Sprintf("virsh %s %s", cmd, escapedName)) - return err -} - -// DestroyVM destroys and undefines a VM on the remote host. 
-func (m *RemoteVirshManager) DestroyVM(ctx context.Context, vmName string) error { - if vmName == "" { - return fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return fmt.Errorf("invalid VM name: %w", err) - } - - // Best-effort destroy if running - _, _ = m.runSSH(ctx, fmt.Sprintf("virsh destroy %s", escapedName)) - - // Undefine with --remove-all-storage to force cleanup of associated volumes - if _, err := m.runSSH(ctx, fmt.Sprintf("virsh undefine --remove-all-storage %s", escapedName)); err != nil { - // If --remove-all-storage fails (e.g., old libvirt), try without it - if _, err2 := m.runSSH(ctx, fmt.Sprintf("virsh undefine %s", escapedName)); err2 != nil { - // Continue to remove files even if undefine fails - _ = err2 - } - } - - // Remove workspace - jobDir := fmt.Sprintf("%s/%s", m.cfg.WorkDir, vmName) - escapedJobDir, err := shellEscape(jobDir) - if err != nil { - return fmt.Errorf("invalid job directory path: %w", err) - } - _, _ = m.runSSH(ctx, fmt.Sprintf("rm -rf %s", escapedJobDir)) - - return nil -} - -// CreateSnapshot creates a snapshot on the remote host. 
-func (m *RemoteVirshManager) CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (SnapshotRef, error) { - if vmName == "" || snapshotName == "" { - return SnapshotRef{}, fmt.Errorf("vmName and snapshotName are required") - } - - escapedVMName, err := shellEscape(vmName) - if err != nil { - return SnapshotRef{}, fmt.Errorf("invalid VM name: %w", err) - } - escapedSnapshotName, err := shellEscape(snapshotName) - if err != nil { - return SnapshotRef{}, fmt.Errorf("invalid snapshot name: %w", err) - } - - if external { - jobDir := fmt.Sprintf("%s/%s", m.cfg.WorkDir, vmName) - snapPath := fmt.Sprintf("%s/snap-%s.qcow2", jobDir, snapshotName) - escapedSnapPath, err := shellEscape(snapPath) - if err != nil { - return SnapshotRef{}, fmt.Errorf("invalid snapshot path: %w", err) - } - args := fmt.Sprintf("virsh snapshot-create-as %s %s --disk-only --atomic --no-metadata --diskspec vda,file=%s", - escapedVMName, escapedSnapshotName, escapedSnapPath) - if _, err := m.runSSH(ctx, args); err != nil { - return SnapshotRef{}, fmt.Errorf("external snapshot create: %w", err) - } - return SnapshotRef{Name: snapshotName, Kind: "EXTERNAL", Ref: snapPath}, nil - } - - if _, err := m.runSSH(ctx, fmt.Sprintf("virsh snapshot-create-as %s %s", - escapedVMName, escapedSnapshotName)); err != nil { - return SnapshotRef{}, fmt.Errorf("internal snapshot create: %w", err) - } - return SnapshotRef{Name: snapshotName, Kind: "INTERNAL", Ref: snapshotName}, nil -} - -// DiffSnapshot returns a diff plan for the remote host. 
-func (m *RemoteVirshManager) DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*FSComparePlan, error) { - if vmName == "" || fromSnapshot == "" || toSnapshot == "" { - return nil, fmt.Errorf("vmName, fromSnapshot and toSnapshot are required") - } - - plan := &FSComparePlan{ - VMName: vmName, - FromSnapshot: fromSnapshot, - ToSnapshot: toSnapshot, - Notes: []string{"Remote host snapshot diffing - manual intervention required"}, - } - return plan, nil -} - -// GetIPAddress discovers the IP address of a VM on the remote host. -func (m *RemoteVirshManager) GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - if vmName == "" { - return "", "", fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return "", "", fmt.Errorf("invalid VM name: %w", err) - } - - m.logger.Info("discovering IP on remote host", - "host", m.host.Name, - "vm_name", vmName, - "timeout", timeout, - ) - - deadline := time.Now().Add(timeout) - attempt := 0 - - for { - attempt++ - out, err := m.runSSH(ctx, fmt.Sprintf("virsh domifaddr %s --source lease", escapedName)) - if err == nil { - ip, mac := parseDomIfAddrIPv4WithMACHelper(out) - if ip != "" { - m.logger.Info("IP discovered on remote host", - "host", m.host.Name, - "vm_name", vmName, - "ip", ip, - "mac", mac, - ) - return ip, mac, nil - } - } - - if time.Now().After(deadline) { - break - } - time.Sleep(2 * time.Second) - } - - return "", "", fmt.Errorf("ip address not found within timeout on remote host %s", m.host.Name) -} - -// GetVMState returns the state of a VM on the remote host. 
-func (m *RemoteVirshManager) GetVMState(ctx context.Context, vmName string) (VMState, error) { - if vmName == "" { - return VMStateUnknown, fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return VMStateUnknown, fmt.Errorf("invalid VM name: %w", err) - } - - out, err := m.runSSH(ctx, fmt.Sprintf("virsh domstate %s", escapedName)) - if err != nil { - return VMStateUnknown, fmt.Errorf("get vm state: %w", err) - } - return parseVMStateHelper(out), nil -} - -// ValidateSourceVM performs pre-flight checks on a source VM on the remote host. -func (m *RemoteVirshManager) ValidateSourceVM(ctx context.Context, vmName string) (*VMValidationResult, error) { - if vmName == "" { - return nil, fmt.Errorf("vmName is required") - } - - escapedName, err := shellEscape(vmName) - if err != nil { - return nil, fmt.Errorf("invalid VM name: %w", err) - } - - result := &VMValidationResult{ - VMName: vmName, - Valid: true, - Warnings: []string{}, - Errors: []string{}, - } - - // Check VM state - state, err := m.GetVMState(ctx, vmName) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, fmt.Sprintf("Failed to get VM state: %v", err)) - return result, nil - } - result.State = state - - // Check MAC address using domiflist - out, err := m.runSSH(ctx, fmt.Sprintf("virsh domiflist %s", escapedName)) - if err != nil { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not get network interfaces: %v", err)) - } else { - lines := strings.Split(out, "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if line == "" || strings.HasPrefix(line, "Interface") || strings.HasPrefix(line, "-") { - continue - } - fields := strings.Fields(line) - if len(fields) >= 5 { - mac := fields[4] - if strings.Count(mac, ":") == 5 { - result.MACAddress = mac - result.HasNetwork = true - break - } - } - } - if result.MACAddress == "" { - result.Warnings = append(result.Warnings, - "Could not find MAC 
address - source VM may not have a network interface") - } - } - - // Check IP address if running - switch state { - case VMStateRunning: - out, err = m.runSSH(ctx, fmt.Sprintf("virsh domifaddr %s --source lease", escapedName)) - if err == nil { - ip, mac := parseDomIfAddrIPv4WithMACHelper(out) - if ip != "" { - result.IPAddress = ip - if mac != "" && result.MACAddress == "" { - result.MACAddress = mac - result.HasNetwork = true - } - } else { - result.Warnings = append(result.Warnings, - "Source VM is running but has no IP address assigned") - result.Warnings = append(result.Warnings, - "This may indicate cloud-init or DHCP issues - cloned sandboxes may also fail to get IPs") - } - } - case VMStateShutOff: - result.Warnings = append(result.Warnings, - "Source VM is shut off - cannot verify network configuration (IP/DHCP)") - } - - return result, nil -} - -// CheckHostResources validates that the remote host has sufficient resources. -func (m *RemoteVirshManager) CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) (*ResourceCheckResult, error) { - result := &ResourceCheckResult{ - Valid: true, - RequiredCPUs: requiredCPUs, - RequiredMemoryMB: requiredMemoryMB, - Warnings: []string{}, - Errors: []string{}, - } - - // Check CPUs using virsh nodeinfo - out, err := m.runSSH(ctx, "virsh nodeinfo") - if err == nil { - for _, line := range strings.Split(out, "\n") { - if strings.HasPrefix(line, "CPU(s):") { - fields := strings.Fields(line) - if len(fields) >= 2 { - _, _ = fmt.Sscanf(fields[1], "%d", &result.AvailableCPUs) - result.TotalCPUs = result.AvailableCPUs // Total CPUs on the host - } - } - } - if requiredCPUs > result.AvailableCPUs { - result.Valid = false - result.NeedsCPUApproval = true - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient CPUs on %s: need %d but only %d available", - m.host.Name, requiredCPUs, result.AvailableCPUs)) - } - } else { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not check 
CPUs on %s: %v", m.host.Name, err)) - } - - // Check memory using virsh nodememstats - out, err = m.runSSH(ctx, "virsh nodememstats") - if err == nil { - var free, buffers, cached int64 - for _, line := range strings.Split(out, "\n") { - fields := strings.Fields(line) - if len(fields) >= 2 { - var val int64 - _, _ = fmt.Sscanf(fields[len(fields)-2], "%d", &val) - switch { - case strings.Contains(fields[0], "total"): - result.TotalMemoryMB = val / 1024 - case strings.Contains(fields[0], "free"): - free = val - case strings.Contains(fields[0], "buffers"): - buffers = val - case strings.Contains(fields[0], "cached"): - cached = val - } - } - } - // Calculate available as free + buffers + cached - // This is more accurate than just free, as buffers/cached can be reclaimed - result.AvailableMemoryMB = (free + buffers + cached) / 1024 - - if result.TotalMemoryMB > 0 { - if int64(requiredMemoryMB) > result.AvailableMemoryMB { - result.Valid = false - result.NeedsMemoryApproval = true - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient memory on %s: need %d MB but only %d MB available", - m.host.Name, requiredMemoryMB, result.AvailableMemoryMB)) - } else if float64(requiredMemoryMB) > float64(result.AvailableMemoryMB)*0.8 { - result.NeedsMemoryApproval = true - result.Warnings = append(result.Warnings, - fmt.Sprintf("Low memory warning on %s: requesting %d MB of %d MB available", - m.host.Name, requiredMemoryMB, result.AvailableMemoryMB)) - } - } - } else { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not check memory on %s: %v", m.host.Name, err)) - } - - // Check disk space - workDir := m.cfg.WorkDir - if workDir == "" { - workDir = "/var/lib/libvirt/images/sandboxes" - } - escapedWorkDir, err := shellEscape(workDir) - if err == nil { - out, err = m.runSSH(ctx, fmt.Sprintf("df -m %s | tail -1 | awk '{print $4}'", escapedWorkDir)) - if err == nil { - var available int64 - _, _ = fmt.Sscanf(strings.TrimSpace(out), "%d", &available) - 
result.AvailableDiskMB = available - - if available < 1024 { - result.Valid = false - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient disk space on %s: only %d MB available in %s", - m.host.Name, available, workDir)) - } else if available < 10*1024 { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Low disk space warning on %s: only %d MB available in %s", - m.host.Name, available, workDir)) - } - } else { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not check disk space on %s: %v", m.host.Name, err)) - } - } - - return result, nil -} - -// runSSH executes a command on the remote host via SSH. -func (m *RemoteVirshManager) runSSH(ctx context.Context, command string) (string, error) { - sshUser := m.host.SSHUser - if sshUser == "" { - sshUser = "root" - } - sshPort := m.host.SSHPort - if sshPort == 0 { - sshPort = 22 - } - - args := []string{ - "-o", "BatchMode=yes", - "-o", "StrictHostKeyChecking=accept-new", - "-o", "ConnectTimeout=10", - "-p", fmt.Sprintf("%d", sshPort), - fmt.Sprintf("%s@%s", sshUser, m.host.Address), - command, - } - - var stdout, stderr bytes.Buffer - cmd := exec.CommandContext(ctx, "ssh", args...) - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err := cmd.Run() - if err != nil { - errStr := strings.TrimSpace(stderr.String()) - if errStr != "" { - return stdout.String(), fmt.Errorf("%w: %s", err, errStr) - } - return stdout.String(), err - } - - return strings.TrimSpace(stdout.String()), nil -} - -// HostConfig returns the host configuration for this manager. -func (m *RemoteVirshManager) HostConfig() config.HostConfig { - return m.host -} - -// buildCloudInitSeedOnRemote creates a cloud-init ISO on the remote host. -// The key purpose is to provide a NEW instance-id that differs from what's stored -// on the cloned disk. This forces cloud-init to re-run its initialization, -// including network configuration for the clone's new MAC address. 
-func (m *RemoteVirshManager) buildCloudInitSeedOnRemote(ctx context.Context, vmName, jobDir, outISO string) error { - // Build cloud-init user-data with DHCP networking - userData := `#cloud-config -# Cloud-init config for cloned VMs -# This triggers cloud-init to re-run network configuration - -# Ensure networking is configured via DHCP -network: - version: 2 - ethernets: - id0: - match: - driver: virtio* - dhcp4: true -` - - // If SSH CA is configured, add sandbox user and SSH CA trust - if m.cfg.SSHCAPubKey != "" { - userData += fmt.Sprintf(` -# Create sandbox user for managed SSH credentials -users: - - default - - name: sandbox - sudo: ALL=(ALL) NOPASSWD:ALL - shell: /bin/bash - lock_passwd: true - -# Write SSH CA public key -write_files: - - path: /etc/ssh/ssh_ca.pub - content: | - %s - permissions: '0644' - owner: root:root - -# Configure sshd to trust the CA -runcmd: - - | - if [ -s /etc/ssh/ssh_ca.pub ]; then - if ! grep -q "TrustedUserCAKeys" /etc/ssh/sshd_config; then - echo "TrustedUserCAKeys /etc/ssh/ssh_ca.pub" >> /etc/ssh/sshd_config - systemctl restart sshd || systemctl restart ssh || true - fi - fi -`, m.cfg.SSHCAPubKey) - } - - // Use a unique instance-id based on the VM name - metaData := fmt.Sprintf(`instance-id: %s -local-hostname: %s -`, vmName, vmName) - - // Escape paths for shell - escapedOutISO, err := shellEscape(outISO) - if err != nil { - return fmt.Errorf("invalid ISO path: %w", err) - } - - // Write user-data and meta-data to remote host using base64 - userDataB64 := base64.StdEncoding.EncodeToString([]byte(userData)) - metaDataB64 := base64.StdEncoding.EncodeToString([]byte(metaData)) - - userDataPath := fmt.Sprintf("%s/user-data", jobDir) - metaDataPath := fmt.Sprintf("%s/meta-data", jobDir) - escapedUserDataPath, err := shellEscape(userDataPath) - if err != nil { - return fmt.Errorf("invalid user-data path: %w", err) - } - escapedMetaDataPath, err := shellEscape(metaDataPath) - if err != nil { - return fmt.Errorf("invalid 
meta-data path: %w", err) - } - - if _, err := m.runSSH(ctx, fmt.Sprintf("echo %s | base64 -d > %s", userDataB64, escapedUserDataPath)); err != nil { - return fmt.Errorf("write user-data: %w", err) - } - if _, err := m.runSSH(ctx, fmt.Sprintf("echo %s | base64 -d > %s", metaDataB64, escapedMetaDataPath)); err != nil { - return fmt.Errorf("write meta-data: %w", err) - } - - // Try cloud-localds first, then genisoimage, then mkisofs - isoCmd := fmt.Sprintf(` -if command -v cloud-localds >/dev/null 2>&1; then - cloud-localds %s %s %s -elif command -v genisoimage >/dev/null 2>&1; then - genisoimage -output %s -volid cidata -joliet -rock %s %s -elif command -v mkisofs >/dev/null 2>&1; then - mkisofs -output %s -V cidata -J -R %s %s -else - echo "No ISO creation tool found" >&2 - exit 1 -fi -`, escapedOutISO, escapedUserDataPath, escapedMetaDataPath, - escapedOutISO, escapedUserDataPath, escapedMetaDataPath, - escapedOutISO, escapedUserDataPath, escapedMetaDataPath) - - if _, err := m.runSSH(ctx, isoCmd); err != nil { - return fmt.Errorf("create cloud-init ISO: %w", err) - } - - // Verify ISO was created - if _, err := m.runSSH(ctx, fmt.Sprintf("test -f %s", escapedOutISO)); err != nil { - return fmt.Errorf("cloud-init ISO not created at %s", outISO) - } - - m.logger.Info("created cloud-init ISO on remote host", - "host", m.host.Name, - "vm", vmName, - "iso", outISO, - ) - - return nil -} diff --git a/fluid/internal/libvirt/virsh-stub.go b/fluid/internal/libvirt/virsh-stub.go deleted file mode 100755 index 7c6ab4a4..00000000 --- a/fluid/internal/libvirt/virsh-stub.go +++ /dev/null @@ -1,174 +0,0 @@ -//go:build !libvirt - -package libvirt - -import ( - "context" - "errors" - "fmt" - "log/slog" - "os" - "time" - - "github.com/aspectrr/fluid.sh/fluid/internal/provider" -) - -// ErrLibvirtNotAvailable is returned by all stub methods when libvirt support is not compiled in. 
-var ErrLibvirtNotAvailable = errors.New("libvirt support not available: rebuild with -tags libvirt") - -// Manager is the provider-neutral VM manager interface. -type Manager = provider.Manager - -// Type aliases to provider types - keeps all existing imports working. -type ( - VMValidationResult = provider.VMValidationResult - ResourceCheckResult = provider.ResourceCheckResult - VMState = provider.VMState - DomainRef = provider.VMRef - SnapshotRef = provider.SnapshotRef - FSComparePlan = provider.FSComparePlan -) - -// Forward VMState constants. -const ( - VMStateRunning = provider.VMStateRunning - VMStatePaused = provider.VMStatePaused - VMStateShutOff = provider.VMStateShutOff - VMStateCrashed = provider.VMStateCrashed - VMStateSuspended = provider.VMStateSuspended - VMStateUnknown = provider.VMStateUnknown -) - -// Config controls how the virsh-based manager interacts with the host. -type Config struct { - LibvirtURI string // e.g., qemu:///system - BaseImageDir string // e.g., /var/lib/libvirt/images/base - WorkDir string // e.g., /var/lib/libvirt/images/jobs - DefaultNetwork string // e.g., default - SSHKeyInjectMethod string // "virt-customize" or "cloud-init" - CloudInitMetaTemplate string // optional meta-data template for cloud-init seed - - // SSH CA public key for managed credentials. - SSHCAPubKey string - - // SSH ProxyJump host for reaching VMs on an isolated network. - SSHProxyJump string - - // Optional explicit paths to binaries; if empty these are looked up in PATH. - VirshPath string - QemuImgPath string - VirtCustomizePath string - QemuNbdPath string - - // Socket VMNet configuration (macOS only) - SocketVMNetWrapper string // e.g., /path/to/qemu-socket-vmnet-wrapper.sh - - // Domain defaults - DefaultVCPUs int - DefaultMemoryMB int -} - -// VirshManager implements Manager using virsh/qemu-img/qemu-nbd/virt-customize and simple domain XML. -// This is a stub implementation that returns errors when libvirt is not available. 
-type VirshManager struct { - cfg Config - logger *slog.Logger -} - -// ConfigFromEnv returns a Config populated from environment variables. -func ConfigFromEnv() Config { - return Config{ - LibvirtURI: os.Getenv("LIBVIRT_URI"), - BaseImageDir: os.Getenv("BASE_IMAGE_DIR"), - WorkDir: os.Getenv("SANDBOX_WORKDIR"), - DefaultNetwork: os.Getenv("LIBVIRT_NETWORK"), - SSHKeyInjectMethod: os.Getenv("SSH_KEY_INJECT_METHOD"), - } -} - -// NewVirshManager creates a new VirshManager with the provided config. -// Note: This stub implementation will return errors for all operations. -func NewVirshManager(cfg Config, logger *slog.Logger) *VirshManager { - return &VirshManager{cfg: cfg, logger: logger} -} - -// NewFromEnv builds a Config from environment variables and returns a manager. -// Note: This stub implementation will return errors for all operations. -func NewFromEnv() *VirshManager { - cfg := Config{ - DefaultVCPUs: 2, - DefaultMemoryMB: 2048, - } - return NewVirshManager(cfg, nil) -} - -// CloneVM is a stub that returns an error when libvirt is not available. -func (m *VirshManager) CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) { - return DomainRef{}, ErrLibvirtNotAvailable -} - -// CloneFromVM is a stub that returns an error when libvirt is not available. -func (m *VirshManager) CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) { - return DomainRef{}, ErrLibvirtNotAvailable -} - -// InjectSSHKey is a stub that returns an error when libvirt is not available. -func (m *VirshManager) InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error { - return ErrLibvirtNotAvailable -} - -// StartVM is a stub that returns an error when libvirt is not available. 
-func (m *VirshManager) StartVM(ctx context.Context, vmName string) error { - return ErrLibvirtNotAvailable -} - -// StopVM is a stub that returns an error when libvirt is not available. -func (m *VirshManager) StopVM(ctx context.Context, vmName string, force bool) error { - return ErrLibvirtNotAvailable -} - -// DestroyVM is a stub that returns an error when libvirt is not available. -func (m *VirshManager) DestroyVM(ctx context.Context, vmName string) error { - return ErrLibvirtNotAvailable -} - -// CreateSnapshot is a stub that returns an error when libvirt is not available. -func (m *VirshManager) CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (SnapshotRef, error) { - return SnapshotRef{}, ErrLibvirtNotAvailable -} - -// DiffSnapshot is a stub that returns an error when libvirt is not available. -func (m *VirshManager) DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*FSComparePlan, error) { - return nil, ErrLibvirtNotAvailable -} - -// GetIPAddress is a stub that returns an error when libvirt is not available. -func (m *VirshManager) GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "", "", ErrLibvirtNotAvailable -} - -// GetVMState is a stub that returns an error when libvirt is not available. -func (m *VirshManager) GetVMState(ctx context.Context, vmName string) (VMState, error) { - return VMStateUnknown, ErrLibvirtNotAvailable -} - -// GetVMMAC is a stub that returns an error when libvirt is not available. -func (m *VirshManager) GetVMMAC(ctx context.Context, vmName string) (string, error) { - return "", ErrLibvirtNotAvailable -} - -// ReleaseDHCPLease is a stub that returns an error when libvirt is not available. -func (m *VirshManager) ReleaseDHCPLease(ctx context.Context, network, mac string) error { - return ErrLibvirtNotAvailable -} - -// ValidateSourceVM is a stub that returns an error when libvirt is not available. 
-func (m *VirshManager) ValidateSourceVM(ctx context.Context, vmName string) (*VMValidationResult, error) { - return nil, ErrLibvirtNotAvailable -} - -// CheckHostResources validates that the host has sufficient resources for a new sandbox. -// Returns a ResourceCheckResult with available resources and any warnings. -func (m *VirshManager) CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) (*ResourceCheckResult, error) { - return nil, fmt.Errorf("CheckHostResources not implemented in stub") -} diff --git a/fluid/internal/libvirt/virsh.go b/fluid/internal/libvirt/virsh.go deleted file mode 100755 index 3ad4ac72..00000000 --- a/fluid/internal/libvirt/virsh.go +++ /dev/null @@ -1,2137 +0,0 @@ -//go:build libvirt - -package libvirt - -import ( - "bytes" - "context" - "crypto/rand" - "fmt" - "log" - "log/slog" - "os" - "os/exec" - "path/filepath" - "strings" - "text/template" - "time" - - "github.com/aspectrr/fluid.sh/fluid/internal/provider" - "github.com/beevik/etree" -) - -// generateMACAddress generates a random MAC address with the locally administered bit set. -// Uses the 52:54:00 prefix which is commonly used by QEMU/KVM. -func generateMACAddress() string { - buf := make([]byte, 3) - _, _ = rand.Read(buf) - return fmt.Sprintf("52:54:00:%02x:%02x:%02x", buf[0], buf[1], buf[2]) -} - -// Manager is the provider-neutral VM manager interface. -type Manager = provider.Manager - -// Type aliases to provider types - keeps all existing imports working. -type ( - VMValidationResult = provider.VMValidationResult - ResourceCheckResult = provider.ResourceCheckResult - VMState = provider.VMState - DomainRef = provider.VMRef - SnapshotRef = provider.SnapshotRef - FSComparePlan = provider.FSComparePlan -) - -// Forward VMState constants. 
-const ( - VMStateRunning = provider.VMStateRunning - VMStatePaused = provider.VMStatePaused - VMStateShutOff = provider.VMStateShutOff - VMStateCrashed = provider.VMStateCrashed - VMStateSuspended = provider.VMStateSuspended - VMStateUnknown = provider.VMStateUnknown -) - -// Config controls how the virsh-based manager interacts with the host. -type Config struct { - LibvirtURI string // e.g., qemu:///system - BaseImageDir string // e.g., /var/lib/libvirt/images/base - WorkDir string // e.g., /var/lib/libvirt/images/jobs - DefaultNetwork string // e.g., default - SSHKeyInjectMethod string // "virt-customize" or "cloud-init" - CloudInitMetaTemplate string // optional meta-data template for cloud-init seed - - // SSH CA public key for managed credentials. - // If set, this will be injected into VMs via cloud-init so they trust - // certificates signed by this CA. - SSHCAPubKey string - - // SSH ProxyJump host for reaching VMs on an isolated network. - // Format: "user@host:port" or just "host" for default user/port. - // If set, SSH commands will use -J flag to proxy through this host. - SSHProxyJump string - - // Optional explicit paths to binaries; if empty these are looked up in PATH. - VirshPath string - QemuImgPath string - VirtCustomizePath string - QemuNbdPath string - - // Socket VMNet configuration (macOS only) - // If DefaultNetwork is "socket_vmnet", this wrapper script is used as the emulator. - // The wrapper should invoke qemu through socket_vmnet_client. - SocketVMNetWrapper string // e.g., /path/to/qemu-socket-vmnet-wrapper.sh - - // Domain defaults - DefaultVCPUs int - DefaultMemoryMB int -} - -// DomainRef, SnapshotRef, FSComparePlan are aliased from provider package above. - -// VirshManager implements Manager using virsh/qemu-img/qemu-nbd/virt-customize and simple domain XML. -type VirshManager struct { - cfg Config - logger *slog.Logger -} - -// NewVirshManager creates a new VirshManager with the provided config and optional logger. 
-// If logger is nil, slog.Default() is used. -func NewVirshManager(cfg Config, logger *slog.Logger) *VirshManager { - // Fill sensible defaults - if cfg.DefaultVCPUs == 0 { - cfg.DefaultVCPUs = 2 - } - if cfg.DefaultMemoryMB == 0 { - cfg.DefaultMemoryMB = 2048 - } - if logger == nil { - logger = slog.Default() - } - return &VirshManager{cfg: cfg, logger: logger} -} - -// NewFromEnv builds a Config from environment variables and returns a manager. -// LIBVIRT_URI, BASE_IMAGE_DIR, SANDBOX_WORKDIR, LIBVIRT_NETWORK, SSH_KEY_INJECT_METHOD -func NewFromEnv() *VirshManager { - cfg := Config{ - LibvirtURI: getenvDefault("LIBVIRT_URI", "qemu:///system"), - BaseImageDir: getenvDefault("BASE_IMAGE_DIR", "/var/lib/libvirt/images/base"), - WorkDir: getenvDefault("SANDBOX_WORKDIR", "/var/lib/libvirt/images/jobs"), - DefaultNetwork: getenvDefault("LIBVIRT_NETWORK", "default"), - SSHKeyInjectMethod: getenvDefault("SSH_KEY_INJECT_METHOD", "virt-customize"), - SSHCAPubKey: readSSHCAPubKey(getenvDefault("SSH_CA_PUB_KEY_PATH", "")), - SSHProxyJump: getenvDefault("SSH_PROXY_JUMP", ""), - DefaultVCPUs: intFromEnv("DEFAULT_VCPUS", 2), - DefaultMemoryMB: intFromEnv("DEFAULT_MEMORY_MB", 2048), - } - return NewVirshManager(cfg, nil) -} - -// ConfigFromEnv returns a Config populated from environment variables. 
-func ConfigFromEnv() Config { - return Config{ - LibvirtURI: getenvDefault("LIBVIRT_URI", "qemu:///system"), - BaseImageDir: getenvDefault("BASE_IMAGE_DIR", "/var/lib/libvirt/images/base"), - WorkDir: getenvDefault("SANDBOX_WORKDIR", "/var/lib/libvirt/images/jobs"), - DefaultNetwork: getenvDefault("LIBVIRT_NETWORK", "default"), - SSHKeyInjectMethod: getenvDefault("SSH_KEY_INJECT_METHOD", "virt-customize"), - SSHCAPubKey: readSSHCAPubKey(getenvDefault("SSH_CA_PUB_KEY_PATH", "")), - SSHProxyJump: getenvDefault("SSH_PROXY_JUMP", ""), - SocketVMNetWrapper: getenvDefault("SOCKET_VMNET_WRAPPER", ""), - DefaultVCPUs: intFromEnv("DEFAULT_VCPUS", 2), - DefaultMemoryMB: intFromEnv("DEFAULT_MEMORY_MB", 2048), - } -} - -// readSSHCAPubKey reads the SSH CA public key from a file path. -// Returns empty string if path is empty or file cannot be read. -func readSSHCAPubKey(path string) string { - if path == "" { - return "" - } - data, err := os.ReadFile(path) - if err != nil { - return "" - } - return strings.TrimSpace(string(data)) -} - -func (m *VirshManager) CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) { - if newVMName == "" { - return DomainRef{}, fmt.Errorf("new VM name is required") - } - if baseImage == "" { - return DomainRef{}, fmt.Errorf("base image is required") - } - if cpu <= 0 { - cpu = m.cfg.DefaultVCPUs - } - if memoryMB <= 0 { - memoryMB = m.cfg.DefaultMemoryMB - } - if network == "" { - network = m.cfg.DefaultNetwork - } - - basePath := filepath.Join(m.cfg.BaseImageDir, baseImage) - if _, err := os.Stat(basePath); err != nil { - return DomainRef{}, fmt.Errorf("base image not accessible: %s: %w", basePath, err) - } - - jobDir := filepath.Join(m.cfg.WorkDir, newVMName) - if err := os.MkdirAll(jobDir, 0o755); err != nil { - return DomainRef{}, fmt.Errorf("create job dir: %w", err) - } - - overlayPath := filepath.Join(jobDir, "disk-overlay.qcow2") - qemuImg := m.binPath("qemu-img", 
m.cfg.QemuImgPath) - if _, err := m.run(ctx, qemuImg, "create", "-f", "qcow2", "-F", "qcow2", "-b", basePath, overlayPath); err != nil { - return DomainRef{}, fmt.Errorf("create overlay: %w", err) - } - - // Create minimal domain XML referencing overlay disk and network. - xmlPath := filepath.Join(jobDir, "domain.xml") - xml, err := renderDomainXML(domainXMLParams{ - Name: newVMName, - MemoryMB: memoryMB, - VCPUs: cpu, - DiskPath: overlayPath, - Network: network, - BootOrder: []string{"hd", "cdrom", "network"}, - }) - log.Println("Generated domain XML:", xml) - if err != nil { - return DomainRef{}, fmt.Errorf("render domain xml: %w", err) - } - if err := os.WriteFile(xmlPath, []byte(xml), 0o644); err != nil { - return DomainRef{}, fmt.Errorf("write domain xml: %w", err) - } - - // virsh define - virsh := m.binPath("virsh", m.cfg.VirshPath) - if _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "define", xmlPath); err != nil { - return DomainRef{}, fmt.Errorf("virsh define: %w", err) - } - - // Fetch UUID - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domuuid", newVMName) - if err != nil { - // Best-effort: If domuuid fails, we still return Name. - return DomainRef{Name: newVMName}, nil - } - return DomainRef{Name: newVMName, UUID: strings.TrimSpace(out)}, nil -} - -// CloneFromVM creates a linked-clone VM from an existing VM's disk. -// It looks up the source VM by name, retrieves its disk path, and creates an overlay. 
func (m *VirshManager) CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (DomainRef, error) {
	if newVMName == "" {
		return DomainRef{}, fmt.Errorf("new VM name is required")
	}
	if sourceVMName == "" {
		return DomainRef{}, fmt.Errorf("source VM name is required")
	}
	// Fall back to configured defaults for unset sizing/network.
	if cpu <= 0 {
		cpu = m.cfg.DefaultVCPUs
	}
	if memoryMB <= 0 {
		memoryMB = m.cfg.DefaultMemoryMB
	}
	if network == "" {
		network = m.cfg.DefaultNetwork
	}

	// Look up the source VM's disk path using virsh domblklist
	virsh := m.binPath("virsh", m.cfg.VirshPath)
	out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domblklist", sourceVMName, "--details")
	if err != nil {
		return DomainRef{}, fmt.Errorf("lookup source VM %q: %w", sourceVMName, err)
	}

	// Parse domblklist output to find the disk path.
	// With --details each row is: Type(file) Device(disk) Target Source;
	// we take the first file-backed "disk" row (CDROMs are skipped).
	basePath := ""
	lines := strings.Split(out, "\n")
	for _, line := range lines {
		fields := strings.Fields(line)
		if len(fields) >= 4 && fields[0] == "file" && fields[1] == "disk" {
			basePath = fields[3]
			break
		}
	}
	if basePath == "" {
		return DomainRef{}, fmt.Errorf("could not find disk path for source VM %q", sourceVMName)
	}

	// Verify the disk exists
	if _, err := os.Stat(basePath); err != nil {
		return DomainRef{}, fmt.Errorf("source VM disk not accessible: %s: %w", basePath, err)
	}

	jobDir := filepath.Join(m.cfg.WorkDir, newVMName)
	if err := os.MkdirAll(jobDir, 0o755); err != nil {
		return DomainRef{}, fmt.Errorf("create job dir: %w", err)
	}

	// Linked clone: qcow2 overlay backed by the (still in use) source disk.
	overlayPath := filepath.Join(jobDir, "disk-overlay.qcow2")
	qemuImg := m.binPath("qemu-img", m.cfg.QemuImgPath)
	if _, err := m.run(ctx, qemuImg, "create", "-f", "qcow2", "-F", "qcow2", "-b", basePath, overlayPath); err != nil {
		return DomainRef{}, fmt.Errorf("create overlay: %w", err)
	}

	// Generate a unique cloud-init ISO for the cloned VM
	// This ensures the clone gets a new instance-id and DHCP network config,
	// regardless of how the source VM's cloud-init was configured (static IP, MAC, etc.)
	cloudInitISO := filepath.Join(jobDir, "cloud-init.iso")
	if err := m.buildCloudInitSeedForClone(ctx, newVMName, cloudInitISO); err != nil {
		// Log warning but don't fail - VM might still work if source didn't use cloud-init
		log.Printf("WARNING: failed to build cloud-init seed for clone %s: %v", newVMName, err)
		cloudInitISO = "" // Don't try to attach a non-existent ISO
	}

	// Dump the source VM's XML and modify it for the new VM
	sourceXML, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "dumpxml", sourceVMName)
	if err != nil {
		return DomainRef{}, fmt.Errorf("dumpxml source vm: %w", err)
	}

	// NOTE(review): modifyClonedXMLHelper is defined elsewhere in this file;
	// unlike modifyClonedXML below it also takes cpu/memory/network. Confirm
	// it applies the sizing values — nothing else here does.
	newXML, err := modifyClonedXMLHelper(sourceXML, newVMName, overlayPath, cloudInitISO, cpu, memoryMB, network)
	if err != nil {
		return DomainRef{}, fmt.Errorf("modify cloned xml: %w", err)
	}

	xmlPath := filepath.Join(jobDir, "domain.xml")
	if err := os.WriteFile(xmlPath, []byte(newXML), 0o644); err != nil {
		return DomainRef{}, fmt.Errorf("write domain xml: %w", err)
	}

	// virsh define
	if _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "define", xmlPath); err != nil {
		return DomainRef{}, fmt.Errorf("virsh define: %w", err)
	}

	// Fetch UUID (best-effort: on failure we still return the name).
	out, err = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domuuid", newVMName)
	if err != nil {
		return DomainRef{Name: newVMName}, nil
	}
	return DomainRef{Name: newVMName, UUID: strings.TrimSpace(out)}, nil
}

// modifyClonedXML takes the XML from a source domain and adapts it for a new cloned domain.
// It sets a new name, UUID, disk path, MAC address, and cloud-init ISO path.
element from the network interface to prevent PCI slot conflicts. -// If cloudInitISO is provided, any existing CDROM device is updated to use it, ensuring the -// cloned VM gets a unique instance-id and fresh network configuration via cloud-init. -func modifyClonedXML(sourceXML, newName, newDiskPath, cloudInitISO string) (string, error) { - doc := etree.NewDocument() - if err := doc.ReadFromString(sourceXML); err != nil { - return "", fmt.Errorf("parse source XML: %w", err) - } - - root := doc.Root() - if root == nil { - return "", fmt.Errorf("invalid XML: no root element") - } - - // Update VM name - nameElem := root.SelectElement("name") - if nameElem == nil { - return "", fmt.Errorf("invalid XML: missing element") - } - nameElem.SetText(newName) - - // Remove UUID - if uuidElem := root.SelectElement("uuid"); uuidElem != nil { - root.RemoveChild(uuidElem) - } - - // Update disk path for the main virtual disk (vda) - // This finds the first disk with a virtio bus and assumes it's the one to replace. - // This might need to be more robust if multiple virtio disks are present. 
- var diskReplaced bool - for _, disk := range root.FindElements("./devices/disk[@device='disk']") { - if target := disk.SelectElement("target"); target != nil { - if bus := target.SelectAttr("bus"); bus != nil && bus.Value == "virtio" { - if source := disk.SelectElement("source"); source != nil { - source.SelectAttr("file").Value = newDiskPath - diskReplaced = true - break - } - } - } - } - if !diskReplaced { - return "", fmt.Errorf("could not find a virtio disk in the source XML to replace") - } - - // Handle cloud-init CDROM: update existing or add new one - // This is critical for cloned VMs - they need a unique instance-id to trigger - // cloud-init re-initialization, including DHCP network configuration - if cloudInitISO != "" { - devices := root.SelectElement("devices") - if devices == nil { - return "", fmt.Errorf("invalid XML: missing element") - } - - // Find any existing CDROM device (not just ones with source files) - existingCDROMs := root.FindElements("./devices/disk[@device='cdrom']") - - if len(existingCDROMs) > 0 { - // Update first existing CDROM - cdrom := existingCDROMs[0] - if source := cdrom.SelectElement("source"); source != nil { - // Update existing source element - if fileAttr := source.SelectAttr("file"); fileAttr != nil { - fileAttr.Value = cloudInitISO - } else { - source.CreateAttr("file", cloudInitISO) - } - } else { - // Create source element if missing - source = cdrom.CreateElement("source") - source.CreateAttr("file", cloudInitISO) - } - } else { - // No existing CDROM - add new one with SCSI controller - hasScsiController := false - for _, ctrl := range root.FindElements("./devices/controller[@type='scsi']") { - if model := ctrl.SelectAttr("model"); model != nil && model.Value == "virtio-scsi" { - hasScsiController = true - break - } - } - if !hasScsiController { - scsiCtrl := devices.CreateElement("controller") - scsiCtrl.CreateAttr("type", "scsi") - scsiCtrl.CreateAttr("model", "virtio-scsi") - } - - cdrom := 
devices.CreateElement("disk") - cdrom.CreateAttr("type", "file") - cdrom.CreateAttr("device", "cdrom") - - driver := cdrom.CreateElement("driver") - driver.CreateAttr("name", "qemu") - driver.CreateAttr("type", "raw") - - source := cdrom.CreateElement("source") - source.CreateAttr("file", cloudInitISO) - - target := cdrom.CreateElement("target") - target.CreateAttr("dev", "sda") - target.CreateAttr("bus", "scsi") - - cdrom.CreateElement("readonly") - } - } - - // Update network interface: set new MAC and remove PCI address - if iface := root.FindElement("./devices/interface"); iface != nil { - // This handles standard libvirt network interfaces. - // Set a new MAC address - macElem := iface.SelectElement("mac") - if macElem != nil { - if addrAttr := macElem.SelectAttr("address"); addrAttr != nil { - addrAttr.Value = generateMACAddress() - } - } else { - // If no element, create one - macElem = iface.CreateElement("mac") - macElem.CreateAttr("address", generateMACAddress()) - } - - // Remove the address element to let libvirt assign a new one - if addrElem := iface.SelectElement("address"); addrElem != nil { - iface.RemoveChild(addrElem) - } - } else { - // Handle socket_vmnet case (qemu:commandline) - // The namespace makes selection tricky, so we iterate. 
- var cmdline *etree.Element - for _, child := range root.ChildElements() { - if child.Tag == "commandline" && child.Space == "qemu" { - cmdline = child - break - } - } - - if cmdline != nil { - for _, child := range cmdline.ChildElements() { - if child.Tag == "arg" && child.Space == "qemu" { - if valAttr := child.SelectAttr("value"); valAttr != nil { - if strings.HasPrefix(valAttr.Value, "virtio-net-pci") && strings.Contains(valAttr.Value, "mac=") { - parts := strings.Split(valAttr.Value, ",") - newParts := make([]string, 0, len(parts)) - macUpdated := false - for _, part := range parts { - if strings.HasPrefix(part, "mac=") { - newParts = append(newParts, "mac="+generateMACAddress()) - macUpdated = true - } else { - newParts = append(newParts, part) - } - } - if macUpdated { - valAttr.Value = strings.Join(newParts, ",") - break // Assuming only one network device per command line - } - } - } - } - } - } - } - - // Remove existing graphics password - if graphics := root.FindElement("./devices/graphics"); graphics != nil { - graphics.RemoveAttr("passwd") - } - - // Remove existing sound devices - for _, sound := range root.FindElements("./devices/sound") { - root.SelectElement("devices").RemoveChild(sound) - } - - doc.Indent(2) - newXML, err := doc.WriteToString() - if err != nil { - return "", fmt.Errorf("failed to write modified XML: %w", err) - } - - return newXML, nil -} - -func (m *VirshManager) InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error { - if sandboxName == "" { - return fmt.Errorf("sandboxName is required") - } - if username == "" { - username = defaultGuestUser(sandboxName) - } - if strings.TrimSpace(publicKey) == "" { - return fmt.Errorf("publicKey is required") - } - - jobDir := filepath.Join(m.cfg.WorkDir, sandboxName) - overlay := filepath.Join(jobDir, "disk-overlay.qcow2") - if _, err := os.Stat(overlay); err != nil { - return fmt.Errorf("overlay not found for VM %s: %w", sandboxName, err) - } - - switch 
strings.ToLower(m.cfg.SSHKeyInjectMethod) { - case "virt-customize": - // Requires libguestfs tools on host. - virtCustomize := m.binPath("virt-customize", m.cfg.VirtCustomizePath) - // Ensure account exists and inject key. This is offline before first boot. - cmdArgs := []string{ - "-a", overlay, - "--run-command", fmt.Sprintf("id -u %s >/dev/null 2>&1 || useradd -m -s /bin/bash %s", shEscape(username), shEscape(username)), - "--ssh-inject", fmt.Sprintf("%s:string:%s", username, publicKey), - } - if _, err := m.run(ctx, virtCustomize, cmdArgs...); err != nil { - return fmt.Errorf("virt-customize inject: %w", err) - } - case "cloud-init": - // Build a NoCloud seed with the provided key and attach as CD-ROM. - seedISO := filepath.Join(jobDir, "seed.iso") - if err := m.buildCloudInitSeed(ctx, sandboxName, username, publicKey, seedISO); err != nil { - return fmt.Errorf("build cloud-init seed: %w", err) - } - // Attach seed ISO to domain XML (adds a CDROM) and redefine the domain. - xmlPath := filepath.Join(jobDir, "domain.xml") - if err := m.attachISOToDomainXML(xmlPath, seedISO); err != nil { - return fmt.Errorf("attach seed iso to domain xml: %w", err) - } - virsh := m.binPath("virsh", m.cfg.VirshPath) - if _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "define", xmlPath); err != nil { - return fmt.Errorf("re-define domain with seed: %w", err) - } - default: - return fmt.Errorf("unsupported SSHKeyInjectMethod: %s", m.cfg.SSHKeyInjectMethod) - } - return nil -} - -func (m *VirshManager) StartVM(ctx context.Context, vmName string) error { - if vmName == "" { - return fmt.Errorf("vmName is required") - } - - m.logger.Info("starting VM", - "vm_name", vmName, - "libvirt_uri", m.cfg.LibvirtURI, - ) - - virsh := m.binPath("virsh", m.cfg.VirshPath) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "start", vmName) - if err != nil { - m.logger.Error("failed to start VM", - "vm_name", vmName, - "error", err, - "output", out, - ) - return err - } - - 
m.logger.Debug("virsh start command completed", - "vm_name", vmName, - "output", out, - ) - - // Verify VM actually started by checking state - state, stateErr := m.GetVMState(ctx, vmName) - if stateErr != nil { - m.logger.Warn("unable to verify VM state after start", - "vm_name", vmName, - "error", stateErr, - ) - } else { - m.logger.Info("VM state after start command", - "vm_name", vmName, - "state", state, - ) - if state != VMStateRunning { - m.logger.Warn("VM not in running state after start command", - "vm_name", vmName, - "actual_state", state, - "expected_state", VMStateRunning, - "hint", "On ARM Macs with Lima, VMs may fail to start due to CPU mode limitations", - ) - } - } - - return nil -} - -// GetVMState returns the current state of a VM using virsh domstate. -func (m *VirshManager) GetVMState(ctx context.Context, vmName string) (VMState, error) { - if vmName == "" { - return VMStateUnknown, fmt.Errorf("vmName is required") - } - virsh := m.binPath("virsh", m.cfg.VirshPath) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domstate", vmName) - if err != nil { - return VMStateUnknown, fmt.Errorf("get vm state: %w", err) - } - return parseVMState(out), nil -} - -// parseVMState converts virsh domstate output to VMState. -func parseVMState(output string) VMState { - state := strings.TrimSpace(output) - switch state { - case "running": - return VMStateRunning - case "paused": - return VMStatePaused - case "shut off": - return VMStateShutOff - case "crashed": - return VMStateCrashed - case "pmsuspended": - return VMStateSuspended - default: - return VMStateUnknown - } -} - -// GetVMMAC returns the MAC address of the VM's primary network interface. -// This is useful for DHCP lease management. 
-func (m *VirshManager) GetVMMAC(ctx context.Context, vmName string) (string, error) { - if vmName == "" { - return "", fmt.Errorf("vmName is required") - } - virsh := m.binPath("virsh", m.cfg.VirshPath) - - // Use domiflist to get interface info (works even if VM is not running) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domiflist", vmName) - if err != nil { - return "", fmt.Errorf("get vm interfaces: %w", err) - } - - // Parse domiflist output: - // Interface Type Source Model MAC - // ------------------------------------------------------- - // - network default virtio 52:54:00:6b:3c:86 - lines := strings.Split(out, "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if line == "" || strings.HasPrefix(line, "Interface") || strings.HasPrefix(line, "-") { - continue - } - fields := strings.Fields(line) - if len(fields) >= 5 { - mac := fields[4] - // Validate MAC format - if strings.Count(mac, ":") == 5 { - return mac, nil - } - } - } - return "", fmt.Errorf("no MAC address found for VM %s", vmName) -} - -// ReleaseDHCPLease attempts to release the DHCP lease for a given MAC address. -// This helps prevent IP conflicts when VMs are rapidly created and destroyed. -// It tries multiple methods: -// 1. Remove static DHCP host entry (if any) -// 2. Use dhcp_release utility to release dynamic lease -// 3. 
Remove from lease file directly as fallback -func (m *VirshManager) ReleaseDHCPLease(ctx context.Context, network, mac string) error { - if network == "" { - network = m.cfg.DefaultNetwork - } - if mac == "" { - return fmt.Errorf("MAC address is required") - } - - virsh := m.binPath("virsh", m.cfg.VirshPath) - - // Try to remove any static DHCP host entry (if exists) - // This is a best-effort operation - it may fail if no static entry exists - hostXML := fmt.Sprintf("", mac) - _, _ = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, - "net-update", network, "delete", "ip-dhcp-host", hostXML, "--live", "--config") - - // Get the bridge interface name for the network (e.g., virbr0) - bridgeName, ip := m.getNetworkBridgeAndLeaseIP(ctx, network, mac) - - if bridgeName != "" && ip != "" { - // Try dhcp_release utility first (cleanest method) - // dhcp_release - if _, err := m.run(ctx, "dhcp_release", bridgeName, ip, mac); err == nil { - m.logger.Info("released DHCP lease via dhcp_release", - "network", network, - "bridge", bridgeName, - "ip", ip, - "mac", mac, - ) - return nil - } - - // Fallback: try to remove from lease file directly - if err := m.removeLeaseFromFile(network, mac); err == nil { - m.logger.Info("removed DHCP lease from lease file", - "network", network, - "mac", mac, - ) - return nil - } - } - - m.logger.Debug("DHCP lease release attempted (may not have fully succeeded)", - "network", network, - "mac", mac, - ) - - return nil -} - -// getNetworkBridgeAndLeaseIP returns the bridge interface name and leased IP for a MAC address. 
-func (m *VirshManager) getNetworkBridgeAndLeaseIP(ctx context.Context, network, mac string) (bridge, ip string) { - virsh := m.binPath("virsh", m.cfg.VirshPath) - - // Get bridge name from network XML - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "net-info", network) - if err == nil { - for _, line := range strings.Split(out, "\n") { - if strings.HasPrefix(line, "Bridge:") { - parts := strings.Fields(line) - if len(parts) >= 2 { - bridge = parts[1] - } - } - } - } - - // Get IP from DHCP leases - out, err = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "net-dhcp-leases", network) - if err == nil { - // Parse output: - // Expiry Time MAC address Protocol IP address Hostname Client ID - // 2024-01-08 12:00:00 52:54:00:6b:3c:86 ipv4 192.168.122.63/24 vm-name - - for _, line := range strings.Split(out, "\n") { - if strings.Contains(line, mac) { - fields := strings.Fields(line) - // Fields: [date, time, mac, protocol, ip/cidr, hostname, clientid] - if len(fields) >= 5 { - ipCIDR := fields[4] - if idx := strings.Index(ipCIDR, "/"); idx > 0 { - ip = ipCIDR[:idx] - } else { - ip = ipCIDR - } - } - } - } - } - - return bridge, ip -} - -// removeLeaseFromFile removes a DHCP lease entry from the dnsmasq lease file. 
func (m *VirshManager) removeLeaseFromFile(network, mac string) error {
	// Lease file is typically at /var/lib/libvirt/dnsmasq/<network>.leases
	leaseFile := fmt.Sprintf("/var/lib/libvirt/dnsmasq/%s.leases", network)

	data, err := os.ReadFile(leaseFile)
	if err != nil {
		return fmt.Errorf("read lease file: %w", err)
	}

	// Lease file format (dnsmasq): expiry MAC IP hostname client-id
	// Example: 1704672000 52:54:00:6b:3c:86 192.168.122.63 vm-name *
	var newLines []string
	found := false
	for _, line := range strings.Split(string(data), "\n") {
		if strings.TrimSpace(line) == "" {
			continue
		}
		// Substring match on the MAC is sufficient; dnsmasq writes one
		// lease per line.
		if strings.Contains(line, mac) {
			found = true
			continue // Skip this line (remove the lease)
		}
		newLines = append(newLines, line)
	}

	if !found {
		return fmt.Errorf("lease not found for MAC %s", mac)
	}

	// Write back the modified lease file
	newData := strings.Join(newLines, "\n")
	if len(newLines) > 0 {
		newData += "\n"
	}
	if err := os.WriteFile(leaseFile, []byte(newData), 0o644); err != nil {
		return fmt.Errorf("write lease file: %w", err)
	}

	return nil
}

// StopVM stops a domain: graceful ACPI shutdown by default, hard power-off
// (`virsh destroy`) when force is true.
func (m *VirshManager) StopVM(ctx context.Context, vmName string, force bool) error {
	if vmName == "" {
		return fmt.Errorf("vmName is required")
	}
	virsh := m.binPath("virsh", m.cfg.VirshPath)
	if force {
		_, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "destroy", vmName)
		return err
	}
	_, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "shutdown", vmName)
	return err
}

// DestroyVM force-stops and undefines the domain, releases its DHCP lease
// (best effort), and removes its per-VM workspace directory. Only workspace
// removal failures are returned as errors.
func (m *VirshManager) DestroyVM(ctx context.Context, vmName string) error {
	if vmName == "" {
		return fmt.Errorf("vmName is required")
	}

	// Get MAC address before destroying (for DHCP lease cleanup)
	mac, macErr := m.GetVMMAC(ctx, vmName)
	if macErr != nil {
		m.logger.Debug("could not get MAC address for DHCP cleanup",
			"vm_name", vmName,
			"error", macErr,
		)
	}

	virsh := m.binPath("virsh", m.cfg.VirshPath)
	// Best-effort destroy if running
	_, _ = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "destroy", vmName)
	// Undefine with --remove-all-storage to force cleanup of associated volumes
	if _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "undefine", "--remove-all-storage", vmName); err != nil {
		// If --remove-all-storage fails (e.g., old libvirt), try without it
		if _, err2 := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "undefine", vmName); err2 != nil {
			// continue to remove files even if undefine fails
			_ = err2
		}
	}

	// Release DHCP lease to prevent IP conflicts with future VMs
	if mac != "" {
		if err := m.ReleaseDHCPLease(ctx, m.cfg.DefaultNetwork, mac); err != nil {
			m.logger.Debug("failed to release DHCP lease",
				"vm_name", vmName,
				"mac", mac,
				"error", err,
			)
		} else {
			m.logger.Info("released DHCP lease",
				"vm_name", vmName,
				"mac", mac,
			)
		}
	}

	// Remove workspace
	jobDir := filepath.Join(m.cfg.WorkDir, vmName)
	if err := os.RemoveAll(jobDir); err != nil {
		return fmt.Errorf("cleanup job dir: %w", err)
	}
	return nil
}

// CreateSnapshot creates either an external disk-only snapshot (qcow2 file in
// the VM's job dir) or an internal libvirt/qemu-managed snapshot.
func (m *VirshManager) CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (SnapshotRef, error) {
	if vmName == "" || snapshotName == "" {
		return SnapshotRef{}, fmt.Errorf("vmName and snapshotName are required")
	}
	virsh := m.binPath("virsh", m.cfg.VirshPath)

	if external {
		// External disk-only snapshot.
		jobDir := filepath.Join(m.cfg.WorkDir, vmName)
		snapPath := filepath.Join(jobDir, fmt.Sprintf("snap-%s.qcow2", snapshotName))
		// NOTE: This is a simplified attempt; real-world disk-only snapshots may need
		// additional options and disk target identification.
		args := []string{
			"--connect", m.cfg.LibvirtURI, "snapshot-create-as", vmName, snapshotName,
			"--disk-only", "--atomic", "--no-metadata",
			"--diskspec", fmt.Sprintf("vda,file=%s", snapPath),
		}
		if _, err := m.run(ctx, virsh, args...); err != nil {
			return SnapshotRef{}, fmt.Errorf("external snapshot create: %w", err)
		}
		return SnapshotRef{Name: snapshotName, Kind: "EXTERNAL", Ref: snapPath}, nil
	}

	// Internal snapshot (managed by libvirt/qemu).
	if _, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "snapshot-create-as", vmName, snapshotName); err != nil {
		return SnapshotRef{}, fmt.Errorf("internal snapshot create: %w", err)
	}
	return SnapshotRef{Name: snapshotName, Kind: "INTERNAL", Ref: snapshotName}, nil
}

// DiffSnapshot returns an advisory plan (not an actual diff) describing how to
// compare the filesystem state of two snapshots of the same VM.
func (m *VirshManager) DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*FSComparePlan, error) {
	if vmName == "" || fromSnapshot == "" || toSnapshot == "" {
		return nil, fmt.Errorf("vmName, fromSnapshot and toSnapshot are required")
	}

	// Implementation shell:
	// Strategy options:
	// 1) For internal snapshots: use qemu-nbd with snapshot selection to mount and diff trees.
	// 2) For external snapshots: mount the two qcow2 snapshot files via qemu-nbd.
	//
	// Because snapshot storage varies, we return advisory plan data and notes.
	plan := &FSComparePlan{
		VMName:       vmName,
		FromSnapshot: fromSnapshot,
		ToSnapshot:   toSnapshot,
		Notes:        []string{},
	}

	// Attempt to detect external snapshot files in job dir.
	jobDir := filepath.Join(m.cfg.WorkDir, vmName)
	fromPath := filepath.Join(jobDir, fmt.Sprintf("snap-%s.qcow2", fromSnapshot))
	toPath := filepath.Join(jobDir, fmt.Sprintf("snap-%s.qcow2", toSnapshot))
	if fileExists(fromPath) && fileExists(toPath) {
		plan.FromRef = fromPath
		plan.ToRef = toPath
		plan.Notes = append(plan.Notes,
			"External snapshots detected. You can mount them with qemu-nbd and diff the trees.",
			fmt.Sprintf("sudo modprobe nbd max_part=16 && sudo qemu-nbd --connect=/dev/nbd0 %s", shEscape(fromPath)),
			fmt.Sprintf("sudo qemu-nbd --connect=/dev/nbd1 %s", shEscape(toPath)),
			"sudo mount /dev/nbd0p1 /mnt/from && sudo mount /dev/nbd1p1 /mnt/to",
			"Then run: sudo diff -ruN /mnt/from /mnt/to or use rsync --dry-run to list changes.",
			"Be sure to umount and disconnect nbd after.",
		)
		return plan, nil
	}

	// Fallback: internal snapshots guidance.
	plan.Notes = append(plan.Notes,
		"Internal snapshots assumed. Use qemu-nbd with -s to select snapshot, then mount and diff.",
		"For example: qemu-nbd may support --snapshot= (varies by version) or use qemu-img to create temporary exports.",
		"Alternatively, boot the VM into each snapshot separately and export filesystem states.",
	)
	return plan, nil
}

// GetIPAddress discovers the VM's IPv4 address and MAC, polling until timeout.
// Discovery strategy depends on the configured network: ARP-table lookup for
// socket_vmnet (macOS), DHCP-lease lookup otherwise.
func (m *VirshManager) GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) {
	if vmName == "" {
		return "", "", fmt.Errorf("vmName is required")
	}

	m.logger.Info("discovering IP address",
		"vm_name", vmName,
		"timeout", timeout,
		"network", m.cfg.DefaultNetwork,
	)

	// First check VM state - if not running, IP discovery will definitely fail
	state, stateErr := m.GetVMState(ctx, vmName)
	if stateErr == nil && state != VMStateRunning {
		m.logger.Warn("attempting IP discovery on non-running VM",
			"vm_name", vmName,
			"state", state,
			"hint", "VM must be in 'running' state to have an IP address",
		)
	}

	// For socket_vmnet, use ARP-based discovery
	if m.cfg.DefaultNetwork == "socket_vmnet" {
		return m.getIPAddressViaARP(ctx, vmName, timeout)
	}

	// For regular libvirt networks, use lease-based discovery
	return m.getIPAddressViaLease(ctx, vmName, timeout)
}

// getIPAddressViaLease discovers IP using libvirt DHCP lease information.
// This works for libvirt-managed networks (default, NAT, etc.)
func (m *VirshManager) getIPAddressViaLease(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) {
	virsh := m.binPath("virsh", m.cfg.VirshPath)
	deadline := time.Now().Add(timeout)
	startTime := time.Now()
	attempt := 0
	// Poll `virsh domifaddr --source lease` every 2s until an IPv4 address
	// shows up or the deadline passes.
	for {
		attempt++
		out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domifaddr", vmName, "--source", "lease")
		if err == nil {
			ip, mac := parseDomIfAddrIPv4WithMAC(out)
			if ip != "" {
				m.logger.Info("IP address discovered via lease",
					"vm_name", vmName,
					"ip_address", ip,
					"mac_address", mac,
					"attempts", attempt,
					"elapsed", time.Since(startTime),
				)
				return ip, mac, nil
			}
		}

		// Log progress every 10 attempts (20 seconds)
		if attempt%10 == 0 {
			m.logger.Debug("IP discovery in progress (lease)",
				"vm_name", vmName,
				"attempts", attempt,
				"elapsed", time.Since(startTime),
				"domifaddr_output", out,
			)
		}

		if time.Now().After(deadline) {
			break
		}
		time.Sleep(2 * time.Second)
	}

	// Final state check for better error message
	finalState, _ := m.GetVMState(ctx, vmName)
	m.logger.Error("IP address discovery failed (lease)",
		"vm_name", vmName,
		"timeout", timeout,
		"attempts", attempt,
		"final_vm_state", finalState,
	)

	return "", "", fmt.Errorf("ip address not found within timeout (VM state: %s)", finalState)
}

// getIPAddressViaARP discovers IP using ARP table lookup.
// This is used for socket_vmnet on macOS where libvirt doesn't manage DHCP.
-func (m *VirshManager) getIPAddressViaARP(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - // First, get the VM's MAC address from the domain XML - mac, err := m.getVMMAC(ctx, vmName) - if err != nil { - m.logger.Error("failed to get VM MAC address for ARP lookup", - "vm_name", vmName, - "error", err, - ) - return "", "", fmt.Errorf("failed to get VM MAC address: %w", err) - } - - m.logger.Info("starting ARP-based IP discovery", - "vm_name", vmName, - "mac_address", mac, - "timeout", timeout, - ) - - deadline := time.Now().Add(timeout) - startTime := time.Now() - attempt := 0 - for { - attempt++ - ip, err := lookupIPByMAC(mac) - if err == nil && ip != "" { - m.logger.Info("IP address discovered via ARP", - "vm_name", vmName, - "ip_address", ip, - "mac_address", mac, - "attempts", attempt, - "elapsed", time.Since(startTime), - ) - return ip, mac, nil - } - - // Log progress every 10 attempts (20 seconds) - if attempt%10 == 0 { - m.logger.Debug("IP discovery in progress (ARP)", - "vm_name", vmName, - "mac_address", mac, - "attempts", attempt, - "elapsed", time.Since(startTime), - ) - } - - if time.Now().After(deadline) { - break - } - time.Sleep(2 * time.Second) - } - - // Final state check for better error message - finalState, _ := m.GetVMState(ctx, vmName) - m.logger.Error("IP address discovery failed (ARP)", - "vm_name", vmName, - "mac_address", mac, - "timeout", timeout, - "attempts", attempt, - "final_vm_state", finalState, - ) - - return "", "", fmt.Errorf("ip address not found in ARP table within timeout (VM state: %s, MAC: %s)", finalState, mac) -} - -// --- Helpers --- - -func (m *VirshManager) binPath(defaultName, override string) string { - if override != "" { - return override - } - return defaultName -} - -func (m *VirshManager) run(ctx context.Context, bin string, args ...string) (string, error) { - var stdout, stderr bytes.Buffer - // Provide a default timeout if the context has none. 
- if _, ok := ctx.Deadline(); !ok { - ctx2, cancel := context.WithTimeout(ctx, 120*time.Second) - defer cancel() - ctx = ctx2 - } - cmd := exec.CommandContext(ctx, bin, args...) - cmd.Stdout = &stdout - cmd.Stderr = &stderr - // Pass LIBVIRT_DEFAULT_URI for convenience when set. - env := os.Environ() - if m.cfg.LibvirtURI != "" { - env = append(env, "LIBVIRT_DEFAULT_URI="+m.cfg.LibvirtURI) - } - cmd.Env = env - - err := cmd.Run() - outStr := strings.TrimSpace(stdout.String()) - if err != nil { - errStr := strings.TrimSpace(stderr.String()) - if errStr != "" { - return outStr, fmt.Errorf("%s %s failed: %w: %s", bin, strings.Join(args, " "), err, errStr) - } - return outStr, fmt.Errorf("%s %s failed: %w", bin, strings.Join(args, " "), err) - } - return outStr, nil -} - -func getenvDefault(k, def string) string { - v := os.Getenv(k) - if v == "" { - return def - } - return v -} - -func intFromEnv(k string, def int) int { - v := os.Getenv(k) - if v == "" { - return def - } - var parsed int - _, err := fmt.Sscanf(v, "%d", &parsed) - if err != nil { - return def - } - return parsed -} - -func fileExists(p string) bool { - st, err := os.Stat(p) - return err == nil && !st.IsDir() -} - -func shEscape(s string) string { - // naive escape for use inside run-command; rely on controlled inputs. - s = strings.ReplaceAll(s, `'`, `'\'\'`) - return s -} - -func defaultGuestUser(vmName string) string { - // Heuristic default depending on distro naming conventions. - // Adjust as needed by calling code. - if strings.Contains(strings.ToLower(vmName), "ubuntu") { - return "ubuntu" - } - if strings.Contains(strings.ToLower(vmName), "centos") || strings.Contains(strings.ToLower(vmName), "rhel") { - return "centos" - } - return "cloud-user" -} - -// parseDomIfAddrIPv4WithMAC parses virsh domifaddr output and returns both IP and MAC address. -// This allows callers to verify the IP belongs to the expected VM by checking the MAC. 
-func parseDomIfAddrIPv4WithMAC(s string) (ip string, mac string) { - // virsh domifaddr output example: - // Name MAC address Protocol Address - // ---------------------------------------------------------------------------- - // vnet0 52:54:00:6b:3c:86 ipv4 192.168.122.63/24 - lines := strings.Split(s, "\n") - for _, l := range lines { - l = strings.TrimSpace(l) - if l == "" || strings.HasPrefix(l, "Name") || strings.HasPrefix(l, "-") { - continue - } - parts := strings.Fields(l) - if len(parts) >= 4 && parts[2] == "ipv4" { - mac = parts[1] - addr := parts[3] - if i := strings.IndexByte(addr, '/'); i > 0 { - ip = addr[:i] - } else { - ip = addr - } - return ip, mac - } - } - return "", "" -} - -// getVMMAC extracts the MAC address from a VM's domain XML. -// For socket_vmnet VMs, the MAC is in the qemu:commandline section. -// For regular VMs, it's in the interface element. -func (m *VirshManager) getVMMAC(ctx context.Context, vmName string) (string, error) { - virsh := m.binPath("virsh", m.cfg.VirshPath) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "dumpxml", vmName) - if err != nil { - return "", fmt.Errorf("failed to get domain XML: %w", err) - } - - // Try to find MAC in qemu:commandline (socket_vmnet) - // Look for: - if strings.Contains(out, "qemu:commandline") { - lines := strings.Split(out, "\n") - for _, line := range lines { - if strings.Contains(line, "virtio-net-pci") && strings.Contains(line, "mac=") { - // Extract MAC from value="...mac=52:54:00:xx:xx:xx..." 
- start := strings.Index(line, "mac=") - if start != -1 { - start += 4 // skip "mac=" - end := start + 17 // MAC address is 17 chars (xx:xx:xx:xx:xx:xx) - if end <= len(line) { - mac := line[start:end] - // Validate it looks like a MAC - if strings.Count(mac, ":") == 5 { - return mac, nil - } - } - } - } - } - } - - // Try to find MAC in regular interface element - // Look for: - lines := strings.Split(out, "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if strings.HasPrefix(line, " 00) - normalizedArpMAC := normalizeMAC(arpMAC) - - if normalizedArpMAC == mac { - // Extract IP from (x.x.x.x) - for _, p := range parts { - if strings.HasPrefix(p, "(") && strings.HasSuffix(p, ")") { - ip := p[1 : len(p)-1] - // Validate it looks like an IP - if strings.Count(ip, ".") == 3 { - return ip, nil - } - } - } - } - } - - return "", fmt.Errorf("MAC %s not found in ARP table", mac) -} - -// normalizeMAC normalizes a MAC address by ensuring each octet has two digits. -// e.g., "52:54:0:ab:cd:ef" -> "52:54:00:ab:cd:ef" -func normalizeMAC(mac string) string { - parts := strings.Split(mac, ":") - if len(parts) != 6 { - return mac - } - for i, p := range parts { - if len(p) == 1 { - parts[i] = "0" + p - } - } - return strings.Join(parts, ":") -} - -// --- Domain XML rendering --- - -type domainXMLParams struct { - Name string - MemoryMB int - VCPUs int - DiskPath string - CloudInitISO string // Optional path to cloud-init ISO for networking config - Network string // "default", "user", "socket_vmnet", or custom network name - SocketVMNetPath string // Path to socket_vmnet socket (used when Network="socket_vmnet") - Emulator string // Optional custom emulator path (e.g., wrapper script for socket_vmnet) - BootOrder []string - Arch string // e.g., "x86_64" or "aarch64" - Machine string // e.g., "pc-q35-6.2" or "virt" - DomainType string // e.g., "kvm" or "qemu" - MACAddress string // Optional MAC address for the network interface -} - -func renderDomainXML(p 
domainXMLParams) (string, error) { - // Set defaults if not provided - if p.Arch == "" { - p.Arch = "x86_64" - } - if p.Machine == "" { - if p.Arch == "aarch64" { - p.Machine = "virt" - } else { - p.Machine = "pc-q35-6.2" - } - } - if p.DomainType == "" { - p.DomainType = "kvm" - } - // Generate MAC address if not provided and using socket_vmnet - if p.MACAddress == "" { - p.MACAddress = generateMACAddress() - } - // Default socket_vmnet path - if p.Network == "socket_vmnet" && p.SocketVMNetPath == "" { - p.SocketVMNetPath = "/opt/homebrew/var/run/socket_vmnet" - } - - // A minimal domain XML; adjust virtio model as needed by your environment. - // Use conditional sections for architecture-specific elements. - // For socket_vmnet, we need the qemu namespace for commandline passthrough. - const tpl = ` - - {{ .Name }} - {{ .MemoryMB }} - {{ .VCPUs }} -{{- if eq .Arch "aarch64" }} - - hvm - - - -{{- else }} - - hvm - - - -{{- end }} - - -{{- if eq .Arch "aarch64" }} - -{{- else }} - - -{{- end }} - -{{- if and (eq .Arch "aarch64") (eq .DomainType "qemu") }} - - cortex-a72 - -{{- else }} - -{{- end }} - -{{- if .Emulator }} - {{ .Emulator }} -{{- end }} - - - - - -{{- if .CloudInitISO }} - - - - - - - -{{- end }} - -{{- if eq .Arch "aarch64" }} - -{{- end }} -{{- if eq .Network "socket_vmnet" }} - -{{- else if or (eq .Network "user") (eq .Network "") }} - - - -{{- else }} - - - - -{{- end }} - - -{{- if ne .Arch "aarch64" }} - -{{- end }} - - /dev/urandom - - -{{- if eq .Network "socket_vmnet" }} - - - - - - -{{- end }} - -` - var b bytes.Buffer - t := template.Must(template.New("domain").Parse(tpl)) - if err := t.Execute(&b, p); err != nil { - return "", err - } - return b.String(), nil -} - -// attachISOToDomainXML is a simple XML string replacement to add a CD-ROM pointing to seed ISO. -// For a production system, consider parsing XML and building a proper DOM. 
-func (m *VirshManager) attachISOToDomainXML(xmlPath, isoPath string) error { - data, err := os.ReadFile(xmlPath) - if err != nil { - return err - } - xml := string(data) - needle := "" - cdrom := fmt.Sprintf(` - - - - - - `, isoPath) - if strings.Contains(xml, cdrom) { - // already attached - return nil - } - xml = strings.Replace(xml, needle, cdrom+"\n "+needle, 1) - return os.WriteFile(xmlPath, []byte(xml), 0o644) -} - -// buildCloudInitSeed creates a NoCloud seed ISO with a single user and SSH key. -// Requires cloud-localds (cloud-image-utils) on the host if implemented via external tool. -// This implementation writes user-data/meta-data and attempts to use genisoimage or mkisofs. -func (m *VirshManager) buildCloudInitSeed(ctx context.Context, vmName, username, publicKey, outISO string) error { - jobDir := filepath.Dir(outISO) - userData := fmt.Sprintf(`#cloud-config -users: - - name: %s - sudo: ALL=(ALL) NOPASSWD:ALL - groups: users, admin, sudo - shell: /bin/bash - ssh_authorized_keys: - - %s -`, username, publicKey) - - metaData := fmt.Sprintf(`instance-id: %s -local-hostname: %s -`, vmName, vmName) - - userDataPath := filepath.Join(jobDir, "user-data") - metaDataPath := filepath.Join(jobDir, "meta-data") - if err := os.WriteFile(userDataPath, []byte(userData), 0o644); err != nil { - return fmt.Errorf("write user-data: %w", err) - } - if err := os.WriteFile(metaDataPath, []byte(metaData), 0o644); err != nil { - return fmt.Errorf("write meta-data: %w", err) - } - - // Try cloud-localds if available - if hasBin("cloud-localds") { - if _, err := m.run(ctx, "cloud-localds", outISO, userDataPath, metaDataPath); err == nil { - return nil - } - } - - // Fallback to genisoimage/mkisofs - if hasBin("genisoimage") { - // genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data - _, err := m.run(ctx, "genisoimage", "-output", outISO, "-volid", "cidata", "-joliet", "-rock", userDataPath, metaDataPath) - return err - } - if hasBin("mkisofs") { - _, 
err := m.run(ctx, "mkisofs", "-output", outISO, "-V", "cidata", "-J", "-R", userDataPath, metaDataPath) - return err - } - - return fmt.Errorf("cloud-init seed build tools not found: need cloud-localds or genisoimage/mkisofs") -} - -// buildCloudInitSeedForClone creates a cloud-init ISO for a cloned VM. -// The key purpose is to provide a NEW instance-id that differs from what's stored -// on the cloned disk. This forces cloud-init to re-run its initialization, -// including network configuration for the clone's new MAC address. -// -// If SSHCAPubKey is configured, this function also: -// - Creates a 'sandbox' user with sudo access for managed SSH credentials -// - Injects the SSH CA public key and configures sshd to trust it -func (m *VirshManager) buildCloudInitSeedForClone(ctx context.Context, vmName, outISO string) error { - jobDir := filepath.Dir(outISO) - - // Build cloud-init user-data - var userDataBuilder strings.Builder - userDataBuilder.WriteString(`#cloud-config -# Cloud-init config for cloned VMs -# This triggers cloud-init to re-run network configuration - -# Ensure networking is configured via DHCP -network: - version: 2 - ethernets: - id0: - match: - driver: virtio* - dhcp4: true -`) - - // If SSH CA is configured, add sandbox user and SSH CA trust - if m.cfg.SSHCAPubKey != "" { - userDataBuilder.WriteString(` -# Create sandbox user for managed SSH credentials -users: - - default - - name: sandbox - sudo: ALL=(ALL) NOPASSWD:ALL - shell: /bin/bash - lock_passwd: true - -# Write SSH CA public key -write_files: - - path: /etc/ssh/ssh_ca.pub - content: | - `) - userDataBuilder.WriteString(m.cfg.SSHCAPubKey) - userDataBuilder.WriteString(` - permissions: '0644' - owner: root:root - -# Configure sshd to trust the CA -runcmd: - - | - if [ -s /etc/ssh/ssh_ca.pub ]; then - if ! 
grep -q "TrustedUserCAKeys" /etc/ssh/sshd_config; then - echo "TrustedUserCAKeys /etc/ssh/ssh_ca.pub" >> /etc/ssh/sshd_config - systemctl restart sshd || systemctl restart ssh || true - fi - fi -`) - } - - userData := userDataBuilder.String() - - // Use a unique instance-id based on the VM name - // This is the critical part: cloud-init checks if instance-id has changed - // If it has, cloud-init re-runs initialization including network setup - metaData := fmt.Sprintf(`instance-id: %s -local-hostname: %s -`, vmName, vmName) - - userDataPath := filepath.Join(jobDir, "user-data") - metaDataPath := filepath.Join(jobDir, "meta-data") - if err := os.WriteFile(userDataPath, []byte(userData), 0o644); err != nil { - return fmt.Errorf("write user-data: %w", err) - } - if err := os.WriteFile(metaDataPath, []byte(metaData), 0o644); err != nil { - return fmt.Errorf("write meta-data: %w", err) - } - - // Try cloud-localds if available - if hasBin("cloud-localds") { - if _, err := m.run(ctx, "cloud-localds", outISO, userDataPath, metaDataPath); err == nil { - // Verify the ISO was actually created - if _, statErr := os.Stat(outISO); statErr == nil { - return nil - } else { - log.Printf("WARNING: cloud-localds succeeded but ISO not found at %s: %v", outISO, statErr) - } - } else { - log.Printf("WARNING: cloud-localds failed: %v, trying fallback tools", err) - } - } - - // Fallback to genisoimage/mkisofs - if hasBin("genisoimage") { - _, err := m.run(ctx, "genisoimage", "-output", outISO, "-volid", "cidata", "-joliet", "-rock", userDataPath, metaDataPath) - if err != nil { - log.Printf("WARNING: genisoimage failed: %v", err) - return err - } - // Verify the ISO was actually created - if _, statErr := os.Stat(outISO); statErr != nil { - return fmt.Errorf("genisoimage succeeded but ISO not found at %s: %w", outISO, statErr) - } - return nil - } - if hasBin("mkisofs") { - _, err := m.run(ctx, "mkisofs", "-output", outISO, "-V", "cidata", "-J", "-R", userDataPath, metaDataPath) - if 
err != nil { - log.Printf("WARNING: mkisofs failed: %v", err) - return err - } - // Verify the ISO was actually created - if _, statErr := os.Stat(outISO); statErr != nil { - return fmt.Errorf("mkisofs succeeded but ISO not found at %s: %w", outISO, statErr) - } - return nil - } - - return fmt.Errorf("cloud-init seed build tools not found: need cloud-localds or genisoimage/mkisofs") -} - -func hasBin(name string) bool { - _, err := exec.LookPath(name) - return err == nil -} - -// ValidateSourceVM performs pre-flight checks on a source VM before cloning. -// It checks: VM state, network interface, MAC address, and IP address. -func (m *VirshManager) ValidateSourceVM(ctx context.Context, vmName string) (*VMValidationResult, error) { - if vmName == "" { - return nil, fmt.Errorf("vmName is required") - } - - result := &VMValidationResult{ - VMName: vmName, - Valid: true, - Warnings: []string{}, - Errors: []string{}, - } - - // Check VM state - state, err := m.GetVMState(ctx, vmName) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, fmt.Sprintf("Failed to get VM state: %v", err)) - return result, nil - } - result.State = state - - // Check if VM has a network interface with MAC address - mac, macErr := m.GetVMMAC(ctx, vmName) - if macErr != nil { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not get MAC address from VM definition: %v", macErr)) - result.Warnings = append(result.Warnings, - "The source VM may not have a network interface configured properly") - } else { - result.MACAddress = mac - result.HasNetwork = true - } - - // Check if VM has an IP address (only if running) - switch state { - case VMStateRunning: - virsh := m.binPath("virsh", m.cfg.VirshPath) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domifaddr", vmName, "--source", "lease") - if err == nil { - ip, mac := parseDomIfAddrIPv4WithMAC(out) - if ip != "" { - result.IPAddress = ip - if mac != "" && result.MACAddress == "" { - 
result.MACAddress = mac - result.HasNetwork = true - } - } else { - result.Warnings = append(result.Warnings, - "Source VM is running but has no IP address assigned") - result.Warnings = append(result.Warnings, - "This may indicate cloud-init or DHCP issues - cloned sandboxes may also fail to get IPs") - } - } - - // Check network interface statistics if we have a MAC - if result.MACAddress != "" { - stats, statsErr := m.getVMNetworkStats(ctx, vmName) - if statsErr == nil { - if stats.txPackets == 0 && stats.rxPackets == 0 { - result.Warnings = append(result.Warnings, - "Source VM network interface shows zero TX/RX packets - network may not be functioning") - } else if stats.txPackets == 0 { - result.Warnings = append(result.Warnings, - "Source VM network interface shows zero TX packets - VM may not be sending network traffic") - } - } - } - case VMStateShutOff: - // VM is shut off - this is fine for cloning, but warn that we can't verify network - result.Warnings = append(result.Warnings, - "Source VM is shut off - cannot verify network configuration (IP/DHCP)") - result.Warnings = append(result.Warnings, - "Consider starting the source VM to verify it can obtain an IP before cloning") - default: - // VM is in an unexpected state - result.Warnings = append(result.Warnings, - fmt.Sprintf("Source VM is in state '%s' - expected 'running' or 'shut off'", state)) - } - - // Check if VM has cloud-init CDROM (helpful for diagnostics) - hasCloudInit := m.vmHasCloudInitCDROM(ctx, vmName) - if !hasCloudInit { - result.Warnings = append(result.Warnings, - "Source VM does not appear to have a cloud-init CDROM attached") - result.Warnings = append(result.Warnings, - "Cloned sandboxes will still get their own cloud-init ISO for network config") - } - - return result, nil -} - -// vmNetworkStats holds network interface statistics -type vmNetworkStats struct { - rxBytes int64 - rxPackets int64 - txBytes int64 - txPackets int64 -} - -// getVMNetworkStats returns network interface 
statistics for a VM -func (m *VirshManager) getVMNetworkStats(ctx context.Context, vmName string) (*vmNetworkStats, error) { - virsh := m.binPath("virsh", m.cfg.VirshPath) - - // Get interface name first - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domiflist", vmName) - if err != nil { - return nil, fmt.Errorf("get interface list: %w", err) - } - - // Parse domiflist to get interface name - var ifaceName string - lines := strings.Split(out, "\n") - for _, line := range lines { - line = strings.TrimSpace(line) - if line == "" || strings.HasPrefix(line, "Interface") || strings.HasPrefix(line, "-") { - continue - } - fields := strings.Fields(line) - if len(fields) >= 1 && fields[0] != "-" { - ifaceName = fields[0] - break - } - } - - if ifaceName == "" || ifaceName == "-" { - // Interface name is "-" for some network types, try to get stats anyway - return nil, fmt.Errorf("no named interface found") - } - - // Get interface stats - out, err = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domifstat", vmName, ifaceName) - if err != nil { - return nil, fmt.Errorf("get interface stats: %w", err) - } - - stats := &vmNetworkStats{} - for _, line := range strings.Split(out, "\n") { - fields := strings.Fields(line) - if len(fields) >= 2 { - var val int64 - _, _ = fmt.Sscanf(fields[1], "%d", &val) - switch { - case strings.Contains(fields[0], "rx_bytes"): - stats.rxBytes = val - case strings.Contains(fields[0], "rx_packets"): - stats.rxPackets = val - case strings.Contains(fields[0], "tx_bytes"): - stats.txBytes = val - case strings.Contains(fields[0], "tx_packets"): - stats.txPackets = val - } - } - } - - return stats, nil -} - -// vmHasCloudInitCDROM checks if a VM has a cloud-init CDROM attached -func (m *VirshManager) vmHasCloudInitCDROM(ctx context.Context, vmName string) bool { - virsh := m.binPath("virsh", m.cfg.VirshPath) - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "domblklist", vmName, "--details") - if err != nil { - return 
false - } - - // Look for cdrom device with cloud-init related path - for _, line := range strings.Split(out, "\n") { - fields := strings.Fields(line) - if len(fields) >= 4 && fields[1] == "cdrom" { - path := strings.ToLower(fields[3]) - if strings.Contains(path, "cloud") || strings.Contains(path, "seed") || - strings.Contains(path, "cidata") || strings.Contains(path, "init") { - return true - } - } - } - return false -} - -// CheckHostResources validates that the host has sufficient resources for a new sandbox. -func (m *VirshManager) CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) (*ResourceCheckResult, error) { - result := &ResourceCheckResult{ - Valid: true, - RequiredCPUs: requiredCPUs, - RequiredMemoryMB: requiredMemoryMB, - Warnings: []string{}, - Errors: []string{}, - } - - // Check available resources using virsh nodeinfo - info, err := m.getHostInfo(ctx) - if err != nil { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not check host resources: %v", err)) - } else { - result.TotalMemoryMB = info.totalMB - result.AvailableMemoryMB = info.availableMB - result.AvailableCPUs = info.cpus - result.TotalCPUs = info.cpus // Total CPUs on the host - - // Check if we have enough memory - if int64(requiredMemoryMB) > info.availableMB { - result.Valid = false - result.NeedsMemoryApproval = true - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient Host memory: need %d MB but only %d MB available", - requiredMemoryMB, info.availableMB)) - } else if float64(requiredMemoryMB) > float64(info.availableMB)*0.8 { - // Warn if using more than 80% of available memory - result.NeedsMemoryApproval = true - result.Warnings = append(result.Warnings, - fmt.Sprintf("Low Host memory warning: requesting %d MB of %d MB available (%.1f%%)", - requiredMemoryMB, info.availableMB, - float64(requiredMemoryMB)/float64(info.availableMB)*100)) - } - - // Check if we have enough CPUs - if requiredCPUs > info.cpus { - result.Valid = 
false - result.NeedsCPUApproval = true - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient CPUs: need %d but only %d available", - requiredCPUs, info.cpus)) - } - } - - // Check available disk space in work directory - diskInfo, err := m.getWorkDirDiskSpace(ctx) - if err != nil { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Could not check disk space: %v", err)) - } else { - result.AvailableDiskMB = diskInfo.availableMB - - // Warn if disk space is low (less than 10GB) - if diskInfo.availableMB < 10*1024 { - result.Warnings = append(result.Warnings, - fmt.Sprintf("Low disk space warning: only %d MB available in work directory (%s)", - diskInfo.availableMB, m.cfg.WorkDir)) - } - - // Error if disk space is critically low (less than 1GB) - if diskInfo.availableMB < 1024 { - result.Valid = false - result.Errors = append(result.Errors, - fmt.Sprintf("Insufficient disk space: only %d MB available in work directory (%s)", - diskInfo.availableMB, m.cfg.WorkDir)) - } - } - - return result, nil -} - -// hostInfo holds host resource information -type hostInfo struct { - cpus int - totalMB int64 - availableMB int64 -} - -// getHostInfo returns available and total resources on the host -func (m *VirshManager) getHostInfo(ctx context.Context) (*hostInfo, error) { - virsh := m.binPath("virsh", m.cfg.VirshPath) - info := &hostInfo{} - - // Get CPU info from nodeinfo - out, err := m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "nodeinfo") - if err == nil { - for _, line := range strings.Split(out, "\n") { - if strings.HasPrefix(line, "CPU(s):") { - fields := strings.Fields(line) - if len(fields) >= 2 { - _, _ = fmt.Sscanf(fields[1], "%d", &info.cpus) - } - } - if strings.HasPrefix(line, "Memory size:") { - fields := strings.Fields(line) - if len(fields) >= 3 { - var val int64 - _, _ = fmt.Sscanf(fields[2], "%d", &val) - info.totalMB = val / 1024 - info.availableMB = info.totalMB / 2 // Fallback estimate - } - } - } - } - - // Try virsh nodememstats 
for more accurate available memory - out, err = m.run(ctx, virsh, "--connect", m.cfg.LibvirtURI, "nodememstats") - if err == nil { - var free, buffers, cached int64 - foundFree := false - for _, line := range strings.Split(out, "\n") { - fields := strings.Fields(line) - if len(fields) >= 2 { - var val int64 - _, _ = fmt.Sscanf(fields[len(fields)-2], "%d", &val) - if strings.Contains(fields[0], "free") { - free = val - foundFree = true - } else if strings.Contains(fields[0], "buffers") { - buffers = val - } else if strings.Contains(fields[0], "cached") { - cached = val - } - } - } - if foundFree { - // Available memory is roughly free + buffers + cached - // This is more accurate than just free, as buffers/cached can be reclaimed - info.availableMB = (free + buffers + cached) / 1024 - } - } - - if info.totalMB > 0 { - return info, nil - } - - // Fallback: use free command (Linux) - var stdout bytes.Buffer - cmd := exec.CommandContext(ctx, "free", "-m") - cmd.Stdout = &stdout - if err := cmd.Run(); err == nil { - for _, line := range strings.Split(stdout.String(), "\n") { - if strings.HasPrefix(line, "Mem:") { - fields := strings.Fields(line) - if len(fields) >= 7 { - var total, available int64 - _, _ = fmt.Sscanf(fields[1], "%d", &total) - _, _ = fmt.Sscanf(fields[6], "%d", &available) - info.totalMB = total - info.availableMB = available - return info, nil - } - } - } - } - - // Fallback: use vm_stat command (macOS) - stdout.Reset() - cmd = exec.CommandContext(ctx, "vm_stat") - cmd.Stdout = &stdout - if err := cmd.Run(); err == nil { - // Parse vm_stat output - // Pages free: 246026. - // Pages active: 2124707. - // Pages inactive: 2087528. - // Pages speculative: 33285. - // Pages throttled: 0. - // Pages wired down: 847372. - // Pages purgeable: 12668. - // "Translation faults": 329598642. - // Pages copy-on-write: 15888258. - // Pages zero filled: 204860799. - // Pages reactivated: 2094207. - // Pages purged: 3571658. - // File-backed pages: 1318625. 
- // Anonymous pages: 2893610. - // Pages stored in compressor: 2339580. - // Pages occupied by compressor: 922112. - // Decompressions: 10777592. - // Compressions: 14932378. - // Pageins: 5954753. - // Pageouts: 166129. - // Swapins: 2266008. - // Swapouts: 2724816. - - // Page size is typically 4096 bytes (4KB) - // Available = (Pages free + Pages inactive + Pages speculative) * 4096 / 1024 / 1024 - // We can get page size from sysctl hw.pagesize, but assuming 16KB for Apple Silicon or 4KB for Intel - // Safer to just assume 4KB or query it. Let's assume standard 4096 first. - pageSize := int64(4096) - // Try to get actual page size - if out, err := exec.Command("pagesize").Output(); err == nil { - _, _ = fmt.Sscanf(strings.TrimSpace(string(out)), "%d", &pageSize) - } - - var free, inactive, speculative int64 - for _, line := range strings.Split(stdout.String(), "\n") { - parts := strings.Split(line, ":") - if len(parts) >= 2 { - key := strings.TrimSpace(parts[0]) - valStr := strings.TrimSpace(parts[1]) - valStr = strings.TrimSuffix(valStr, ".") - var val int64 - _, _ = fmt.Sscanf(valStr, "%d", &val) - - switch key { - case "Pages free": - free = val - case "Pages inactive": - inactive = val - case "Pages speculative": - speculative = val - } - } - } - // Calculate available in MB - info.availableMB = (free + inactive + speculative) * pageSize / 1024 / 1024 - - // For total memory on macOS, use sysctl - if out, err := exec.Command("sysctl", "-n", "hw.memsize").Output(); err == nil { - var totalBytes int64 - _, _ = fmt.Sscanf(strings.TrimSpace(string(out)), "%d", &totalBytes) - info.totalMB = totalBytes / 1024 / 1024 - } - - if info.totalMB > 0 { - return info, nil - } - } - - return nil, fmt.Errorf("could not determine host memory") -} - -// diskSpaceInfo holds disk space information -type diskSpaceInfo struct { - availableMB int64 -} - -// getWorkDirDiskSpace returns available disk space in the work directory -func (m *VirshManager) getWorkDirDiskSpace(ctx 
context.Context) (*diskSpaceInfo, error) { - workDir := m.cfg.WorkDir - if workDir == "" { - workDir = "/var/lib/libvirt/images/sandboxes" - } - - // Use df command to get disk space - var stdout bytes.Buffer - cmd := exec.CommandContext(ctx, "df", "-m", workDir) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("df command failed: %w", err) - } - - // Parse df output (second line, 4th column for available) - lines := strings.Split(stdout.String(), "\n") - if len(lines) >= 2 { - fields := strings.Fields(lines[1]) - if len(fields) >= 4 { - var available int64 - _, _ = fmt.Sscanf(fields[3], "%d", &available) - return &diskSpaceInfo{ - availableMB: available, - }, nil - } - } - - return nil, fmt.Errorf("could not parse df output") -} diff --git a/fluid/internal/libvirt/virsh_test.go b/fluid/internal/libvirt/virsh_test.go deleted file mode 100755 index 757b020d..00000000 --- a/fluid/internal/libvirt/virsh_test.go +++ /dev/null @@ -1,783 +0,0 @@ -//go:build libvirt - -package libvirt - -import ( - "strings" - "testing" -) - -func TestRenderDomainXML_CPUMode(t *testing.T) { - tests := []struct { - name string - params domainXMLParams - expectedCPUMode string - }{ - { - name: "x86_64 with kvm uses host-passthrough", - params: domainXMLParams{ - Name: "test-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "default", - Arch: "x86_64", - Machine: "pc-q35-6.2", - DomainType: "kvm", - }, - expectedCPUMode: ``, - }, - { - name: "x86_64 with qemu uses host-passthrough", - params: domainXMLParams{ - Name: "test-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "default", - Arch: "x86_64", - Machine: "pc-q35-6.2", - DomainType: "qemu", - }, - expectedCPUMode: ``, - }, - { - name: "aarch64 with kvm uses host-passthrough", - params: domainXMLParams{ - Name: "test-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: 
"default", - Arch: "aarch64", - Machine: "virt", - DomainType: "kvm", - }, - expectedCPUMode: ``, - }, - { - name: "aarch64 with qemu uses custom cortex-a72 model", - params: domainXMLParams{ - Name: "test-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "default", - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", - }, - expectedCPUMode: ``, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - xml, err := renderDomainXML(tt.params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - if !strings.Contains(xml, tt.expectedCPUMode) { - t.Errorf("renderDomainXML() expected CPU mode %q not found in XML:\n%s", tt.expectedCPUMode, xml) - } - }) - } -} - -func TestRenderDomainXML_BasicStructure(t *testing.T) { - params := domainXMLParams{ - Name: "test-sandbox", - MemoryMB: 2048, - VCPUs: 4, - DiskPath: "/var/lib/libvirt/images/test-sandbox.qcow2", - Network: "default", - Arch: "x86_64", - Machine: "pc-q35-6.2", - DomainType: "kvm", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - expectedElements := []string{ - ``, - `test-sandbox`, - `2048`, - `4`, - `hvm`, - ``, - ``, - } - - for _, expected := range expectedElements { - if !strings.Contains(xml, expected) { - t.Errorf("renderDomainXML() expected element %q not found in XML:\n%s", expected, xml) - } - } -} - -func TestRenderDomainXML_Aarch64Features(t *testing.T) { - params := domainXMLParams{ - Name: "test-arm-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "default", - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // aarch64-specific elements - expectedElements := []string{ - ``, - ``, - ``, - ``, - `cortex-a72`, - } - - for _, expected := range expectedElements { - if 
!strings.Contains(xml, expected) { - t.Errorf("renderDomainXML() expected aarch64 element %q not found in XML:\n%s", expected, xml) - } - } - - // x86_64-specific elements should NOT be present - unexpectedElements := []string{ - ``, - ``, - ``, - } - - for _, unexpected := range unexpectedElements { - if strings.Contains(xml, unexpected) { - t.Errorf("renderDomainXML() unexpected x86_64 element %q found in aarch64 XML:\n%s", unexpected, xml) - } - } -} - -func TestRenderDomainXML_X86Features(t *testing.T) { - params := domainXMLParams{ - Name: "test-x86-vm", - MemoryMB: 1024, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "default", - Arch: "x86_64", - Machine: "pc-q35-6.2", - DomainType: "kvm", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // x86_64-specific elements - expectedElements := []string{ - ``, - ``, - ``, - } - - for _, expected := range expectedElements { - if !strings.Contains(xml, expected) { - t.Errorf("renderDomainXML() expected x86_64 element %q not found in XML:\n%s", expected, xml) - } - } - - // aarch64-specific elements should NOT be present - unexpectedElements := []string{ - ``, - ``, - ``, - } - - for _, unexpected := range unexpectedElements { - if strings.Contains(xml, unexpected) { - t.Errorf("renderDomainXML() unexpected aarch64 element %q found in x86_64 XML:\n%s", unexpected, xml) - } - } -} - -func TestRenderDomainXML_Defaults(t *testing.T) { - // Test that defaults are applied when fields are empty - params := domainXMLParams{ - Name: "test-defaults", - MemoryMB: 512, - VCPUs: 1, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "default", - // Arch, Machine, and DomainType are empty - should use defaults - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // Should default to x86_64, pc-q35-6.2, kvm - expectedDefaults := []string{ - ``, - `hvm`, - ``, - } - - 
for _, expected := range expectedDefaults { - if !strings.Contains(xml, expected) { - t.Errorf("renderDomainXML() expected default element %q not found in XML:\n%s", expected, xml) - } - } -} - -func TestRenderDomainXML_WithCloudInitISO(t *testing.T) { - // Test that cloud-init ISO is properly included in domain XML - params := domainXMLParams{ - Name: "test-cloud-init", - MemoryMB: 2048, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/jobs/test-cloud-init/disk-overlay.qcow2", - CloudInitISO: "/var/lib/libvirt/images/jobs/test-cloud-init/cloud-init.iso", - Network: "default", - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // Cloud-init ISO elements should be present - expectedElements := []string{ - ``, - ``, - ``, - ``, - ``, - } - - for _, expected := range expectedElements { - if !strings.Contains(xml, expected) { - t.Errorf("renderDomainXML() expected cloud-init element %q not found in XML:\n%s", expected, xml) - } - } -} - -func TestRenderDomainXML_WithoutCloudInitISO(t *testing.T) { - // Test that no cloud-init CDROM is included when CloudInitISO is empty - params := domainXMLParams{ - Name: "test-no-cloud-init", - MemoryMB: 2048, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/jobs/test/disk-overlay.qcow2", - Network: "default", - Arch: "x86_64", - Machine: "pc-q35-6.2", - DomainType: "kvm", - // CloudInitISO is empty - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // Cloud-init ISO elements should NOT be present - unexpectedElements := []string{ - `device="cdrom"`, - ``, - } - - for _, unexpected := range unexpectedElements { - if strings.Contains(xml, unexpected) { - t.Errorf("renderDomainXML() unexpected cloud-init element %q found in XML when CloudInitISO is empty:\n%s", unexpected, xml) - } - } - - // Main disk should still be present - if 
!strings.Contains(xml, ``) { - t.Error("renderDomainXML() main disk not found in XML") - } -} - -func TestCloudInitSeedForClone_UniqueInstanceID(t *testing.T) { - // This test verifies the concept that each clone should get a unique instance-id - // The actual buildCloudInitSeedForClone function creates files, so we test the - // expected behavior through the domain XML params - - vmNames := []string{"sbx-abc123", "sbx-def456", "sbx-ghi789"} - - for _, vmName := range vmNames { - params := domainXMLParams{ - Name: vmName, - MemoryMB: 1024, - VCPUs: 1, - DiskPath: "/var/lib/libvirt/images/jobs/" + vmName + "/disk-overlay.qcow2", - CloudInitISO: "/var/lib/libvirt/images/jobs/" + vmName + "/cloud-init.iso", - Network: "default", - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() for %s error = %v", vmName, err) - } - - // Each sandbox should have its own cloud-init ISO path - expectedISOPath := "/var/lib/libvirt/images/jobs/" + vmName + "/cloud-init.iso" - if !strings.Contains(xml, expectedISOPath) { - t.Errorf("renderDomainXML() for %s expected ISO path %q not found in XML", vmName, expectedISOPath) - } - } -} - -func TestRenderDomainXML_UserModeNetworking(t *testing.T) { - tests := []struct { - name string - network string - wantUser bool // true if we expect user-mode networking - }{ - { - name: "user network value", - network: "user", - wantUser: true, - }, - { - name: "empty network value", - network: "", - wantUser: true, - }, - { - name: "default network value", - network: "default", - wantUser: false, - }, - { - name: "custom network value", - network: "br0", - wantUser: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - params := domainXMLParams{ - Name: "test-vm", - MemoryMB: 2048, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: tt.network, - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", 
- } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - hasUserInterface := strings.Contains(xml, ``) - hasNetworkInterface := strings.Contains(xml, ``) - - if tt.wantUser { - if !hasUserInterface { - t.Errorf("expected user-mode networking but got network interface in XML:\n%s", xml) - } - if hasNetworkInterface { - t.Errorf("expected user-mode networking but found network interface in XML:\n%s", xml) - } - } else { - if hasUserInterface { - t.Errorf("expected network interface but got user-mode networking in XML:\n%s", xml) - } - if !hasNetworkInterface { - t.Errorf("expected network interface but not found in XML:\n%s", xml) - } - // Also verify the network name is correct - expectedSource := `` - if !strings.Contains(xml, expectedSource) { - t.Errorf("expected network source %q not found in XML:\n%s", expectedSource, xml) - } - } - }) - } -} - -func TestRenderDomainXML_SocketVMNet(t *testing.T) { - params := domainXMLParams{ - Name: "test-socket-vmnet", - MemoryMB: 2048, - VCPUs: 2, - DiskPath: "/var/lib/libvirt/images/test.qcow2", - Network: "socket_vmnet", - SocketVMNetPath: "/opt/homebrew/var/run/socket_vmnet", - Emulator: "/path/to/qemu-wrapper.sh", - MACAddress: "52:54:00:ab:cd:ef", - Arch: "aarch64", - Machine: "virt", - DomainType: "qemu", - } - - xml, err := renderDomainXML(params) - if err != nil { - t.Fatalf("renderDomainXML() error = %v", err) - } - - // Should have qemu namespace - if !strings.Contains(xml, `xmlns:qemu="http://libvirt.org/schemas/domain/qemu/1.0"`) { - t.Error("expected qemu namespace for socket_vmnet") - } - - // Should have custom emulator - if !strings.Contains(xml, `/path/to/qemu-wrapper.sh`) { - t.Errorf("expected custom emulator in XML:\n%s", xml) - } - - // Should have qemu:commandline with socket networking - if !strings.Contains(xml, ``) { - t.Error("expected qemu:commandline for socket_vmnet") - } - - // Should have socket,fd=3 netdev - if !strings.Contains(xml, 
`socket,id=vnet,fd=3`) { - t.Errorf("expected socket,fd=3 netdev in XML:\n%s", xml) - } - - // Should have MAC address - if !strings.Contains(xml, `mac=52:54:00:ab:cd:ef`) { - t.Errorf("expected MAC address in XML:\n%s", xml) - } - - // Should NOT have standard interface element - if strings.Contains(xml, ``) || strings.Contains(xml, ``) { - t.Errorf("unexpected standard interface in socket_vmnet XML:\n%s", xml) - } -} - -func TestNormalizeMAC(t *testing.T) { - tests := []struct { - name string - input string - expected string - }{ - { - name: "already normalized", - input: "52:54:00:ab:cd:ef", - expected: "52:54:00:ab:cd:ef", - }, - { - name: "shortened octets", - input: "52:54:0:ab:cd:ef", - expected: "52:54:00:ab:cd:ef", - }, - { - name: "multiple shortened octets", - input: "52:54:0:a:c:e", - expected: "52:54:00:0a:0c:0e", - }, - { - name: "invalid format", - input: "not-a-mac", - expected: "not-a-mac", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := normalizeMAC(tt.input) - if result != tt.expected { - t.Errorf("normalizeMAC(%q) = %q, want %q", tt.input, result, tt.expected) - } - }) - } -} - -func TestGenerateMACAddress(t *testing.T) { - mac := generateMACAddress() - - // Should start with QEMU prefix - if !strings.HasPrefix(mac, "52:54:00:") { - t.Errorf("generateMACAddress() = %q, want prefix '52:54:00:'", mac) - } - - // Should be valid format (17 chars: xx:xx:xx:xx:xx:xx) - if len(mac) != 17 { - t.Errorf("generateMACAddress() = %q, want 17 chars", mac) - } - - // Should have 5 colons - if strings.Count(mac, ":") != 5 { - t.Errorf("generateMACAddress() = %q, want 5 colons", mac) - } - - // Generate another one - should be different (random) - mac2 := generateMACAddress() - if mac == mac2 { - t.Errorf("generateMACAddress() returned same MAC twice: %q", mac) - } -} - -func TestParseVMState(t *testing.T) { - tests := []struct { - name string - output string - expected VMState - }{ - { - name: "running state", - 
output: "running\n", - expected: VMStateRunning, - }, - { - name: "running state without newline", - output: "running", - expected: VMStateRunning, - }, - { - name: "shut off state", - output: "shut off\n", - expected: VMStateShutOff, - }, - { - name: "paused state", - output: "paused\n", - expected: VMStatePaused, - }, - { - name: "crashed state", - output: "crashed\n", - expected: VMStateCrashed, - }, - { - name: "pmsuspended state", - output: "pmsuspended\n", - expected: VMStateSuspended, - }, - { - name: "unknown state", - output: "some-unknown-state\n", - expected: VMStateUnknown, - }, - { - name: "empty string", - output: "", - expected: VMStateUnknown, - }, - { - name: "whitespace only", - output: " \n", - expected: VMStateUnknown, - }, - { - name: "running with extra whitespace", - output: " running \n", - expected: VMStateRunning, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := parseVMState(tt.output) - if result != tt.expected { - t.Errorf("parseVMState(%q) = %v, want %v", tt.output, result, tt.expected) - } - }) - } -} - -func TestVMState_StringValues(t *testing.T) { - // Verify that VMState constants have the expected string values - tests := []struct { - state VMState - expected string - }{ - {VMStateRunning, "running"}, - {VMStateShutOff, "shut off"}, - {VMStatePaused, "paused"}, - {VMStateCrashed, "crashed"}, - {VMStateSuspended, "pmsuspended"}, - {VMStateUnknown, "unknown"}, - } - - for _, tt := range tests { - t.Run(string(tt.state), func(t *testing.T) { - if string(tt.state) != tt.expected { - t.Errorf("VMState constant %v has value %q, want %q", tt.state, string(tt.state), tt.expected) - } - }) - } -} - -func TestModifyClonedXML_UpdatesCloudInitISO(t *testing.T) { - // Test that modifyClonedXML updates existing CDROM device to use new cloud-init ISO - sourceXML := ` - test-vm - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - - - - - -
- - -` - - newXML, err := modifyClonedXML(sourceXML, "sbx-clone123", "/var/lib/libvirt/images/jobs/sbx-clone123/disk-overlay.qcow2", "/var/lib/libvirt/images/jobs/sbx-clone123/cloud-init.iso") - if err != nil { - t.Fatalf("modifyClonedXML() error = %v", err) - } - - // Should have updated name - if !strings.Contains(newXML, "sbx-clone123") { - t.Error("modifyClonedXML() did not update VM name") - } - - // Should have updated disk path - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-clone123/disk-overlay.qcow2") { - t.Error("modifyClonedXML() did not update disk path") - } - - // CRITICAL: Should have updated cloud-init ISO path (not the old /tmp/test-vm-seed.img) - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-clone123/cloud-init.iso") { - t.Errorf("modifyClonedXML() did not update cloud-init ISO path in XML:\n%s", newXML) - } - - // Should NOT contain the old cloud-init ISO path - if strings.Contains(newXML, "/tmp/test-vm-seed.img") { - t.Errorf("modifyClonedXML() still contains old cloud-init ISO path in XML:\n%s", newXML) - } - - // UUID should be removed - if strings.Contains(newXML, "12345678-1234-1234-1234-123456789012") { - t.Error("modifyClonedXML() did not remove UUID") - } - - // MAC address should be different from source - if strings.Contains(newXML, "52:54:00:11:22:33") { - t.Error("modifyClonedXML() did not generate new MAC address") - } -} - -func TestModifyClonedXML_AddsCloudInitCDROM(t *testing.T) { - // Test that modifyClonedXML adds CDROM device when source VM has none - sourceXML := ` - test-vm-no-cdrom - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - -` - - newXML, err := modifyClonedXML(sourceXML, "sbx-new", "/var/lib/libvirt/images/jobs/sbx-new/disk.qcow2", "/var/lib/libvirt/images/jobs/sbx-new/cloud-init.iso") - if err != nil { - t.Fatalf("modifyClonedXML() error = %v", err) - } - - // Should have added CDROM device with cloud-init ISO - if !strings.Contains(newXML, 
`device="cdrom"`) { - t.Errorf("modifyClonedXML() did not add CDROM device in XML:\n%s", newXML) - } - - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-new/cloud-init.iso") { - t.Errorf("modifyClonedXML() did not add cloud-init ISO path in XML:\n%s", newXML) - } - - // Should have added SCSI controller for the CDROM - if !strings.Contains(newXML, `type="scsi"`) { - t.Errorf("modifyClonedXML() did not add SCSI controller in XML:\n%s", newXML) - } -} - -func TestModifyClonedXML_NoCloudInitISO(t *testing.T) { - // Test that modifyClonedXML works without cloud-init ISO (empty string) - sourceXML := ` - test-vm - 12345678-1234-1234-1234-123456789012 - 2097152 - 2 - - hvm - - - - - - - - - - - - - - - - - - -` - - // Empty cloudInitISO - should not modify CDROM - newXML, err := modifyClonedXML(sourceXML, "sbx-no-cloud", "/var/lib/libvirt/images/jobs/sbx-no-cloud/disk.qcow2", "") - if err != nil { - t.Fatalf("modifyClonedXML() error = %v", err) - } - - // Old CDROM path should still be there (unchanged) - if !strings.Contains(newXML, "/tmp/old-seed.img") { - t.Errorf("modifyClonedXML() modified CDROM when cloudInitISO was empty:\n%s", newXML) - } - - // Name and disk should still be updated - if !strings.Contains(newXML, "sbx-no-cloud") { - t.Error("modifyClonedXML() did not update VM name") - } - if !strings.Contains(newXML, "/var/lib/libvirt/images/jobs/sbx-no-cloud/disk.qcow2") { - t.Error("modifyClonedXML() did not update disk path") - } -} diff --git a/fluid/internal/model/clone_result.go b/fluid/internal/model/clone_result.go deleted file mode 100755 index 3e3a348a..00000000 --- a/fluid/internal/model/clone_result.go +++ /dev/null @@ -1,31 +0,0 @@ -package model - -// CloneResult represents the outcome of cloning a VM to a container. -type CloneResult struct { - // VM is the name of the source VM that was cloned. - VM string `json:"vm"` - - // ContainerID is the stable identifier of the created container. 
- ContainerID string `json:"container_id"` - - // Image is the full image tag (e.g., "vmclone/node-c:20251215T183000Z"). - Image string `json:"image"` - - // Mode indicates the extraction method used: "snapshot" for running VMs, - // "offline" for stopped VMs. - Mode string `json:"mode"` - - // Status indicates the final state: "ready" on success. - Status string `json:"status"` -} - -// Extraction modes -const ( - ModeSnapshot = "snapshot" - ModeOffline = "offline" -) - -// Result statuses -const ( - StatusReady = "ready" -) diff --git a/fluid/internal/provider/multihost.go b/fluid/internal/provider/multihost.go deleted file mode 100644 index dc36b92a..00000000 --- a/fluid/internal/provider/multihost.go +++ /dev/null @@ -1,33 +0,0 @@ -package provider - -import "context" - -// MultiHostVMInfo extends VM info with host identification. -type MultiHostVMInfo struct { - Name string - UUID string - State string - Persistent bool - DiskPath string - HostName string // Display name of the host - HostAddress string // IP or hostname of the host -} - -// HostError represents an error from querying a specific host. -type HostError struct { - HostName string `json:"host_name"` - HostAddress string `json:"host_address"` - Error string `json:"error"` -} - -// MultiHostListResult contains the aggregated result from querying all hosts. -type MultiHostListResult struct { - VMs []*MultiHostVMInfo - HostErrors []HostError -} - -// MultiHostLister can list VMs across multiple hosts and find which host has a given VM. 
-type MultiHostLister interface { - ListVMs(ctx context.Context) (*MultiHostListResult, error) - FindHostForVM(ctx context.Context, vmName string) (hostName, hostAddress string, err error) -} diff --git a/fluid/internal/provider/provider.go b/fluid/internal/provider/provider.go deleted file mode 100644 index 0c0067d1..00000000 --- a/fluid/internal/provider/provider.go +++ /dev/null @@ -1,61 +0,0 @@ -package provider - -import ( - "context" - "time" -) - -// Manager defines the VM orchestration operations supported by all providers. -// Implementations exist for libvirt/KVM (via virsh) and Proxmox VE (via REST API). -type Manager interface { - // CloneVM creates a VM from a golden base image and defines it. - // For libvirt: baseImage is a filename in base_image_dir; the image is copied and a new domain defined. - // For Proxmox: templates are VMs, so this delegates to CloneFromVM (baseImage = template name). - // Remote libvirt hosts do not support CloneVM; use CloneFromVM for provider-agnostic cloning. - // cpu and memoryMB are the VM shape. network is provider-specific (e.g., libvirt network name, - // Proxmox bridge name). - CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (VMRef, error) - - // CloneFromVM creates a clone from an existing VM by name. - // Works uniformly across all providers: resolves the source VM, copies/clones its disk, - // and creates a new VM with the specified shape. Prefer this over CloneVM for - // provider-agnostic code. - CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (VMRef, error) - - // InjectSSHKey injects an SSH public key for a user into the VM before boot. - // The mechanism is provider-specific (virt-customize, cloud-init, Proxmox sshkeys param, etc.). - InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error - - // StartVM boots a defined VM. 
- StartVM(ctx context.Context, vmName string) error - - // StopVM gracefully shuts down a VM, or forces if force is true. - StopVM(ctx context.Context, vmName string, force bool) error - - // DestroyVM removes the VM and its storage. - // If the VM is running, it will be stopped first. - DestroyVM(ctx context.Context, vmName string) error - - // CreateSnapshot creates a snapshot with the given name. - // If external is true, attempts a disk-only external snapshot (libvirt-specific; Proxmox ignores this). - CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (SnapshotRef, error) - - // DiffSnapshot prepares a plan to compare two snapshots' filesystems. - // The returned plan includes advice or prepared mounts where possible. - DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*FSComparePlan, error) - - // GetIPAddress attempts to fetch the VM's primary IP. - // Returns the IP address and MAC address of the VM's primary interface. - GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (ip string, mac string, err error) - - // GetVMState returns the current state of a VM. - GetVMState(ctx context.Context, vmName string) (VMState, error) - - // ValidateSourceVM performs pre-flight checks on a source VM before cloning. - // Returns a ValidationResult with warnings and errors about the VM's readiness. - ValidateSourceVM(ctx context.Context, vmName string) (*VMValidationResult, error) - - // CheckHostResources validates that the host has sufficient resources for a new sandbox. - // Returns a ResourceCheckResult with available resources and any warnings. 
- CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) (*ResourceCheckResult, error) -} diff --git a/fluid/internal/provider/types.go b/fluid/internal/provider/types.go deleted file mode 100644 index fcf2ef38..00000000 --- a/fluid/internal/provider/types.go +++ /dev/null @@ -1,74 +0,0 @@ -package provider - -// VMRef is a minimal reference to a VM. -type VMRef struct { - Name string - UUID string -} - -// SnapshotRef references a snapshot created for a VM. -type SnapshotRef struct { - Name string - // Kind: "INTERNAL" or "EXTERNAL" - Kind string - // Ref is driver-specific; could be an internal UUID or a file path. - Ref string -} - -// FSComparePlan describes a plan for diffing two snapshots' filesystems. -type FSComparePlan struct { - VMName string - FromSnapshot string - ToSnapshot string - - // Best-effort mount points (if prepared); may be empty strings. - FromMount string - ToMount string - - // Devices or files used; informative. - FromRef string - ToRef string - - // Free-form notes with instructions if the manager couldn't mount automatically. - Notes []string -} - -// VMState represents possible VM states. -type VMState string - -const ( - VMStateRunning VMState = "running" - VMStatePaused VMState = "paused" - VMStateShutOff VMState = "shut off" - VMStateCrashed VMState = "crashed" - VMStateSuspended VMState = "pmsuspended" - VMStateUnknown VMState = "unknown" -) - -// VMValidationResult contains the results of validating a source VM. -type VMValidationResult struct { - VMName string `json:"vm_name"` - Valid bool `json:"valid"` - State VMState `json:"state"` - MACAddress string `json:"mac_address,omitempty"` - IPAddress string `json:"ip_address,omitempty"` - HasNetwork bool `json:"has_network"` - Warnings []string `json:"warnings,omitempty"` - Errors []string `json:"errors,omitempty"` -} - -// ResourceCheckResult contains the results of checking host resources. 
-type ResourceCheckResult struct { - Valid bool `json:"valid"` - AvailableMemoryMB int64 `json:"available_memory_mb"` - TotalMemoryMB int64 `json:"total_memory_mb"` - AvailableCPUs int `json:"available_cpus"` - TotalCPUs int `json:"total_cpus"` - AvailableDiskMB int64 `json:"available_disk_mb"` - RequiredMemoryMB int `json:"required_memory_mb"` - RequiredCPUs int `json:"required_cpus"` - NeedsCPUApproval bool `json:"needs_cpu_approval"` - NeedsMemoryApproval bool `json:"needs_memory_approval"` - Warnings []string `json:"warnings,omitempty"` - Errors []string `json:"errors,omitempty"` -} diff --git a/fluid/internal/proxmox/client_test.go b/fluid/internal/proxmox/client_test.go deleted file mode 100644 index 8242930b..00000000 --- a/fluid/internal/proxmox/client_test.go +++ /dev/null @@ -1,1162 +0,0 @@ -package proxmox - -import ( - "context" - "encoding/json" - "io" - "log/slog" - "net/http" - "net/http/httptest" - "strings" - "sync/atomic" - "testing" - "time" -) - -// newTestServer creates a mock Proxmox API server and returns a Client pointed at it. -func newTestServer(t *testing.T, handler http.HandlerFunc) (*Client, *httptest.Server) { - t.Helper() - server := httptest.NewServer(handler) - cfg := Config{ - Host: server.URL, - TokenID: "test@pam!test", - Secret: "test-secret", - Node: "pve1", - VMIDStart: 9000, - VMIDEnd: 9999, - } - client := NewClient(cfg, nil) - return client, server -} - -// envelope wraps data in Proxmox API response format. 
-func envelope(data any) []byte { - resp := struct { - Data any `json:"data"` - }{Data: data} - b, _ := json.Marshal(resp) - return b -} - -// --- Auth & Headers --- - -func TestAuthHeader(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - auth := r.Header.Get("Authorization") - if auth != "PVEAPIToken=test@pam!test=test-secret" { - t.Errorf("unexpected auth header: %s", auth) - } - _, _ = w.Write(envelope([]VMListEntry{})) - }) - defer server.Close() - _, _ = client.ListVMs(context.Background()) -} - -func TestContentTypeSetForPOST(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - ct := r.Header.Get("Content-Type") - if r.Method == http.MethodPost { - if ct != "application/x-www-form-urlencoded" { - t.Errorf("expected form content-type for POST with body, got %q", ct) - } - } - _, _ = w.Write(envelope("UPID:test")) - }) - defer server.Close() - // CloneVM sends a POST with url.Values body, so Content-Type should be set - _, _ = client.CloneVM(context.Background(), 100, 200, "clone", true) -} - -func TestContentTypeNotSetForGET(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - ct := r.Header.Get("Content-Type") - if r.Method == http.MethodGet && ct != "" { - t.Errorf("expected no content-type for GET, got %q", ct) - } - _, _ = w.Write(envelope([]VMListEntry{})) - }) - defer server.Close() - _, _ = client.ListVMs(context.Background()) -} - -// --- ListVMs --- - -func TestListVMs(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/api2/json/nodes/pve1/qemu" { - t.Errorf("unexpected path: %s", r.URL.Path) - } - if r.Method != http.MethodGet { - t.Errorf("unexpected method: %s", r.Method) - } - vms := []VMListEntry{ - {VMID: 100, Name: "ubuntu-base", Status: "stopped", Template: 1, MaxMem: 4294967296}, - {VMID: 101, Name: "sandbox-1", Status: 
"running", CPU: 0.15, Mem: 1073741824}, - } - _, _ = w.Write(envelope(vms)) - }) - defer server.Close() - - vms, err := client.ListVMs(context.Background()) - if err != nil { - t.Fatalf("ListVMs: %v", err) - } - if len(vms) != 2 { - t.Fatalf("expected 2 VMs, got %d", len(vms)) - } - if vms[0].Name != "ubuntu-base" { - t.Errorf("expected ubuntu-base, got %s", vms[0].Name) - } - if vms[0].Template != 1 { - t.Errorf("expected template=1") - } - if vms[0].MaxMem != 4294967296 { - t.Errorf("expected maxmem 4294967296, got %d", vms[0].MaxMem) - } - if vms[1].VMID != 101 { - t.Errorf("expected VMID 101, got %d", vms[1].VMID) - } - if vms[1].Status != "running" { - t.Errorf("expected running, got %s", vms[1].Status) - } -} - -func TestListVMs_Empty(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{})) - }) - defer server.Close() - - vms, err := client.ListVMs(context.Background()) - if err != nil { - t.Fatalf("ListVMs: %v", err) - } - if len(vms) != 0 { - t.Errorf("expected 0 VMs, got %d", len(vms)) - } -} - -// --- GetVMStatus --- - -func TestGetVMStatus(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/api2/json/nodes/pve1/qemu/100/status/current" { - t.Errorf("unexpected path: %s", r.URL.Path) - } - status := VMStatus{ - VMID: 100, - Name: "ubuntu-base", - Status: "running", - QMPStatus: "running", - CPU: 0.15, - Mem: 1073741824, - MaxMem: 4294967296, - MaxDisk: 10737418240, - Uptime: 3600, - PID: 12345, - } - _, _ = w.Write(envelope(status)) - }) - defer server.Close() - - status, err := client.GetVMStatus(context.Background(), 100) - if err != nil { - t.Fatalf("GetVMStatus: %v", err) - } - if status.Status != "running" { - t.Errorf("expected running, got %s", status.Status) - } - if status.VMID != 100 { - t.Errorf("expected VMID 100, got %d", status.VMID) - } - if status.QMPStatus != "running" { - 
t.Errorf("expected qmpstatus running, got %s", status.QMPStatus) - } - if status.PID != 12345 { - t.Errorf("expected PID 12345, got %d", status.PID) - } - if status.MaxDisk != 10737418240 { - t.Errorf("expected maxdisk, got %d", status.MaxDisk) - } -} - -func TestGetVMStatus_Stopped(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(VMStatus{VMID: 200, Status: "stopped", Template: 1})) - }) - defer server.Close() - - status, err := client.GetVMStatus(context.Background(), 200) - if err != nil { - t.Fatalf("GetVMStatus: %v", err) - } - if status.Status != "stopped" { - t.Errorf("expected stopped, got %s", status.Status) - } - if status.Template != 1 { - t.Errorf("expected template=1") - } -} - -func TestGetVMStatus_Locked(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(VMStatus{VMID: 300, Status: "running", Lock: "clone"})) - }) - defer server.Close() - - status, err := client.GetVMStatus(context.Background(), 300) - if err != nil { - t.Fatalf("GetVMStatus: %v", err) - } - if status.Lock != "clone" { - t.Errorf("expected lock=clone, got %s", status.Lock) - } -} - -// --- GetVMConfig --- - -func TestGetVMConfig(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/api2/json/nodes/pve1/qemu/100/config" { - t.Errorf("unexpected path: %s", r.URL.Path) - } - cfg := VMConfig{ - Name: "ubuntu-base", - Memory: 4096, - Cores: 2, - Sockets: 1, - CPU: "host", - Net0: "virtio=AA:BB:CC:DD:EE:FF,bridge=vmbr0", - Agent: "1", - IPConfig0: "ip=dhcp", - CIUser: "ubuntu", - Boot: "order=scsi0", - } - _, _ = w.Write(envelope(cfg)) - }) - defer server.Close() - - cfg, err := client.GetVMConfig(context.Background(), 100) - if err != nil { - t.Fatalf("GetVMConfig: %v", err) - } - if cfg.Cores != 2 { - t.Errorf("expected 2 cores, got %d", cfg.Cores) - } - if cfg.Agent != 
"1" { - t.Errorf("expected agent=1, got %s", cfg.Agent) - } - if cfg.Memory != 4096 { - t.Errorf("expected memory 4096, got %d", cfg.Memory) - } - if cfg.CIUser != "ubuntu" { - t.Errorf("expected ciuser=ubuntu, got %s", cfg.CIUser) - } - if cfg.IPConfig0 != "ip=dhcp" { - t.Errorf("expected ipconfig0=ip=dhcp, got %s", cfg.IPConfig0) - } - if cfg.CPU != "host" { - t.Errorf("expected cpu=host, got %s", cfg.CPU) - } -} - -func TestGetVMConfig_Minimal(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(VMConfig{Name: "minimal", Memory: 512, Cores: 1})) - }) - defer server.Close() - - cfg, err := client.GetVMConfig(context.Background(), 100) - if err != nil { - t.Fatalf("GetVMConfig: %v", err) - } - if cfg.Net0 != "" { - t.Errorf("expected empty net0, got %s", cfg.Net0) - } - if cfg.Agent != "" { - t.Errorf("expected empty agent, got %s", cfg.Agent) - } -} - -// --- CloneVM --- - -func TestCloneVM_Full(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - t.Errorf("expected POST, got %s", r.Method) - } - if r.URL.Path != "/api2/json/nodes/pve1/qemu/100/clone" { - t.Errorf("unexpected path: %s", r.URL.Path) - } - body, _ := io.ReadAll(r.Body) - bodyStr := string(body) - if !strings.Contains(bodyStr, "newid=9001") { - t.Errorf("expected newid=9001 in body, got: %s", bodyStr) - } - if !strings.Contains(bodyStr, "name=sandbox-1") { - t.Errorf("expected name=sandbox-1 in body, got: %s", bodyStr) - } - if !strings.Contains(bodyStr, "full=1") { - t.Errorf("expected full=1 in body, got: %s", bodyStr) - } - _, _ = w.Write(envelope("UPID:pve1:00001234:00000000:12345678:qmclone:100:root@pam:")) - }) - defer server.Close() - - upid, err := client.CloneVM(context.Background(), 100, 9001, "sandbox-1", true) - if err != nil { - t.Fatalf("CloneVM: %v", err) - } - if !strings.Contains(upid, "qmclone") { - t.Errorf("expected UPID with 
qmclone, got %s", upid) - } -} - -func TestCloneVM_Linked(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - body, _ := io.ReadAll(r.Body) - bodyStr := string(body) - if strings.Contains(bodyStr, "full=") { - t.Errorf("linked clone should not have full param, got: %s", bodyStr) - } - _, _ = w.Write(envelope("UPID:pve1:linked")) - }) - defer server.Close() - - upid, err := client.CloneVM(context.Background(), 100, 9001, "sandbox-1", false) - if err != nil { - t.Fatalf("CloneVM linked: %v", err) - } - if upid == "" { - t.Error("expected non-empty UPID") - } -} - -// --- SetVMConfig --- - -func TestSetVMConfig(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPut { - t.Errorf("expected PUT, got %s", r.Method) - } - if r.URL.Path != "/api2/json/nodes/pve1/qemu/100/config" { - t.Errorf("unexpected path: %s", r.URL.Path) - } - body, _ := io.ReadAll(r.Body) - bodyStr := string(body) - if !strings.Contains(bodyStr, "cores=4") { - t.Errorf("expected cores=4 in body, got: %s", bodyStr) - } - if !strings.Contains(bodyStr, "memory=8192") { - t.Errorf("expected memory=8192 in body, got: %s", bodyStr) - } - _, _ = w.Write(envelope(nil)) - }) - defer server.Close() - - params := make(map[string][]string) - params["cores"] = []string{"4"} - params["memory"] = []string{"8192"} - err := client.SetVMConfig(context.Background(), 100, params) - if err != nil { - t.Fatalf("SetVMConfig: %v", err) - } -} - -// --- StartVM --- - -func TestStartVM(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/api2/json/nodes/pve1/qemu/100/status/start" { - t.Errorf("unexpected path: %s", r.URL.Path) - } - if r.Method != http.MethodPost { - t.Errorf("expected POST, got %s", r.Method) - } - _, _ = w.Write(envelope("UPID:pve1:00001234:start")) - }) - defer server.Close() - - upid, err := 
client.StartVM(context.Background(), 100) - if err != nil { - t.Fatalf("StartVM: %v", err) - } - if upid == "" { - t.Error("expected non-empty UPID") - } -} - -// --- StopVM --- - -func TestStopVM(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/api2/json/nodes/pve1/qemu/100/status/stop" { - t.Errorf("unexpected path: %s", r.URL.Path) - } - _, _ = w.Write(envelope("UPID:pve1:00001234:stop")) - }) - defer server.Close() - - upid, err := client.StopVM(context.Background(), 100) - if err != nil { - t.Fatalf("StopVM: %v", err) - } - if upid == "" { - t.Error("expected non-empty UPID") - } -} - -// --- ShutdownVM --- - -func TestShutdownVM(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/api2/json/nodes/pve1/qemu/100/status/shutdown" { - t.Errorf("unexpected path: %s", r.URL.Path) - } - if r.Method != http.MethodPost { - t.Errorf("expected POST, got %s", r.Method) - } - _, _ = w.Write(envelope("UPID:pve1:00001234:shutdown")) - }) - defer server.Close() - - upid, err := client.ShutdownVM(context.Background(), 100) - if err != nil { - t.Fatalf("ShutdownVM: %v", err) - } - if upid == "" { - t.Error("expected non-empty UPID") - } -} - -// --- DeleteVM --- - -func TestDeleteVM(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodDelete { - t.Errorf("expected DELETE, got %s", r.Method) - } - // Verify purge and destroy-unreferenced-disks params in URL - if !strings.Contains(r.URL.RawQuery, "purge=1") { - t.Errorf("expected purge=1 in query, got %s", r.URL.RawQuery) - } - if !strings.Contains(r.URL.RawQuery, "destroy-unreferenced-disks=1") { - t.Errorf("expected destroy-unreferenced-disks=1, got %s", r.URL.RawQuery) - } - _, _ = w.Write(envelope("UPID:pve1:00001234:delete")) - }) - defer server.Close() - - upid, err := client.DeleteVM(context.Background(), 100) 
- if err != nil { - t.Fatalf("DeleteVM: %v", err) - } - if upid == "" { - t.Error("expected non-empty UPID") - } -} - -// --- GetTaskStatus --- - -func TestGetTaskStatus(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - status := TaskStatus{ - Status: "stopped", - ExitStatus: "OK", - Type: "qmclone", - Node: "pve1", - PID: 12345, - StartTime: 1700000000, - EndTime: 1700000060, - } - _, _ = w.Write(envelope(status)) - }) - defer server.Close() - - status, err := client.GetTaskStatus(context.Background(), "UPID:pve1:00001234:test") - if err != nil { - t.Fatalf("GetTaskStatus: %v", err) - } - if status.Status != "stopped" { - t.Errorf("expected stopped, got %s", status.Status) - } - if status.ExitStatus != "OK" { - t.Errorf("expected OK, got %s", status.ExitStatus) - } - if status.Type != "qmclone" { - t.Errorf("expected qmclone, got %s", status.Type) - } - if status.PID != 12345 { - t.Errorf("expected PID 12345, got %d", status.PID) - } -} - -func TestGetTaskStatus_Running(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(TaskStatus{Status: "running", Type: "qmstart"})) - }) - defer server.Close() - - status, err := client.GetTaskStatus(context.Background(), "UPID:pve1:running") - if err != nil { - t.Fatalf("GetTaskStatus: %v", err) - } - if status.Status != "running" { - t.Errorf("expected running, got %s", status.Status) - } -} - -// --- WaitForTask --- - -func TestWaitForTask_ImmediateSuccess(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(TaskStatus{Status: "stopped", ExitStatus: "OK"})) - }) - defer server.Close() - - err := client.WaitForTask(context.Background(), "UPID:pve1:test") - if err != nil { - t.Fatalf("WaitForTask: %v", err) - } -} - -func TestWaitForTask_EmptyUPID(t *testing.T) { - // Should return nil immediately without making any API calls - 
client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - t.Error("should not make API call for empty UPID") - }) - defer server.Close() - - err := client.WaitForTask(context.Background(), "") - if err != nil { - t.Fatalf("WaitForTask empty UPID: %v", err) - } -} - -func TestWaitForTask_TaskFailure(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(TaskStatus{Status: "stopped", ExitStatus: "command 'qm clone' failed: storage error"})) - }) - defer server.Close() - - err := client.WaitForTask(context.Background(), "UPID:pve1:fail") - if err == nil { - t.Fatal("expected error for failed task") - } - if !strings.Contains(err.Error(), "task failed") { - t.Errorf("expected 'task failed' in error, got: %s", err.Error()) - } - if !strings.Contains(err.Error(), "storage error") { - t.Errorf("expected exit status in error, got: %s", err.Error()) - } -} - -func TestWaitForTask_ContextCancelled(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(TaskStatus{Status: "running"})) - }) - defer server.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - - err := client.WaitForTask(ctx, "UPID:pve1:slow") - if err == nil { - t.Fatal("expected error for cancelled context") - } -} - -func TestWaitForTask_PollsUntilDone(t *testing.T) { - var callCount int32 - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - n := atomic.AddInt32(&callCount, 1) - if n < 3 { - _, _ = w.Write(envelope(TaskStatus{Status: "running"})) - } else { - _, _ = w.Write(envelope(TaskStatus{Status: "stopped", ExitStatus: "OK"})) - } - }) - defer server.Close() - - err := client.WaitForTask(context.Background(), "UPID:pve1:poll") - if err != nil { - t.Fatalf("WaitForTask: %v", err) - } - if atomic.LoadInt32(&callCount) < 3 { - t.Errorf("expected at least 3 
polls, got %d", callCount) - } -} - -// --- CreateSnapshot --- - -func TestCreateSnapshot_WithDescription(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - t.Errorf("expected POST, got %s", r.Method) - } - body, _ := io.ReadAll(r.Body) - bodyStr := string(body) - if !strings.Contains(bodyStr, "snapname=snap1") { - t.Errorf("expected snapname=snap1, got: %s", bodyStr) - } - if !strings.Contains(bodyStr, "description=test+snapshot") { - t.Errorf("expected description in body, got: %s", bodyStr) - } - _, _ = w.Write(envelope("UPID:pve1:snapshot")) - }) - defer server.Close() - - upid, err := client.CreateSnapshot(context.Background(), 100, "snap1", "test snapshot") - if err != nil { - t.Fatalf("CreateSnapshot: %v", err) - } - if upid == "" { - t.Error("expected non-empty UPID") - } -} - -func TestCreateSnapshot_WithoutDescription(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - body, _ := io.ReadAll(r.Body) - bodyStr := string(body) - if strings.Contains(bodyStr, "description") { - t.Errorf("expected no description, got: %s", bodyStr) - } - _, _ = w.Write(envelope("UPID:pve1:snapshot")) - }) - defer server.Close() - - _, err := client.CreateSnapshot(context.Background(), 100, "snap1", "") - if err != nil { - t.Fatalf("CreateSnapshot: %v", err) - } -} - -func TestCreateSnapshot_SyncReturn(t *testing.T) { - // Proxmox sometimes returns null data for synchronous snapshot completion - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(nil)) - }) - defer server.Close() - - upid, err := client.CreateSnapshot(context.Background(), 100, "snap1", "") - if err != nil { - t.Fatalf("CreateSnapshot sync: %v", err) - } - // Should return empty string without error for sync completion - if upid != "" { - t.Errorf("expected empty UPID for sync snapshot, got %s", upid) - } -} - -// --- 
GetGuestAgentInterfaces --- - -func TestGetGuestAgentInterfaces_WrappedResult(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/api2/json/nodes/pve1/qemu/100/agent/network-get-interfaces" { - t.Errorf("unexpected path: %s", r.URL.Path) - } - ifaces := struct { - Result []NetworkInterface `json:"result"` - }{ - Result: []NetworkInterface{ - { - Name: "eth0", - HardwareAddress: "AA:BB:CC:DD:EE:FF", - IPAddresses: []GuestIPAddress{ - {IPAddressType: "ipv4", IPAddress: "192.168.1.100", Prefix: 24}, - {IPAddressType: "ipv6", IPAddress: "fe80::1", Prefix: 64}, - }, - }, - { - Name: "lo", - HardwareAddress: "00:00:00:00:00:00", - IPAddresses: []GuestIPAddress{ - {IPAddressType: "ipv4", IPAddress: "127.0.0.1", Prefix: 8}, - }, - }, - }, - } - _, _ = w.Write(envelope(ifaces)) - }) - defer server.Close() - - ifaces, err := client.GetGuestAgentInterfaces(context.Background(), 100) - if err != nil { - t.Fatalf("GetGuestAgentInterfaces: %v", err) - } - if len(ifaces) != 2 { - t.Fatalf("expected 2 interfaces, got %d", len(ifaces)) - } - if ifaces[0].Name != "eth0" { - t.Errorf("expected eth0, got %s", ifaces[0].Name) - } - if len(ifaces[0].IPAddresses) != 2 { - t.Errorf("expected 2 IPs on eth0, got %d", len(ifaces[0].IPAddresses)) - } - if ifaces[0].IPAddresses[1].IPAddressType != "ipv6" { - t.Errorf("expected ipv6, got %s", ifaces[0].IPAddresses[1].IPAddressType) - } -} - -func TestGetGuestAgentInterfaces_DirectArray(t *testing.T) { - // Some Proxmox versions may return the array directly without wrapping - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - ifaces := []NetworkInterface{ - { - Name: "ens18", - HardwareAddress: "11:22:33:44:55:66", - IPAddresses: []GuestIPAddress{ - {IPAddressType: "ipv4", IPAddress: "10.0.0.5", Prefix: 24}, - }, - }, - } - _, _ = w.Write(envelope(ifaces)) - }) - defer server.Close() - - ifaces, err := 
client.GetGuestAgentInterfaces(context.Background(), 100) - if err != nil { - t.Fatalf("GetGuestAgentInterfaces direct: %v", err) - } - if len(ifaces) != 1 { - t.Fatalf("expected 1 interface, got %d", len(ifaces)) - } - if ifaces[0].Name != "ens18" { - t.Errorf("expected ens18, got %s", ifaces[0].Name) - } -} - -func TestGetGuestAgentInterfaces_Empty(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(struct { - Result []NetworkInterface `json:"result"` - }{})) - }) - defer server.Close() - - ifaces, err := client.GetGuestAgentInterfaces(context.Background(), 100) - if err != nil { - t.Fatalf("GetGuestAgentInterfaces: %v", err) - } - if len(ifaces) != 0 { - t.Errorf("expected 0 interfaces, got %d", len(ifaces)) - } -} - -// --- GetNodeStatus --- - -func TestGetNodeStatus(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/api2/json/nodes/pve1/status" { - t.Errorf("unexpected path: %s", r.URL.Path) - } - status := NodeStatus{ - CPU: 0.25, - MaxCPU: 8, - Memory: MemoryStatus{ - Total: 16 * 1024 * 1024 * 1024, - Used: 8 * 1024 * 1024 * 1024, - Free: 8 * 1024 * 1024 * 1024, - }, - RootFS: DiskStatus{ - Total: 100 * 1024 * 1024 * 1024, - Used: 30 * 1024 * 1024 * 1024, - Available: 70 * 1024 * 1024 * 1024, - }, - Uptime: 86400, - KVersion: "6.1.0-amd64", - } - _, _ = w.Write(envelope(status)) - }) - defer server.Close() - - status, err := client.GetNodeStatus(context.Background()) - if err != nil { - t.Fatalf("GetNodeStatus: %v", err) - } - if status.MaxCPU != 8 { - t.Errorf("expected 8 CPUs, got %d", status.MaxCPU) - } - if status.Uptime != 86400 { - t.Errorf("expected uptime 86400, got %d", status.Uptime) - } - if status.KVersion != "6.1.0-amd64" { - t.Errorf("expected kversion, got %s", status.KVersion) - } - if status.Memory.Free != 8*1024*1024*1024 { - t.Errorf("unexpected free memory: %d", status.Memory.Free) - } - if 
status.RootFS.Available != 70*1024*1024*1024 { - t.Errorf("unexpected available disk: %d", status.RootFS.Available) - } -} - -// --- NextVMID --- - -func TestNextVMID_Gap(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - vms := []VMListEntry{ - {VMID: 9000, Name: "vm1"}, - {VMID: 9001, Name: "vm2"}, - {VMID: 9003, Name: "vm3"}, - } - _, _ = w.Write(envelope(vms)) - }) - defer server.Close() - - id, err := client.NextVMID(context.Background(), 9000, 9999) - if err != nil { - t.Fatalf("NextVMID: %v", err) - } - if id != 9002 { - t.Errorf("expected 9002, got %d", id) - } -} - -func TestNextVMID_FirstAvailable(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{})) - }) - defer server.Close() - - id, err := client.NextVMID(context.Background(), 9000, 9999) - if err != nil { - t.Fatalf("NextVMID: %v", err) - } - if id != 9000 { - t.Errorf("expected 9000, got %d", id) - } -} - -func TestNextVMID_AllUsed(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - vms := []VMListEntry{ - {VMID: 9000}, {VMID: 9001}, {VMID: 9002}, - } - _, _ = w.Write(envelope(vms)) - }) - defer server.Close() - - _, err := client.NextVMID(context.Background(), 9000, 9002) - if err == nil { - t.Error("expected error when all VMIDs used") - } - if !strings.Contains(err.Error(), "no available VMID") { - t.Errorf("expected 'no available VMID', got: %s", err.Error()) - } -} - -func TestNextVMID_OutOfRangeVMsIgnored(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - vms := []VMListEntry{ - {VMID: 100, Name: "outside-range"}, - {VMID: 200, Name: "also-outside"}, - } - _, _ = w.Write(envelope(vms)) - }) - defer server.Close() - - id, err := client.NextVMID(context.Background(), 9000, 9999) - if err != nil { - t.Fatalf("NextVMID: %v", err) - } - if id != 9000 { - 
t.Errorf("expected 9000 (out-of-range VMs should not affect range), got %d", id) - } -} - -// --- ResizeVM --- - -func TestResizeVM(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPut { - t.Errorf("expected PUT, got %s", r.Method) - } - body, _ := io.ReadAll(r.Body) - bodyStr := string(body) - if !strings.Contains(bodyStr, "cores=4") { - t.Errorf("expected cores=4, got: %s", bodyStr) - } - if !strings.Contains(bodyStr, "memory=8192") { - t.Errorf("expected memory=8192, got: %s", bodyStr) - } - _, _ = w.Write(envelope(nil)) - }) - defer server.Close() - - err := client.ResizeVM(context.Background(), 100, 4, 8192) - if err != nil { - t.Fatalf("ResizeVM: %v", err) - } -} - -// --- HTTP Error handling --- - -func TestAPIError_Forbidden(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusForbidden) - _, _ = w.Write([]byte(`{"errors":{"token":"invalid"}}`)) - }) - defer server.Close() - - _, err := client.ListVMs(context.Background()) - if err == nil { - t.Fatal("expected error for 403 response") - } - if !strings.Contains(err.Error(), "403") { - t.Errorf("expected 403 in error, got: %s", err.Error()) - } -} - -func TestAPIError_NotFound(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) - _, _ = w.Write([]byte(`{"errors":{"vmid":"does not exist"}}`)) - }) - defer server.Close() - - _, err := client.GetVMStatus(context.Background(), 999) - if err == nil { - t.Fatal("expected error for 404") - } - if !strings.Contains(err.Error(), "404") { - t.Errorf("expected 404 in error, got: %s", err.Error()) - } -} - -func TestAPIError_InternalServerError(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - _, _ = w.Write([]byte("Internal Server 
Error")) - }) - defer server.Close() - - _, err := client.GetNodeStatus(context.Background()) - if err == nil { - t.Fatal("expected error for 500") - } - if !strings.Contains(err.Error(), "500") { - t.Errorf("expected 500 in error, got: %s", err.Error()) - } -} - -func TestAPIError_Unauthorized(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusUnauthorized) - _, _ = w.Write([]byte(`{"data":null}`)) - }) - defer server.Close() - - _, err := client.ListVMs(context.Background()) - if err == nil { - t.Fatal("expected error for 401") - } - if !strings.Contains(err.Error(), "401") { - t.Errorf("expected 401 in error, got: %s", err.Error()) - } -} - -func TestAPIError_InvalidJSON(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write([]byte("not json at all")) - }) - defer server.Close() - - _, err := client.ListVMs(context.Background()) - if err == nil { - t.Fatal("expected error for invalid JSON") - } - if !strings.Contains(err.Error(), "unmarshal") { - t.Errorf("expected unmarshal error, got: %s", err.Error()) - } -} - -func TestAPIError_ConnectionRefused(t *testing.T) { - cfg := Config{ - Host: "http://127.0.0.1:1", // port 1 should refuse connections - TokenID: "test@pam!test", - Secret: "secret", - Node: "pve1", - VMIDStart: 9000, - VMIDEnd: 9999, - } - client := NewClient(cfg, nil) - - _, err := client.ListVMs(context.Background()) - if err == nil { - t.Fatal("expected error for connection refused") - } -} - -func TestAPIError_ContextCancelled(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - time.Sleep(5 * time.Second) - _, _ = w.Write(envelope([]VMListEntry{})) - }) - defer server.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) - defer cancel() - - _, err := client.ListVMs(ctx) - if err == nil { - t.Fatal("expected error for cancelled 
context") - } -} - -// --- NewClient --- - -func TestNewClient_TrimsTrailingSlash(t *testing.T) { - cfg := Config{ - Host: "https://pve.example.com:8006/", - TokenID: "test", - Secret: "secret", - Node: "pve1", - } - client := NewClient(cfg, nil) - if strings.HasSuffix(client.baseURL, "/") { - t.Errorf("expected trailing slash trimmed, got %s", client.baseURL) - } -} - -func TestNewClient_NilLogger(t *testing.T) { - cfg := Config{ - Host: "https://pve.example.com:8006", - TokenID: "test", - Secret: "secret", - Node: "pve1", - } - client := NewClient(cfg, nil) - if client.logger == nil { - t.Error("expected default logger, got nil") - } -} - -func TestNewClient_CustomTimeout(t *testing.T) { - // Create a slow server that takes longer than the custom timeout - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(500 * time.Millisecond) - _, _ = w.Write(envelope([]VMListEntry{})) - })) - defer server.Close() - - cfg := Config{ - Host: server.URL, - TokenID: "test@pam!test", - Secret: "test-secret", - Node: "pve1", - Timeout: 100 * time.Millisecond, // Shorter than server delay - } - client := NewClient(cfg, nil) - - // Request should fail due to client timeout - _, err := client.ListVMs(context.Background()) - if err == nil { - t.Fatal("expected timeout error with custom short timeout") - } - - // Now verify a longer timeout works - cfg.Timeout = 5 * time.Second - client = NewClient(cfg, nil) - vms, err := client.ListVMs(context.Background()) - if err != nil { - t.Fatalf("expected success with longer timeout, got: %v", err) - } - if len(vms) != 0 { - t.Errorf("expected 0 VMs, got %d", len(vms)) - } -} - -func TestNewClient_InsecureTLSWarning(t *testing.T) { - var records []slog.Record - handler := &captureHandler{records: &records} - logger := slog.New(handler) - - cfg := Config{ - Host: "https://pve.example.com:8006", - TokenID: "test", - Secret: "secret", - Node: "pve1", - VerifySSL: false, - } - _ = 
NewClient(cfg, logger) - - found := false - for _, r := range records { - if r.Level == slog.LevelWarn && strings.Contains(r.Message, "TLS certificate verification is disabled") { - found = true - break - } - } - if !found { - t.Error("expected TLS warning log when VerifySSL is false") - } -} - -// captureHandler is a slog.Handler that captures log records for testing. -type captureHandler struct { - records *[]slog.Record -} - -func (h *captureHandler) Enabled(_ context.Context, _ slog.Level) bool { return true } - -func (h *captureHandler) Handle(_ context.Context, r slog.Record) error { - *h.records = append(*h.records, r) - return nil -} - -func (h *captureHandler) WithAttrs(_ []slog.Attr) slog.Handler { return h } - -func (h *captureHandler) WithGroup(_ string) slog.Handler { return h } - -// --- HTTP Retry --- - -func TestHTTPRetry_TransientError(t *testing.T) { - var requestCount int32 - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - n := atomic.AddInt32(&requestCount, 1) - if n < 3 { - w.WriteHeader(http.StatusServiceUnavailable) - _, _ = w.Write([]byte("Service Unavailable")) - return - } - _, _ = w.Write(envelope([]VMListEntry{{VMID: 100, Name: "test-vm"}})) - }) - defer server.Close() - - vms, err := client.ListVMs(context.Background()) - if err != nil { - t.Fatalf("expected success after retries, got: %v", err) - } - if len(vms) != 1 { - t.Fatalf("expected 1 VM, got %d", len(vms)) - } - if vms[0].Name != "test-vm" { - t.Errorf("expected test-vm, got %s", vms[0].Name) - } - if atomic.LoadInt32(&requestCount) != 3 { - t.Errorf("expected 3 requests total, got %d", requestCount) - } -} - -func TestHTTPRetry_NoRetryOn4xx(t *testing.T) { - var requestCount int32 - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(&requestCount, 1) - w.WriteHeader(http.StatusNotFound) - _, _ = w.Write([]byte(`{"errors":{"vmid":"does not exist"}}`)) - }) - defer server.Close() - - _, err := 
client.GetVMStatus(context.Background(), 999) - if err == nil { - t.Fatal("expected error for 404") - } - if !strings.Contains(err.Error(), "404") { - t.Errorf("expected 404 in error, got: %s", err.Error()) - } - if atomic.LoadInt32(&requestCount) != 1 { - t.Errorf("expected exactly 1 request (no retries for 4xx), got %d", requestCount) - } -} diff --git a/fluid/internal/proxmox/config.go b/fluid/internal/proxmox/config.go deleted file mode 100644 index 53fe4d8f..00000000 --- a/fluid/internal/proxmox/config.go +++ /dev/null @@ -1,56 +0,0 @@ -package proxmox - -import ( - "fmt" - "time" -) - -// Config holds all settings needed to connect to a Proxmox VE cluster. -type Config struct { - Host string // Base URL, e.g., "https://pve.example.com:8006" - TokenID string // API token ID, e.g., "root@pam!fluid" - Secret string // API token secret - Node string // Target node name, e.g., "pve1" - VerifySSL bool // Verify TLS certificates (set to false only for self-signed certs; disabling exposes connections to MITM attacks) - Storage string // Storage for VM disks, e.g., "local-lvm" - Bridge string // Network bridge, e.g., "vmbr0" - CloneMode string // "full" or "linked" - VMIDStart int // Start of VMID range for sandboxes - VMIDEnd int // End of VMID range for sandboxes - Timeout time.Duration `yaml:"timeout"` // HTTP client timeout -} - -// Validate checks that required config fields are set. 
-func (c *Config) Validate() error { - if c.Host == "" { - return fmt.Errorf("proxmox host is required") - } - if c.TokenID == "" { - return fmt.Errorf("proxmox token_id is required") - } - if c.Secret == "" { - return fmt.Errorf("proxmox secret is required") - } - if c.Node == "" { - return fmt.Errorf("proxmox node is required") - } - if c.VMIDStart <= 0 { - c.VMIDStart = 9000 - } - if c.VMIDEnd <= 0 { - c.VMIDEnd = 9999 - } - if c.VMIDEnd <= c.VMIDStart { - return fmt.Errorf("proxmox vmid_end (%d) must be greater than vmid_start (%d)", c.VMIDEnd, c.VMIDStart) - } - if c.Timeout == 0 { - c.Timeout = 5 * time.Minute - } - if c.CloneMode == "" { - c.CloneMode = "full" - } - if c.CloneMode != "full" && c.CloneMode != "linked" { - return fmt.Errorf("proxmox clone_mode must be 'full' or 'linked', got %q", c.CloneMode) - } - return nil -} diff --git a/fluid/internal/proxmox/manager.go b/fluid/internal/proxmox/manager.go deleted file mode 100644 index 9ddae0ee..00000000 --- a/fluid/internal/proxmox/manager.go +++ /dev/null @@ -1,404 +0,0 @@ -package proxmox - -import ( - "context" - "fmt" - "log/slog" - "net" - "net/url" - "strings" - "sync" - "time" - - "github.com/aspectrr/fluid.sh/fluid/internal/provider" -) - -// ProxmoxManager implements provider.Manager for Proxmox VE. -type ProxmoxManager struct { - client *Client - cfg Config - resolver *VMResolver - logger *slog.Logger - vmidMu sync.Mutex -} - -// NewProxmoxManager creates a new Proxmox provider manager. -func NewProxmoxManager(cfg Config, logger *slog.Logger) (*ProxmoxManager, error) { - if err := cfg.Validate(); err != nil { - return nil, fmt.Errorf("invalid proxmox config: %w", err) - } - if logger == nil { - logger = slog.Default() - } - - client := NewClient(cfg, logger) - return &ProxmoxManager{ - client: client, - cfg: cfg, - resolver: NewVMResolver(client), - logger: logger, - }, nil -} - -// CloneVM clones a VM from a base image. 
On Proxmox, base images are templates -// (which are themselves VMs), so CloneVM and CloneFromVM are semantically identical. -// This delegates to CloneFromVM, treating baseImage as a source VM/template name. -func (m *ProxmoxManager) CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (provider.VMRef, error) { - return m.CloneFromVM(ctx, baseImage, newVMName, cpu, memoryMB, network) -} - -// CloneFromVM creates a clone of an existing VM. -// It resolves the source VM name to a VMID, allocates a new VMID, and clones. -// The network parameter sets the Proxmox bridge via the net0 config param -// (format: "virtio,bridge="). If network is empty, falls back to -// Config.Bridge. If both are empty, the template's network config is preserved. -func (m *ProxmoxManager) CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (provider.VMRef, error) { - sourceVMID, err := m.resolver.ResolveVMID(ctx, sourceVMName) - if err != nil { - return provider.VMRef{}, fmt.Errorf("resolve source VM %q: %w", sourceVMName, err) - } - - // Lock to serialize VMID allocation + clone so concurrent callers - // cannot receive the same VMID before Proxmox reserves it. 
- m.vmidMu.Lock() - newVMID, err := m.client.NextVMID(ctx, m.cfg.VMIDStart, m.cfg.VMIDEnd) - if err != nil { - m.vmidMu.Unlock() - return provider.VMRef{}, fmt.Errorf("allocate VMID: %w", err) - } - - full := m.cfg.CloneMode == "full" - m.logger.Info("cloning VM", - "source", sourceVMName, - "source_vmid", sourceVMID, - "new_name", newVMName, - "new_vmid", newVMID, - "full_clone", full, - ) - - upid, err := m.client.CloneVM(ctx, sourceVMID, newVMID, newVMName, full) - m.vmidMu.Unlock() - if err != nil { - return provider.VMRef{}, fmt.Errorf("clone VM: %w", err) - } - - if err := m.client.WaitForTask(ctx, upid); err != nil { - return provider.VMRef{}, fmt.Errorf("wait for clone: %w", err) - } - - // Configure the clone with requested CPU/memory/network - if cpu > 0 || memoryMB > 0 || network != "" || m.cfg.Bridge != "" { - params := url.Values{} - if cpu > 0 { - params.Set("cores", fmt.Sprintf("%d", cpu)) - } - if memoryMB > 0 { - params.Set("memory", fmt.Sprintf("%d", memoryMB)) - } - if network != "" { - params.Set("net0", fmt.Sprintf("virtio,bridge=%s", network)) - } else if m.cfg.Bridge != "" { - params.Set("net0", fmt.Sprintf("virtio,bridge=%s", m.cfg.Bridge)) - } - if err := m.client.SetVMConfig(ctx, newVMID, params); err != nil { - return provider.VMRef{}, fmt.Errorf("set CPU/memory on clone %d: %w", newVMID, err) - } - } - - // Refresh resolver cache to include the new VM - _ = m.resolver.Refresh(ctx) - - return provider.VMRef{ - Name: newVMName, - UUID: fmt.Sprintf("%d", newVMID), - }, nil -} - -// InjectSSHKey sets SSH keys on a VM via cloud-init configuration. 
-func (m *ProxmoxManager) InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error { - vmid, err := m.resolver.ResolveVMID(ctx, sandboxName) - if err != nil { - return fmt.Errorf("resolve VM %q: %w", sandboxName, err) - } - - params := url.Values{} - if username != "" { - params.Set("ciuser", username) - } - // Proxmox requires URL-encoded SSH keys - params.Set("sshkeys", url.QueryEscape(strings.TrimSpace(publicKey))) - - return m.client.SetVMConfig(ctx, vmid, params) -} - -// StartVM boots a VM. -func (m *ProxmoxManager) StartVM(ctx context.Context, vmName string) error { - vmid, err := m.resolver.ResolveVMID(ctx, vmName) - if err != nil { - return fmt.Errorf("resolve VM %q: %w", vmName, err) - } - - upid, err := m.client.StartVM(ctx, vmid) - if err != nil { - return fmt.Errorf("start VM: %w", err) - } - - return m.client.WaitForTask(ctx, upid) -} - -// StopVM gracefully shuts down or force-stops a VM. -func (m *ProxmoxManager) StopVM(ctx context.Context, vmName string, force bool) error { - vmid, err := m.resolver.ResolveVMID(ctx, vmName) - if err != nil { - return fmt.Errorf("resolve VM %q: %w", vmName, err) - } - - var upid string - if force { - upid, err = m.client.StopVM(ctx, vmid) - } else { - upid, err = m.client.ShutdownVM(ctx, vmid) - } - if err != nil { - return fmt.Errorf("stop VM: %w", err) - } - - return m.client.WaitForTask(ctx, upid) -} - -// DestroyVM stops (if running) and deletes a VM and all its resources. 
-func (m *ProxmoxManager) DestroyVM(ctx context.Context, vmName string) error { - vmid, err := m.resolver.ResolveVMID(ctx, vmName) - if err != nil { - return fmt.Errorf("resolve VM %q: %w", vmName, err) - } - - // Check if running, stop first - status, err := m.client.GetVMStatus(ctx, vmid) - if err != nil { - return fmt.Errorf("get VM status: %w", err) - } - if status.Status == "running" { - m.logger.Info("stopping running VM before destroy", "vm", vmName, "vmid", vmid) - upid, err := m.client.StopVM(ctx, vmid) - if err != nil { - return fmt.Errorf("stop VM before destroy: %w", err) - } - if err := m.client.WaitForTask(ctx, upid); err != nil { - return fmt.Errorf("wait for stop: %w", err) - } - } - - upid, err := m.client.DeleteVM(ctx, vmid) - if err != nil { - return fmt.Errorf("delete VM: %w", err) - } - - if err := m.client.WaitForTask(ctx, upid); err != nil { - return fmt.Errorf("wait for delete: %w", err) - } - - // Refresh resolver cache - _ = m.resolver.Refresh(ctx) - - return nil -} - -// CreateSnapshot creates a snapshot of a VM. -// The external parameter is ignored for Proxmox. -func (m *ProxmoxManager) CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (provider.SnapshotRef, error) { - vmid, err := m.resolver.ResolveVMID(ctx, vmName) - if err != nil { - return provider.SnapshotRef{}, fmt.Errorf("resolve VM %q: %w", vmName, err) - } - - upid, err := m.client.CreateSnapshot(ctx, vmid, snapshotName, "") - if err != nil { - return provider.SnapshotRef{}, fmt.Errorf("create snapshot: %w", err) - } - - if err := m.client.WaitForTask(ctx, upid); err != nil { - return provider.SnapshotRef{}, fmt.Errorf("wait for snapshot: %w", err) - } - - return provider.SnapshotRef{ - Name: snapshotName, - Kind: "INTERNAL", - Ref: fmt.Sprintf("proxmox:%d:%s", vmid, snapshotName), - }, nil -} - -// DiffSnapshot returns a plan describing the snapshots. -// Proxmox does not support native filesystem diff, so this returns notes. 
-func (m *ProxmoxManager) DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*provider.FSComparePlan, error) { - return &provider.FSComparePlan{ - VMName: vmName, - FromSnapshot: fromSnapshot, - ToSnapshot: toSnapshot, - Notes: []string{ - "Proxmox does not support native snapshot filesystem diff.", - "To compare changes, mount snapshots manually or use QEMU guest agent.", - }, - }, nil -} - -// GetIPAddress retrieves the VM's primary IPv4 address via the QEMU guest agent. -// Polls until an IP is found or the timeout expires. -// Filtering rules: skips loopback interfaces by name ("lo"), loopback IPs -// (127.0.0.0/8 via net.IP.IsLoopback), link-local IPs (169.254.0.0/16 via -// net.IP.IsLinkLocalUnicast), unparseable IPs, and IPv6 addresses. -// Returns the first valid IPv4 address found. -func (m *ProxmoxManager) GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - vmid, err := m.resolver.ResolveVMID(ctx, vmName) - if err != nil { - return "", "", fmt.Errorf("resolve VM %q: %w", vmName, err) - } - - deadline := time.Now().Add(timeout) - ticker := time.NewTicker(3 * time.Second) - defer ticker.Stop() - - for { - ifaces, err := m.client.GetGuestAgentInterfaces(ctx, vmid) - if err == nil { - for _, iface := range ifaces { - // Skip loopback - if iface.Name == "lo" { - continue - } - for _, addr := range iface.IPAddresses { - if addr.IPAddressType == "ipv4" { - ip := net.ParseIP(addr.IPAddress) - if ip != nil && !ip.IsLoopback() && !ip.IsLinkLocalUnicast() { - return addr.IPAddress, iface.HardwareAddress, nil - } - } - } - } - } - - if time.Now().After(deadline) { - return "", "", fmt.Errorf("timeout waiting for IP address of VM %q", vmName) - } - - select { - case <-ctx.Done(): - return "", "", ctx.Err() - case <-ticker.C: - } - } -} - -// GetVMState returns the current state of a VM as a provider.VMState. 
-func (m *ProxmoxManager) GetVMState(ctx context.Context, vmName string) (provider.VMState, error) { - vmid, err := m.resolver.ResolveVMID(ctx, vmName) - if err != nil { - return provider.VMStateUnknown, fmt.Errorf("resolve VM %q: %w", vmName, err) - } - - status, err := m.client.GetVMStatus(ctx, vmid) - if err != nil { - return provider.VMStateUnknown, fmt.Errorf("get VM status: %w", err) - } - - switch status.Status { - case "running": - return provider.VMStateRunning, nil - case "stopped": - return provider.VMStateShutOff, nil - case "paused": - return provider.VMStatePaused, nil - default: - return provider.VMStateUnknown, nil - } -} - -// ValidateSourceVM checks that a source VM exists and is suitable for cloning. -func (m *ProxmoxManager) ValidateSourceVM(ctx context.Context, vmName string) (*provider.VMValidationResult, error) { - result := &provider.VMValidationResult{ - VMName: vmName, - Valid: true, - } - - vmid, err := m.resolver.ResolveVMID(ctx, vmName) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, fmt.Sprintf("VM %q not found: %v", vmName, err)) - return result, nil - } - - status, err := m.client.GetVMStatus(ctx, vmid) - if err != nil { - result.Valid = false - result.Errors = append(result.Errors, fmt.Sprintf("failed to get VM status: %v", err)) - return result, nil - } - - switch status.Status { - case "running": - result.State = provider.VMStateRunning - case "stopped": - result.State = provider.VMStateShutOff - default: - result.State = provider.VMStateUnknown - } - - // Check VM config for network and guest agent - vmCfg, err := m.client.GetVMConfig(ctx, vmid) - if err != nil { - result.Warnings = append(result.Warnings, fmt.Sprintf("could not read VM config: %v", err)) - } else { - if vmCfg.Net0 == "" { - result.HasNetwork = false - result.Warnings = append(result.Warnings, "VM has no network interface (net0)") - } else { - result.HasNetwork = true - } - - if vmCfg.Agent == "" || vmCfg.Agent == "0" { - 
result.Warnings = append(result.Warnings, "QEMU guest agent not enabled; IP discovery may not work") - } - } - - return result, nil -} - -// CheckHostResources checks that the Proxmox node has sufficient resources. -func (m *ProxmoxManager) CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) (*provider.ResourceCheckResult, error) { - nodeStatus, err := m.client.GetNodeStatus(ctx) - if err != nil { - return nil, fmt.Errorf("get node status: %w", err) - } - - totalMemMB := nodeStatus.Memory.Total / (1024 * 1024) - freeMemMB := nodeStatus.Memory.Free / (1024 * 1024) - freeDiskMB := nodeStatus.RootFS.Available / (1024 * 1024) - - result := &provider.ResourceCheckResult{ - Valid: true, - AvailableMemoryMB: freeMemMB, - TotalMemoryMB: totalMemMB, - AvailableCPUs: nodeStatus.MaxCPU, - TotalCPUs: nodeStatus.MaxCPU, - AvailableDiskMB: freeDiskMB, - RequiredMemoryMB: requiredMemoryMB, - RequiredCPUs: requiredCPUs, - } - - // Memory check - if int64(requiredMemoryMB) > freeMemMB { - result.NeedsMemoryApproval = true - result.Warnings = append(result.Warnings, - fmt.Sprintf("requested %d MB memory but only %d MB free", requiredMemoryMB, freeMemMB)) - } - - // CPU check - Proxmox allows overcommit but warn if high - cpuUsagePct := nodeStatus.CPU * 100 - if cpuUsagePct > 80 { - result.NeedsCPUApproval = true - result.Warnings = append(result.Warnings, - fmt.Sprintf("node CPU usage is %.0f%%", cpuUsagePct)) - } - - return result, nil -} diff --git a/fluid/internal/proxmox/manager_test.go b/fluid/internal/proxmox/manager_test.go deleted file mode 100644 index df415ebc..00000000 --- a/fluid/internal/proxmox/manager_test.go +++ /dev/null @@ -1,1897 +0,0 @@ -package proxmox - -import ( - "context" - "fmt" - "net/http" - "net/http/httptest" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/aspectrr/fluid.sh/fluid/internal/provider" -) - -// mockProxmoxAPI creates a mock Proxmox API server that handles common endpoints. 
-func mockProxmoxAPI(t *testing.T) (*ProxmoxManager, *httptest.Server) { - t.Helper() - - mux := http.NewServeMux() - - // List VMs - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - vms := []VMListEntry{ - {VMID: 100, Name: "ubuntu-template", Status: "stopped", Template: 1}, - {VMID: 101, Name: "sandbox-1", Status: "running"}, - {VMID: 102, Name: "sandbox-paused", Status: "paused"}, - {VMID: 103, Name: "no-net-vm", Status: "stopped"}, - {VMID: 104, Name: "no-agent-vm", Status: "stopped"}, - } - _, _ = w.Write(envelope(vms)) - }) - - // VM status endpoints - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/status/current", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(VMStatus{VMID: 100, Name: "ubuntu-template", Status: "stopped"})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/101/status/current", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(VMStatus{VMID: 101, Name: "sandbox-1", Status: "running"})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/102/status/current", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(VMStatus{VMID: 102, Name: "sandbox-paused", Status: "paused"})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/103/status/current", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(VMStatus{VMID: 103, Name: "no-net-vm", Status: "stopped"})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/104/status/current", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(VMStatus{VMID: 104, Name: "no-agent-vm", Status: "stopped"})) - }) - - // VM config endpoints - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/config", func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodGet { - _, _ = w.Write(envelope(VMConfig{ - Name: "ubuntu-template", - Memory: 4096, - Cores: 2, - Sockets: 1, - Net0: "virtio=AA:BB:CC:DD:EE:FF,bridge=vmbr0", - Agent: "1", - })) - } else { - _, _ = 
w.Write(envelope(nil)) - } - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/101/config", func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodGet { - _, _ = w.Write(envelope(VMConfig{ - Name: "sandbox-1", - Memory: 2048, - Cores: 1, - Net0: "virtio=11:22:33:44:55:66,bridge=vmbr0", - Agent: "1", - })) - } else { - _, _ = w.Write(envelope(nil)) - } - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/103/config", func(w http.ResponseWriter, r *http.Request) { - // VM with no network - _, _ = w.Write(envelope(VMConfig{Name: "no-net-vm", Memory: 1024, Cores: 1, Agent: "1"})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/104/config", func(w http.ResponseWriter, r *http.Request) { - // VM with no guest agent - _, _ = w.Write(envelope(VMConfig{Name: "no-agent-vm", Memory: 1024, Cores: 1, Net0: "virtio=FF:FF:FF:FF:FF:FF,bridge=vmbr0"})) - }) - - // Clone - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/clone", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:pve1:00001234:clone")) - }) - - // Start/Stop/Shutdown - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/status/start", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:pve1:start:100")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/101/status/start", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:pve1:start:101")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/101/status/stop", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:pve1:stop:101")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/status/stop", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:pve1:stop:100")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/101/status/shutdown", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:pve1:shutdown:101")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/status/shutdown", func(w 
http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:pve1:shutdown:100")) - }) - - // Snapshot - mux.HandleFunc("/api2/json/nodes/pve1/qemu/101/snapshot", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:pve1:snapshot:101")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/snapshot", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:pve1:snapshot:100")) - }) - - // Guest agent - mux.HandleFunc("/api2/json/nodes/pve1/qemu/101/agent/network-get-interfaces", func(w http.ResponseWriter, r *http.Request) { - ifaces := struct { - Result []NetworkInterface `json:"result"` - }{ - Result: []NetworkInterface{ - { - Name: "lo", - HardwareAddress: "00:00:00:00:00:00", - IPAddresses: []GuestIPAddress{{IPAddressType: "ipv4", IPAddress: "127.0.0.1", Prefix: 8}}, - }, - { - Name: "eth0", - HardwareAddress: "AA:BB:CC:DD:EE:FF", - IPAddresses: []GuestIPAddress{{IPAddressType: "ipv4", IPAddress: "10.0.0.50", Prefix: 24}}, - }, - }, - } - _, _ = w.Write(envelope(ifaces)) - }) - - // Node status - mux.HandleFunc("/api2/json/nodes/pve1/status", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(NodeStatus{ - CPU: 0.15, - MaxCPU: 16, - Memory: MemoryStatus{ - Total: 64 * 1024 * 1024 * 1024, - Used: 16 * 1024 * 1024 * 1024, - Free: 48 * 1024 * 1024 * 1024, - }, - RootFS: DiskStatus{ - Total: 500 * 1024 * 1024 * 1024, - Used: 100 * 1024 * 1024 * 1024, - Available: 400 * 1024 * 1024 * 1024, - }, - })) - }) - - // Task status - always return completed - mux.HandleFunc("/api2/json/nodes/pve1/tasks/", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(TaskStatus{Status: "stopped", ExitStatus: "OK"})) - }) - - // Delete VMs - mux.HandleFunc("/api2/json/nodes/pve1/qemu/101", func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodDelete { - _, _ = w.Write(envelope("UPID:pve1:delete:101")) - } - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100", 
func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodDelete { - _, _ = w.Write(envelope("UPID:pve1:delete:100")) - } - }) - - // Config for newly cloned VMs - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000/config", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(nil)) - }) - - server := httptest.NewServer(mux) - - cfg := Config{ - Host: server.URL, - TokenID: "test@pam!test", - Secret: "test-secret", - Node: "pve1", - VMIDStart: 9000, - VMIDEnd: 9999, - CloneMode: "full", - } - - mgr, err := NewProxmoxManager(cfg, nil) - if err != nil { - t.Fatalf("NewProxmoxManager: %v", err) - } - - return mgr, server -} - -// --- Interface compliance --- - -var ( - _ provider.Manager = (*ProxmoxManager)(nil) - _ provider.MultiHostLister = (*MultiNodeManager)(nil) -) - -// --- NewProxmoxManager --- - -func TestNewProxmoxManager_Valid(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(nil)) - })) - defer server.Close() - - cfg := Config{ - Host: server.URL, - TokenID: "root@pam!test", - Secret: "secret", - Node: "pve1", - } - mgr, err := NewProxmoxManager(cfg, nil) - if err != nil { - t.Fatalf("NewProxmoxManager: %v", err) - } - if mgr == nil { - t.Fatal("expected non-nil manager") - } -} - -func TestNewProxmoxManager_InvalidConfig(t *testing.T) { - _, err := NewProxmoxManager(Config{}, nil) - if err == nil { - t.Fatal("expected error for empty config") - } - if !strings.Contains(err.Error(), "invalid proxmox config") { - t.Errorf("expected 'invalid proxmox config', got: %s", err.Error()) - } -} - -func TestNewProxmoxManager_MissingHost(t *testing.T) { - _, err := NewProxmoxManager(Config{ - TokenID: "root@pam!test", - Secret: "secret", - Node: "pve1", - }, nil) - if err == nil { - t.Fatal("expected error for missing host") - } -} - -func TestNewProxmoxManager_MissingSecret(t *testing.T) { - _, err := NewProxmoxManager(Config{ - Host: 
"https://pve:8006", - TokenID: "root@pam!test", - Node: "pve1", - }, nil) - if err == nil { - t.Fatal("expected error for missing secret") - } -} - -func TestNewProxmoxManager_MissingNode(t *testing.T) { - _, err := NewProxmoxManager(Config{ - Host: "https://pve:8006", - TokenID: "root@pam!test", - Secret: "secret", - }, nil) - if err == nil { - t.Fatal("expected error for missing node") - } -} - -// --- CloneVM --- - -func TestManagerCloneVM_DelegatesToCloneFromVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - ref, err := mgr.CloneVM(context.Background(), "ubuntu-template", "clone-via-clonevm", 2, 4096, "vmbr0") - if err != nil { - t.Fatalf("CloneVM: %v", err) - } - if ref.Name != "clone-via-clonevm" { - t.Errorf("expected clone-via-clonevm, got %s", ref.Name) - } - if ref.UUID == "" { - t.Error("expected non-empty UUID (VMID)") - } - if ref.UUID != "9000" { - t.Errorf("expected UUID 9000, got %s", ref.UUID) - } -} - -// --- CloneFromVM --- - -func TestManagerCloneFromVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - ref, err := mgr.CloneFromVM(context.Background(), "ubuntu-template", "new-sandbox", 2, 4096, "vmbr0") - if err != nil { - t.Fatalf("CloneFromVM: %v", err) - } - if ref.Name != "new-sandbox" { - t.Errorf("expected new-sandbox, got %s", ref.Name) - } - if ref.UUID == "" { - t.Error("expected non-empty UUID (VMID)") - } - if ref.UUID != "9000" { - t.Errorf("expected UUID 9000, got %s", ref.UUID) - } -} - -func TestManagerCloneFromVM_ZeroCPUMemory(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - // With 0 cpu and 0 memory, should not call SetVMConfig - ref, err := mgr.CloneFromVM(context.Background(), "ubuntu-template", "minimal-clone", 0, 0, "") - if err != nil { - t.Fatalf("CloneFromVM zero: %v", err) - } - if ref.Name != "minimal-clone" { - t.Errorf("expected minimal-clone, got %s", ref.Name) - } -} - -func TestManagerCloneFromVM_SourceNotFound(t *testing.T) { - 
mgr, server := mockProxmoxAPI(t) - defer server.Close() - - _, err := mgr.CloneFromVM(context.Background(), "nonexistent-vm", "new-sandbox", 2, 4096, "vmbr0") - if err == nil { - t.Fatal("expected error for nonexistent source VM") - } - if !strings.Contains(err.Error(), "not found") { - t.Errorf("expected 'not found', got: %s", err.Error()) - } -} - -func TestManagerCloneFromVM_SetConfigFails(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 100, Name: "ubuntu-template", Status: "stopped"}})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/clone", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:pve1:00001234:clone")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000/config", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - _, _ = w.Write([]byte(`{"errors":{"cores":"invalid value"}}`)) - }) - mux.HandleFunc("/api2/json/nodes/pve1/tasks/", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(TaskStatus{Status: "stopped", ExitStatus: "OK"})) - }) - server := httptest.NewServer(mux) - defer server.Close() - - mgr, err := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - if err != nil { - t.Fatalf("NewProxmoxManager: %v", err) - } - - _, err = mgr.CloneFromVM(context.Background(), "ubuntu-template", "new-sandbox", 2, 4096, "vmbr0") - if err == nil { - t.Fatal("expected error when SetVMConfig fails") - } - if !strings.Contains(err.Error(), "set CPU/memory on clone") { - t.Errorf("expected 'set CPU/memory on clone' in error, got: %s", err.Error()) - } -} - -func TestManagerCloneFromVM_WithNetwork(t *testing.T) { - var capturedNet0 string - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r 
*http.Request) { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 100, Name: "template"}})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/clone", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:pve1:clone")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000/config", func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodPut { - if err := r.ParseForm(); err != nil { - t.Fatal(err) - } - capturedNet0 = r.FormValue("net0") - } - _, _ = w.Write(envelope(nil)) - }) - mux.HandleFunc("/api2/json/nodes/pve1/tasks/", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(TaskStatus{Status: "stopped", ExitStatus: "OK"})) - }) - server := httptest.NewServer(mux) - defer server.Close() - - mgr, err := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - if err != nil { - t.Fatalf("NewProxmoxManager: %v", err) - } - - ref, err := mgr.CloneFromVM(context.Background(), "template", "net-sandbox", 2, 4096, "vmbr1") - if err != nil { - t.Fatalf("CloneFromVM with network: %v", err) - } - if ref.Name != "net-sandbox" { - t.Errorf("expected net-sandbox, got %s", ref.Name) - } - expected := "virtio,bridge=vmbr1" - if capturedNet0 != expected { - t.Errorf("expected net0=%q, got %q", expected, capturedNet0) - } -} - -// --- StartVM --- - -func TestManagerStartVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - err := mgr.StartVM(context.Background(), "ubuntu-template") - if err != nil { - t.Fatalf("StartVM: %v", err) - } -} - -func TestManagerStartVM_NonexistentVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - err := mgr.StartVM(context.Background(), "does-not-exist") - if err == nil { - t.Fatal("expected error for nonexistent VM") - } - if !strings.Contains(err.Error(), "not found") { - t.Errorf("expected 'not found', got: %s", err.Error()) - } -} - -// --- StopVM --- - 
-func TestManagerStopVM_Graceful(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - err := mgr.StopVM(context.Background(), "sandbox-1", false) - if err != nil { - t.Fatalf("StopVM graceful: %v", err) - } -} - -func TestManagerStopVM_Force(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - err := mgr.StopVM(context.Background(), "sandbox-1", true) - if err != nil { - t.Fatalf("StopVM force: %v", err) - } -} - -func TestManagerStopVM_NonexistentVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - err := mgr.StopVM(context.Background(), "ghost-vm", false) - if err == nil { - t.Fatal("expected error for nonexistent VM") - } -} - -// --- DestroyVM --- - -func TestManagerDestroyVM_RunningVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - // sandbox-1 is running - should stop first, then delete - err := mgr.DestroyVM(context.Background(), "sandbox-1") - if err != nil { - t.Fatalf("DestroyVM running: %v", err) - } -} - -func TestManagerDestroyVM_StoppedVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - // ubuntu-template is stopped - should delete directly - err := mgr.DestroyVM(context.Background(), "ubuntu-template") - if err != nil { - t.Fatalf("DestroyVM stopped: %v", err) - } -} - -func TestManagerDestroyVM_NonexistentVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - err := mgr.DestroyVM(context.Background(), "nonexistent") - if err == nil { - t.Fatal("expected error for nonexistent VM") - } -} - -// --- GetVMState --- - -func TestManagerGetVMState_Running(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - state, err := mgr.GetVMState(context.Background(), "sandbox-1") - if err != nil { - t.Fatalf("GetVMState: %v", err) - } - if state != provider.VMStateRunning { - t.Errorf("expected running, got %s", state) - } -} - -func TestManagerGetVMState_Stopped(t *testing.T) { - 
mgr, server := mockProxmoxAPI(t) - defer server.Close() - - state, err := mgr.GetVMState(context.Background(), "ubuntu-template") - if err != nil { - t.Fatalf("GetVMState: %v", err) - } - if state != provider.VMStateShutOff { - t.Errorf("expected shut off, got %s", state) - } -} - -func TestManagerGetVMState_Paused(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - state, err := mgr.GetVMState(context.Background(), "sandbox-paused") - if err != nil { - t.Fatalf("GetVMState: %v", err) - } - if state != provider.VMStatePaused { - t.Errorf("expected paused, got %s", state) - } -} - -func TestManagerGetVMState_Nonexistent(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - _, err := mgr.GetVMState(context.Background(), "ghost") - if err == nil { - t.Fatal("expected error for nonexistent VM") - } -} - -// --- GetIPAddress --- - -func TestManagerGetIPAddress(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - ip, mac, err := mgr.GetIPAddress(context.Background(), "sandbox-1", 5*time.Second) - if err != nil { - t.Fatalf("GetIPAddress: %v", err) - } - if ip != "10.0.0.50" { - t.Errorf("expected 10.0.0.50, got %s", ip) - } - if mac != "AA:BB:CC:DD:EE:FF" { - t.Errorf("expected AA:BB:CC:DD:EE:FF, got %s", mac) - } -} - -func TestManagerGetIPAddress_SkipsLoopback(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 200, Name: "lo-only"}})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/200/agent/network-get-interfaces", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(struct { - Result []NetworkInterface `json:"result"` - }{ - Result: []NetworkInterface{ - { - Name: "lo", - HardwareAddress: "00:00:00:00:00:00", - IPAddresses: []GuestIPAddress{{IPAddressType: "ipv4", IPAddress: "127.0.0.1", Prefix: 8}}, - }, - }, - })) - }) - server := 
httptest.NewServer(mux) - defer server.Close() - - mgr, _ := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - - _, _, err := mgr.GetIPAddress(context.Background(), "lo-only", 500*time.Millisecond) - if err == nil { - t.Fatal("expected timeout for loopback-only VM") - } - if !strings.Contains(err.Error(), "timeout") { - t.Errorf("expected timeout error, got: %s", err.Error()) - } -} - -func TestManagerGetIPAddress_IPv6Only(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 201, Name: "v6-only"}})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/201/agent/network-get-interfaces", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(struct { - Result []NetworkInterface `json:"result"` - }{ - Result: []NetworkInterface{ - { - Name: "eth0", - HardwareAddress: "AA:BB:CC:DD:EE:FF", - IPAddresses: []GuestIPAddress{{IPAddressType: "ipv6", IPAddress: "fe80::1", Prefix: 64}}, - }, - }, - })) - }) - server := httptest.NewServer(mux) - defer server.Close() - - mgr, _ := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - - _, _, err := mgr.GetIPAddress(context.Background(), "v6-only", 500*time.Millisecond) - if err == nil { - t.Fatal("expected timeout for IPv6-only VM") - } -} - -func TestManagerGetIPAddress_ContextCancelled(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 202, Name: "slow-vm"}})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/202/agent/network-get-interfaces", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - _, _ = w.Write([]byte("guest agent not running")) - }) 
- server := httptest.NewServer(mux) - defer server.Close() - - mgr, _ := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - - _, _, err := mgr.GetIPAddress(ctx, "slow-vm", 30*time.Second) - if err == nil { - t.Fatal("expected error for cancelled context") - } -} - -func TestManagerGetIPAddress_NonexistentVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - _, _, err := mgr.GetIPAddress(context.Background(), "ghost", 1*time.Second) - if err == nil { - t.Fatal("expected error for nonexistent VM") - } -} - -// --- ValidateSourceVM --- - -func TestManagerValidateSourceVM_Valid(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - result, err := mgr.ValidateSourceVM(context.Background(), "ubuntu-template") - if err != nil { - t.Fatalf("ValidateSourceVM: %v", err) - } - if !result.Valid { - t.Error("expected valid VM") - } - if !result.HasNetwork { - t.Error("expected has_network=true") - } - if result.State != provider.VMStateShutOff { - t.Errorf("expected shut off, got %s", result.State) - } - if result.VMName != "ubuntu-template" { - t.Errorf("expected ubuntu-template, got %s", result.VMName) - } -} - -func TestManagerValidateSourceVM_RunningVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - result, err := mgr.ValidateSourceVM(context.Background(), "sandbox-1") - if err != nil { - t.Fatalf("ValidateSourceVM: %v", err) - } - if !result.Valid { - t.Error("expected valid VM (running VMs can be cloned)") - } - if result.State != provider.VMStateRunning { - t.Errorf("expected running, got %s", result.State) - } -} - -func TestManagerValidateSourceVM_Nonexistent(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - result, err := mgr.ValidateSourceVM(context.Background(), "nonexistent") - if 
err != nil { - t.Fatalf("ValidateSourceVM: %v", err) - } - if result.Valid { - t.Error("expected invalid for nonexistent VM") - } - if len(result.Errors) == 0 { - t.Error("expected errors for nonexistent VM") - } -} - -func TestManagerValidateSourceVM_NoNetwork(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - result, err := mgr.ValidateSourceVM(context.Background(), "no-net-vm") - if err != nil { - t.Fatalf("ValidateSourceVM: %v", err) - } - if result.HasNetwork { - t.Error("expected has_network=false") - } - hasNetWarning := false - for _, w := range result.Warnings { - if strings.Contains(w, "no network interface") { - hasNetWarning = true - } - } - if !hasNetWarning { - t.Error("expected warning about no network interface") - } -} - -func TestManagerValidateSourceVM_NoGuestAgent(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - result, err := mgr.ValidateSourceVM(context.Background(), "no-agent-vm") - if err != nil { - t.Fatalf("ValidateSourceVM: %v", err) - } - hasAgentWarning := false - for _, w := range result.Warnings { - if strings.Contains(w, "guest agent") { - hasAgentWarning = true - } - } - if !hasAgentWarning { - t.Error("expected warning about guest agent") - } -} - -// --- CheckHostResources --- - -func TestManagerCheckHostResources_Sufficient(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - result, err := mgr.CheckHostResources(context.Background(), 2, 4096) - if err != nil { - t.Fatalf("CheckHostResources: %v", err) - } - if !result.Valid { - t.Error("expected valid resources") - } - if result.TotalCPUs != 16 { - t.Errorf("expected 16 CPUs, got %d", result.TotalCPUs) - } - if result.RequiredCPUs != 2 { - t.Errorf("expected required 2 CPUs, got %d", result.RequiredCPUs) - } - if result.RequiredMemoryMB != 4096 { - t.Errorf("expected required 4096 MB, got %d", result.RequiredMemoryMB) - } - if result.NeedsMemoryApproval { - t.Error("should not need memory approval 
(48GB free)") - } - if result.NeedsCPUApproval { - t.Error("should not need CPU approval (15% usage)") - } - // 48GB free = 49152 MB - if result.AvailableMemoryMB != 49152 { - t.Errorf("expected 49152 MB free, got %d", result.AvailableMemoryMB) - } -} - -func TestManagerCheckHostResources_InsufficientMemory(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/status", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(NodeStatus{ - CPU: 0.10, - MaxCPU: 4, - Memory: MemoryStatus{ - Total: 8 * 1024 * 1024 * 1024, - Used: 7 * 1024 * 1024 * 1024, - Free: 1 * 1024 * 1024 * 1024, // 1GB free - }, - RootFS: DiskStatus{Available: 50 * 1024 * 1024 * 1024}, - })) - }) - server := httptest.NewServer(mux) - defer server.Close() - - mgr, _ := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - - result, err := mgr.CheckHostResources(context.Background(), 1, 4096) - if err != nil { - t.Fatalf("CheckHostResources: %v", err) - } - if !result.NeedsMemoryApproval { - t.Error("expected memory approval needed (4GB requested, 1GB free)") - } - if len(result.Warnings) == 0 { - t.Error("expected warning about insufficient memory") - } -} - -func TestManagerCheckHostResources_HighCPU(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/status", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(NodeStatus{ - CPU: 0.92, // 92% usage - MaxCPU: 4, - Memory: MemoryStatus{ - Total: 8 * 1024 * 1024 * 1024, - Free: 4 * 1024 * 1024 * 1024, - }, - RootFS: DiskStatus{Available: 50 * 1024 * 1024 * 1024}, - })) - }) - server := httptest.NewServer(mux) - defer server.Close() - - mgr, _ := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - - result, err := mgr.CheckHostResources(context.Background(), 2, 2048) - if err != nil { - 
t.Fatalf("CheckHostResources: %v", err) - } - if !result.NeedsCPUApproval { - t.Error("expected CPU approval needed (92% usage)") - } -} - -// --- InjectSSHKey --- - -func TestManagerInjectSSHKey(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - err := mgr.InjectSSHKey(context.Background(), "sandbox-1", "ubuntu", "ssh-ed25519 AAAA... user@host") - if err != nil { - t.Fatalf("InjectSSHKey: %v", err) - } -} - -func TestManagerInjectSSHKey_EmptyUsername(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - // Should not set ciuser when username is empty - err := mgr.InjectSSHKey(context.Background(), "sandbox-1", "", "ssh-rsa AAAA...") - if err != nil { - t.Fatalf("InjectSSHKey empty user: %v", err) - } -} - -func TestManagerInjectSSHKey_NonexistentVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - err := mgr.InjectSSHKey(context.Background(), "ghost-vm", "user", "ssh-rsa AAAA...") - if err == nil { - t.Fatal("expected error for nonexistent VM") - } -} - -// --- CreateSnapshot --- - -func TestManagerCreateSnapshot(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - ref, err := mgr.CreateSnapshot(context.Background(), "sandbox-1", "snap1", false) - if err != nil { - t.Fatalf("CreateSnapshot: %v", err) - } - if ref.Name != "snap1" { - t.Errorf("expected snap1, got %s", ref.Name) - } - if ref.Kind != "INTERNAL" { - t.Errorf("expected INTERNAL, got %s", ref.Kind) - } - if !strings.HasPrefix(ref.Ref, "proxmox:") { - t.Errorf("expected proxmox: prefix in ref, got %s", ref.Ref) - } - if !strings.Contains(ref.Ref, "101") { - t.Errorf("expected VMID 101 in ref, got %s", ref.Ref) - } -} - -func TestManagerCreateSnapshot_ExternalIgnored(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - // external=true should work the same as false for Proxmox - ref, err := mgr.CreateSnapshot(context.Background(), "sandbox-1", "snap-ext", true) - if err != nil { 
- t.Fatalf("CreateSnapshot external: %v", err) - } - if ref.Name != "snap-ext" { - t.Errorf("expected snap-ext, got %s", ref.Name) - } - if ref.Kind != "INTERNAL" { - t.Errorf("expected INTERNAL even with external=true, got %s", ref.Kind) - } -} - -func TestManagerCreateSnapshot_NonexistentVM(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - _, err := mgr.CreateSnapshot(context.Background(), "nonexistent", "snap1", false) - if err == nil { - t.Fatal("expected error for nonexistent VM") - } -} - -// --- DiffSnapshot --- - -func TestManagerDiffSnapshot(t *testing.T) { - mgr, server := mockProxmoxAPI(t) - defer server.Close() - - plan, err := mgr.DiffSnapshot(context.Background(), "sandbox-1", "snap1", "snap2") - if err != nil { - t.Fatalf("DiffSnapshot: %v", err) - } - if plan.VMName != "sandbox-1" { - t.Errorf("expected sandbox-1, got %s", plan.VMName) - } - if plan.FromSnapshot != "snap1" { - t.Errorf("expected snap1, got %s", plan.FromSnapshot) - } - if plan.ToSnapshot != "snap2" { - t.Errorf("expected snap2, got %s", plan.ToSnapshot) - } - if len(plan.Notes) == 0 { - t.Error("expected notes about Proxmox limitations") - } - if plan.FromMount != "" || plan.ToMount != "" { - t.Error("expected empty mounts for Proxmox") - } -} - -// --- Resolver --- - -func TestVMResolverCaching(t *testing.T) { - var callCount int32 - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(&callCount, 1) - vms := []VMListEntry{{VMID: 100, Name: "cached-vm"}} - _, _ = w.Write(envelope(vms)) - }) - defer server.Close() - - resolver := NewVMResolver(client) - - // First call: cache miss, triggers refresh - vmid, err := resolver.ResolveVMID(context.Background(), "cached-vm") - if err != nil { - t.Fatalf("ResolveVMID: %v", err) - } - if vmid != 100 { - t.Errorf("expected 100, got %d", vmid) - } - firstCount := atomic.LoadInt32(&callCount) - - // Second call: cache hit - _, err = 
resolver.ResolveVMID(context.Background(), "cached-vm") - if err != nil { - t.Fatalf("ResolveVMID: %v", err) - } - if atomic.LoadInt32(&callCount) != firstCount { - t.Error("expected cached result, but API was called again") - } -} - -func TestVMResolverRefresh(t *testing.T) { - var callCount int32 - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - n := atomic.AddInt32(&callCount, 1) - if n <= 1 { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 100, Name: "vm1"}})) - } else { - // After refresh, new VM appears - _, _ = w.Write(envelope([]VMListEntry{ - {VMID: 100, Name: "vm1"}, - {VMID: 101, Name: "vm2"}, - })) - } - }) - defer server.Close() - - resolver := NewVMResolver(client) - - // Pre-populate cache with first API call (only "vm1") - if err := resolver.Refresh(context.Background()); err != nil { - t.Fatalf("initial refresh: %v", err) - } - if atomic.LoadInt32(&callCount) != 1 { - t.Fatal("expected exactly 1 API call after initial refresh") - } - - // Now resolve vm2 - cache miss triggers second API call which includes vm2 - vmid, err := resolver.ResolveVMID(context.Background(), "vm2") - if err != nil { - t.Fatalf("ResolveVMID after refresh: %v", err) - } - if vmid != 101 { - t.Errorf("expected VMID 101, got %d", vmid) - } - if atomic.LoadInt32(&callCount) != 2 { - t.Error("expected 2 API calls total") - } -} - -func TestVMResolverResolveName(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - vms := []VMListEntry{ - {VMID: 100, Name: "test-vm"}, - {VMID: 200, Name: "other-vm"}, - } - _, _ = w.Write(envelope(vms)) - }) - defer server.Close() - - resolver := NewVMResolver(client) - - name, err := resolver.ResolveName(context.Background(), 200) - if err != nil { - t.Fatalf("ResolveName: %v", err) - } - if name != "other-vm" { - t.Errorf("expected other-vm, got %s", name) - } -} - -func TestVMResolverResolveName_NotFound(t *testing.T) { - client, server := newTestServer(t, func(w 
http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 100, Name: "vm1"}})) - }) - defer server.Close() - - resolver := NewVMResolver(client) - - _, err := resolver.ResolveName(context.Background(), 999) - if err == nil { - t.Fatal("expected error for unknown VMID") - } - if !strings.Contains(err.Error(), "999") { - t.Errorf("expected VMID in error, got: %s", err.Error()) - } -} - -func TestVMResolverListAll(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - vms := []VMListEntry{ - {VMID: 100, Name: "vm1"}, - {VMID: 101, Name: "vm2"}, - {VMID: 102, Name: "vm3"}, - } - _, _ = w.Write(envelope(vms)) - }) - defer server.Close() - - resolver := NewVMResolver(client) - - vms, err := resolver.ListAll(context.Background()) - if err != nil { - t.Fatalf("ListAll: %v", err) - } - if len(vms) != 3 { - t.Errorf("expected 3 VMs, got %d", len(vms)) - } -} - -func TestVMResolverListAll_Cached(t *testing.T) { - var callCount int32 - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(&callCount, 1) - _, _ = w.Write(envelope([]VMListEntry{{VMID: 100, Name: "vm1"}})) - }) - defer server.Close() - - resolver := NewVMResolver(client) - - // Pre-populate cache - _ = resolver.Refresh(context.Background()) - beforeCount := atomic.LoadInt32(&callCount) - - // ListAll should still call the API (to get fresh data) but not refresh - _, _ = resolver.ListAll(context.Background()) - // With populated cache, ListAll calls ListVMs once (not Refresh + ListVMs) - afterCount := atomic.LoadInt32(&callCount) - if afterCount-beforeCount != 1 { - t.Errorf("expected 1 additional API call, got %d", afterCount-beforeCount) - } -} - -func TestVMResolverRefresh_APIError(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - _, _ = w.Write([]byte("error")) - }) - defer server.Close() - - 
resolver := NewVMResolver(client) - err := resolver.Refresh(context.Background()) - if err == nil { - t.Fatal("expected error for API failure") - } -} - -// --- Config Validation --- - -func TestConfigValidation(t *testing.T) { - tests := []struct { - name string - cfg Config - wantErr bool - errMsg string - }{ - { - name: "valid full config", - cfg: Config{ - Host: "https://pve.example.com:8006", TokenID: "root@pam!fluid", - Secret: "secret", Node: "pve1", VMIDStart: 9000, VMIDEnd: 9999, - }, - }, - { - name: "valid linked clone", - cfg: Config{ - Host: "https://pve:8006", TokenID: "root@pam!fluid", - Secret: "secret", Node: "pve1", CloneMode: "linked", - }, - }, - { - name: "missing host", - cfg: Config{TokenID: "root@pam!fluid", Secret: "secret", Node: "pve1"}, - wantErr: true, errMsg: "host is required", - }, - { - name: "missing token_id", - cfg: Config{Host: "https://pve:8006", Secret: "secret", Node: "pve1"}, - wantErr: true, errMsg: "token_id is required", - }, - { - name: "missing secret", - cfg: Config{Host: "https://pve:8006", TokenID: "root@pam!test", Node: "pve1"}, - wantErr: true, errMsg: "secret is required", - }, - { - name: "missing node", - cfg: Config{Host: "https://pve:8006", TokenID: "root@pam!test", Secret: "s"}, - wantErr: true, errMsg: "node is required", - }, - { - name: "bad vmid range", - cfg: Config{ - Host: "https://pve:8006", TokenID: "t", Secret: "s", Node: "n", - VMIDStart: 9999, VMIDEnd: 9000, - }, - wantErr: true, errMsg: "vmid_end", - }, - { - name: "equal vmid range", - cfg: Config{ - Host: "https://pve:8006", TokenID: "t", Secret: "s", Node: "n", - VMIDStart: 9000, VMIDEnd: 9000, - }, - wantErr: true, errMsg: "vmid_end", - }, - { - name: "bad clone mode", - cfg: Config{ - Host: "https://pve:8006", TokenID: "t", Secret: "s", Node: "n", - CloneMode: "snapshot", - }, - wantErr: true, errMsg: "clone_mode", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.cfg.Validate() - if (err != nil) != 
tt.wantErr { - t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr) - } - if tt.wantErr && tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) { - t.Errorf("expected error containing %q, got: %s", tt.errMsg, err.Error()) - } - }) - } -} - -func TestConfigValidation_DefaultVMIDRange(t *testing.T) { - cfg := Config{ - Host: "https://pve:8006", TokenID: "t", Secret: "s", Node: "n", - } - err := cfg.Validate() - if err != nil { - t.Fatalf("Validate: %v", err) - } - if cfg.VMIDStart != 9000 { - t.Errorf("expected default VMIDStart 9000, got %d", cfg.VMIDStart) - } - if cfg.VMIDEnd != 9999 { - t.Errorf("expected default VMIDEnd 9999, got %d", cfg.VMIDEnd) - } -} - -func TestConfigValidation_DefaultCloneMode(t *testing.T) { - cfg := Config{ - Host: "https://pve:8006", TokenID: "t", Secret: "s", Node: "n", - } - _ = cfg.Validate() - if cfg.CloneMode != "full" { - t.Errorf("expected default CloneMode full, got %s", cfg.CloneMode) - } -} - -// --- MultiNodeManager --- - -func TestMultiNodeManager_ListVMs(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - vms := []VMListEntry{ - {VMID: 100, Name: "vm1", Status: "running"}, - {VMID: 101, Name: "vm2", Status: "stopped"}, - {VMID: 102, Name: "vm3", Status: "paused"}, - } - _, _ = w.Write(envelope(vms)) - })) - defer server.Close() - - mnm := NewMultiNodeManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - }, nil) - - result, err := mnm.ListVMs(context.Background()) - if err != nil { - t.Fatalf("ListVMs: %v", err) - } - if len(result.VMs) != 3 { - t.Fatalf("expected 3 VMs, got %d", len(result.VMs)) - } - if len(result.HostErrors) != 0 { - t.Errorf("expected no host errors, got %d", len(result.HostErrors)) - } - - // Check fields - vm := result.VMs[0] - if vm.Name != "vm1" { - t.Errorf("expected vm1, got %s", vm.Name) - } - if vm.UUID != "100" { - t.Errorf("expected UUID 100, got %s", vm.UUID) - } - if vm.State != "running" { 
- t.Errorf("expected running, got %s", vm.State) - } - if vm.HostName != "pve1" { - t.Errorf("expected pve1, got %s", vm.HostName) - } - if vm.HostAddress != server.URL { - t.Errorf("expected %s, got %s", server.URL, vm.HostAddress) - } - if !vm.Persistent { - t.Error("expected persistent=true") - } -} - -func TestMultiNodeManager_ListVMs_APIError(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusForbidden) - _, _ = w.Write([]byte("forbidden")) - })) - defer server.Close() - - mnm := NewMultiNodeManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - }, nil) - - result, err := mnm.ListVMs(context.Background()) - if err != nil { - t.Fatalf("expected nil error (errors in result), got: %v", err) - } - if len(result.VMs) != 0 { - t.Errorf("expected 0 VMs on error, got %d", len(result.VMs)) - } - if len(result.HostErrors) != 1 { - t.Fatalf("expected 1 host error, got %d", len(result.HostErrors)) - } - if result.HostErrors[0].HostName != "pve1" { - t.Errorf("expected pve1 in error, got %s", result.HostErrors[0].HostName) - } -} - -func TestMultiNodeManager_ListVMs_Empty(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{})) - })) - defer server.Close() - - mnm := NewMultiNodeManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - }, nil) - - result, err := mnm.ListVMs(context.Background()) - if err != nil { - t.Fatalf("ListVMs: %v", err) - } - if len(result.VMs) != 0 { - t.Errorf("expected 0 VMs, got %d", len(result.VMs)) - } -} - -func TestMultiNodeManager_FindHostForVM(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - vms := []VMListEntry{ - {VMID: 100, Name: "vm-alpha"}, - {VMID: 101, Name: "vm-beta"}, - } - _, _ = w.Write(envelope(vms)) - })) - defer server.Close() - - mnm := 
NewMultiNodeManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - }, nil) - - // Found - host, addr, err := mnm.FindHostForVM(context.Background(), "vm-beta") - if err != nil { - t.Fatalf("FindHostForVM: %v", err) - } - if host != "pve1" { - t.Errorf("expected pve1, got %s", host) - } - if addr != server.URL { - t.Errorf("expected %s, got %s", server.URL, addr) - } - - // Not found - _, _, err = mnm.FindHostForVM(context.Background(), "nonexistent") - if err == nil { - t.Error("expected error for nonexistent VM") - } - if !strings.Contains(err.Error(), "not found") { - t.Errorf("expected 'not found', got: %s", err.Error()) - } -} - -func TestMultiNodeManager_FindHostForVM_APIError(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - _, _ = w.Write([]byte("error")) - })) - defer server.Close() - - mnm := NewMultiNodeManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - }, nil) - - _, _, err := mnm.FindHostForVM(context.Background(), "vm1") - if err == nil { - t.Fatal("expected error for API failure") - } -} - -// --- Linked Clone Manager --- - -func TestManagerCloneFromVM_LinkedClone(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 100, Name: "template"}})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/clone", func(w http.ResponseWriter, r *http.Request) { - // Verify linked clone does NOT send full=1 - if err := r.ParseForm(); err != nil { - t.Fatal(err) - } - if r.FormValue("full") != "" { - t.Error("linked clone should not have full param") - } - _, _ = w.Write(envelope("UPID:pve1:linked-clone")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000/config", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(nil)) - }) - 
mux.HandleFunc("/api2/json/nodes/pve1/tasks/", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(TaskStatus{Status: "stopped", ExitStatus: "OK"})) - }) - server := httptest.NewServer(mux) - defer server.Close() - - mgr, err := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - CloneMode: "linked", - }, nil) - if err != nil { - t.Fatalf("NewProxmoxManager: %v", err) - } - - ref, err := mgr.CloneFromVM(context.Background(), "template", "linked-sandbox", 0, 0, "") - if err != nil { - t.Fatalf("CloneFromVM linked: %v", err) - } - if ref.Name != "linked-sandbox" { - t.Errorf("expected linked-sandbox, got %s", ref.Name) - } -} - -// --- Full lifecycle test --- - -func TestManagerFullLifecycle(t *testing.T) { - // Simulate: clone -> start -> get IP -> snapshot -> stop -> destroy - vmState := "stopped" - - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{ - {VMID: 100, Name: "template", Status: "stopped"}, - {VMID: 9000, Name: "lifecycle-vm", Status: vmState}, - })) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/clone", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:clone")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000/status/current", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(VMStatus{VMID: 9000, Name: "lifecycle-vm", Status: vmState})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000/status/start", func(w http.ResponseWriter, r *http.Request) { - vmState = "running" - _, _ = w.Write(envelope("UPID:start")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000/status/stop", func(w http.ResponseWriter, r *http.Request) { - vmState = "stopped" - _, _ = w.Write(envelope("UPID:stop")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000/status/shutdown", func(w http.ResponseWriter, r 
*http.Request) { - vmState = "stopped" - _, _ = w.Write(envelope("UPID:shutdown")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000/config", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(nil)) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000/snapshot", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope("UPID:snapshot")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000/agent/network-get-interfaces", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(struct { - Result []NetworkInterface `json:"result"` - }{ - Result: []NetworkInterface{ - { - Name: "eth0", HardwareAddress: "DE:AD:BE:EF:00:01", - IPAddresses: []GuestIPAddress{{IPAddressType: "ipv4", IPAddress: "10.1.1.100", Prefix: 24}}, - }, - }, - })) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/9000", func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodDelete { - _, _ = w.Write(envelope("UPID:delete")) - } - }) - mux.HandleFunc("/api2/json/nodes/pve1/tasks/", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(TaskStatus{Status: "stopped", ExitStatus: "OK"})) - }) - - server := httptest.NewServer(mux) - defer server.Close() - - mgr, err := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - if err != nil { - t.Fatalf("setup: %v", err) - } - - // 1. Start - err = mgr.StartVM(context.Background(), "lifecycle-vm") - if err != nil { - t.Fatalf("start: %v", err) - } - - // 2. Get state - state, err := mgr.GetVMState(context.Background(), "lifecycle-vm") - if err != nil { - t.Fatalf("get state: %v", err) - } - if state != provider.VMStateRunning { - t.Errorf("expected running after start, got %s", state) - } - - // 3. 
Get IP - ip, mac, err := mgr.GetIPAddress(context.Background(), "lifecycle-vm", 5*time.Second) - if err != nil { - t.Fatalf("get IP: %v", err) - } - if ip != "10.1.1.100" { - t.Errorf("expected 10.1.1.100, got %s", ip) - } - if mac != "DE:AD:BE:EF:00:01" { - t.Errorf("expected MAC, got %s", mac) - } - - // 4. Snapshot - snap, err := mgr.CreateSnapshot(context.Background(), "lifecycle-vm", "checkpoint", false) - if err != nil { - t.Fatalf("snapshot: %v", err) - } - if snap.Name != "checkpoint" { - t.Errorf("expected checkpoint, got %s", snap.Name) - } - - // 5. Stop - err = mgr.StopVM(context.Background(), "lifecycle-vm", false) - if err != nil { - t.Fatalf("stop: %v", err) - } - - // 6. Destroy - err = mgr.DestroyVM(context.Background(), "lifecycle-vm") - if err != nil { - t.Fatalf("destroy: %v", err) - } -} - -// --- Edge: VM names with special characters --- - -func TestManagerVMNameWithSpaces(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 100, Name: "my special vm"}})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/status/current", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(VMStatus{VMID: 100, Status: "stopped"})) - }) - server := httptest.NewServer(mux) - defer server.Close() - - mgr, _ := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - - state, err := mgr.GetVMState(context.Background(), "my special vm") - if err != nil { - t.Fatalf("GetVMState with spaces: %v", err) - } - if state != provider.VMStateShutOff { - t.Errorf("expected shut off, got %s", state) - } -} - -// --- Concurrent resolver access --- - -func TestVMResolverConcurrent(t *testing.T) { - client, server := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - vms := []VMListEntry{ - {VMID: 100, Name: "vm1"}, - {VMID: 101, Name: 
"vm2"}, - {VMID: 102, Name: "vm3"}, - } - _, _ = w.Write(envelope(vms)) - }) - defer server.Close() - - resolver := NewVMResolver(client) - - // Warm cache - _ = resolver.Refresh(context.Background()) - - // Launch concurrent lookups - done := make(chan error, 30) - for range 10 { - go func() { - _, err := resolver.ResolveVMID(context.Background(), "vm1") - done <- err - }() - go func() { - _, err := resolver.ResolveName(context.Background(), 101) - done <- err - }() - go func() { - err := resolver.Refresh(context.Background()) - done <- err - }() - } - - for range 30 { - if err := <-done; err != nil { - t.Errorf("concurrent operation failed: %v", err) - } - } -} - -// --- Verify StopVM routes --- - -func TestManagerStopVM_VerifyGracefulRoute(t *testing.T) { - var shutdownCalled, stopCalled bool - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 100, Name: "vm1"}})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/status/shutdown", func(w http.ResponseWriter, r *http.Request) { - shutdownCalled = true - _, _ = w.Write(envelope("UPID:shutdown")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/status/stop", func(w http.ResponseWriter, r *http.Request) { - stopCalled = true - _, _ = w.Write(envelope("UPID:stop")) - }) - mux.HandleFunc("/api2/json/nodes/pve1/tasks/", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(TaskStatus{Status: "stopped", ExitStatus: "OK"})) - }) - server := httptest.NewServer(mux) - defer server.Close() - - mgr, _ := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - - // Graceful: should call shutdown, not stop - _ = mgr.StopVM(context.Background(), "vm1", false) - if !shutdownCalled { - t.Error("graceful stop should call /shutdown") - } - if stopCalled { - t.Error("graceful stop should not call /stop") - } - - 
shutdownCalled = false - stopCalled = false - - // Force: should call stop, not shutdown - _ = mgr.StopVM(context.Background(), "vm1", true) - if !stopCalled { - t.Error("force stop should call /stop") - } - if shutdownCalled { - t.Error("force stop should not call /shutdown") - } -} - -// --- Concurrent VMID allocation --- - -func TestManagerCloneFromVM_ConcurrentSafe(t *testing.T) { - // Track which VMIDs have been claimed via clone requests. - // The mock dynamically adds cloned VMIDs to the "used" set so that - // NextVMID (which calls ListVMs) sees them on subsequent calls. - var mu sync.Mutex - clonedVMIDs := map[int]bool{} - - mux := http.NewServeMux() - - // List VMs - returns template + any previously cloned VMs - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - mu.Lock() - vms := []VMListEntry{{VMID: 100, Name: "template", Status: "stopped"}} - for id := range clonedVMIDs { - vms = append(vms, VMListEntry{VMID: id, Name: fmt.Sprintf("clone-%d", id), Status: "stopped"}) - } - mu.Unlock() - _, _ = w.Write(envelope(vms)) - }) - - // Clone endpoint - record the newid - mux.HandleFunc("/api2/json/nodes/pve1/qemu/100/clone", func(w http.ResponseWriter, r *http.Request) { - if err := r.ParseForm(); err != nil { - t.Errorf("parse form: %v", err) - } - newid := r.FormValue("newid") - var id int - _, _ = fmt.Sscanf(newid, "%d", &id) - mu.Lock() - clonedVMIDs[id] = true - mu.Unlock() - _, _ = w.Write(envelope(fmt.Sprintf("UPID:pve1:clone:%d", id))) - }) - - // Task status - always completed - mux.HandleFunc("/api2/json/nodes/pve1/tasks/", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(TaskStatus{Status: "stopped", ExitStatus: "OK"})) - }) - - // Config for any cloned VM - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - if strings.Contains(r.URL.Path, "/config") { - _, _ = w.Write(envelope(nil)) - return - } - w.WriteHeader(http.StatusNotFound) - }) - - server := 
httptest.NewServer(mux) - defer server.Close() - - mgr, err := NewProxmoxManager(Config{ - Host: server.URL, - TokenID: "t", - Secret: "s", - Node: "pve1", - VMIDStart: 9000, - VMIDEnd: 9999, - }, nil) - if err != nil { - t.Fatalf("NewProxmoxManager: %v", err) - } - - const N = 3 - type result struct { - ref provider.VMRef - err error - } - results := make(chan result, N) - - for range N { - go func() { - ref, err := mgr.CloneFromVM(context.Background(), "template", "concurrent-clone", 0, 0, "") - results <- result{ref, err} - }() - } - - seen := map[string]bool{} - for range N { - r := <-results - if r.err != nil { - t.Fatalf("CloneFromVM failed: %v", r.err) - } - if seen[r.ref.UUID] { - t.Errorf("duplicate VMID allocated: %s", r.ref.UUID) - } - seen[r.ref.UUID] = true - } - - if len(seen) != N { - t.Errorf("expected %d unique VMIDs, got %d", N, len(seen)) - } -} - -func TestConfigValidation_DefaultTimeout(t *testing.T) { - cfg := Config{ - Host: "https://pve:8006", TokenID: "t", Secret: "s", Node: "n", - } - if cfg.Timeout != 0 { - t.Fatalf("expected zero timeout before Validate, got %v", cfg.Timeout) - } - err := cfg.Validate() - if err != nil { - t.Fatalf("Validate: %v", err) - } - if cfg.Timeout != 5*time.Minute { - t.Errorf("expected default Timeout 5m, got %v", cfg.Timeout) - } -} - -func TestConfigValidation_CustomTimeout(t *testing.T) { - cfg := Config{ - Host: "https://pve:8006", TokenID: "t", Secret: "s", Node: "n", - Timeout: 10 * time.Second, - } - err := cfg.Validate() - if err != nil { - t.Fatalf("Validate: %v", err) - } - // Custom timeout should be preserved, not overwritten by default - if cfg.Timeout != 10*time.Second { - t.Errorf("expected custom Timeout 10s, got %v", cfg.Timeout) - } -} - -func TestManagerGetIPAddress_SkipsLinkLocal(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 203, Name: 
"link-local-only"}})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/203/agent/network-get-interfaces", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(struct { - Result []NetworkInterface `json:"result"` - }{ - Result: []NetworkInterface{ - { - Name: "eth0", - HardwareAddress: "AA:BB:CC:DD:EE:FF", - IPAddresses: []GuestIPAddress{{IPAddressType: "ipv4", IPAddress: "169.254.1.100", Prefix: 16}}, - }, - }, - })) - }) - server := httptest.NewServer(mux) - defer server.Close() - - mgr, _ := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - - _, _, err := mgr.GetIPAddress(ctx, "link-local-only", 30*time.Second) - if err == nil { - t.Fatal("expected timeout for link-local-only VM") - } -} - -func TestManagerGetIPAddress_SkipsInvalidIP(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/api2/json/nodes/pve1/qemu", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope([]VMListEntry{{VMID: 204, Name: "bad-ip-vm"}})) - }) - mux.HandleFunc("/api2/json/nodes/pve1/qemu/204/agent/network-get-interfaces", func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write(envelope(struct { - Result []NetworkInterface `json:"result"` - }{ - Result: []NetworkInterface{ - { - Name: "eth0", - HardwareAddress: "AA:BB:CC:DD:EE:FF", - IPAddresses: []GuestIPAddress{{IPAddressType: "ipv4", IPAddress: "not-an-ip", Prefix: 24}}, - }, - }, - })) - }) - server := httptest.NewServer(mux) - defer server.Close() - - mgr, _ := NewProxmoxManager(Config{ - Host: server.URL, TokenID: "t", Secret: "s", Node: "pve1", - VMIDStart: 9000, VMIDEnd: 9999, - }, nil) - - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - - _, _, err := mgr.GetIPAddress(ctx, "bad-ip-vm", 30*time.Second) - if err == nil { - t.Fatal("expected 
timeout for VM with invalid IP") - } -} - -// suppress unused import -var _ = fmt.Sprint diff --git a/fluid/internal/proxmox/multihost.go b/fluid/internal/proxmox/multihost.go deleted file mode 100644 index 44e294fb..00000000 --- a/fluid/internal/proxmox/multihost.go +++ /dev/null @@ -1,76 +0,0 @@ -package proxmox - -import ( - "context" - "fmt" - "log/slog" - - "github.com/aspectrr/fluid.sh/fluid/internal/provider" -) - -// MultiNodeManager implements provider.MultiHostLister for Proxmox. -// Currently operates on a single node; can be extended for multi-node clusters. -type MultiNodeManager struct { - client *Client - cfg Config - logger *slog.Logger -} - -// NewMultiNodeManager creates a new multi-node Proxmox manager. -func NewMultiNodeManager(cfg Config, logger *slog.Logger) *MultiNodeManager { - if logger == nil { - logger = slog.Default() - } - return &MultiNodeManager{ - client: NewClient(cfg, logger), - cfg: cfg, - logger: logger, - } -} - -// ListVMs returns all VMs on the configured Proxmox node. -func (m *MultiNodeManager) ListVMs(ctx context.Context) (*provider.MultiHostListResult, error) { - vms, err := m.client.ListVMs(ctx) - if err != nil { - return &provider.MultiHostListResult{ - HostErrors: []provider.HostError{ - { - HostName: m.cfg.Node, - HostAddress: m.cfg.Host, - Error: err.Error(), - }, - }, - }, nil - } - - result := &provider.MultiHostListResult{} - for _, vm := range vms { - state := vm.Status - result.VMs = append(result.VMs, &provider.MultiHostVMInfo{ - Name: vm.Name, - UUID: fmt.Sprintf("%d", vm.VMID), - State: state, - Persistent: true, - HostName: m.cfg.Node, - HostAddress: m.cfg.Host, - }) - } - - return result, nil -} - -// FindHostForVM searches for a VM by name and returns its host info. 
-func (m *MultiNodeManager) FindHostForVM(ctx context.Context, vmName string) (string, string, error) { - vms, err := m.client.ListVMs(ctx) - if err != nil { - return "", "", fmt.Errorf("list VMs: %w", err) - } - - for _, vm := range vms { - if vm.Name == vmName { - return m.cfg.Node, m.cfg.Host, nil - } - } - - return "", "", fmt.Errorf("VM %q not found on node %s", vmName, m.cfg.Node) -} diff --git a/fluid/internal/proxmox/naming.go b/fluid/internal/proxmox/naming.go deleted file mode 100644 index 0a7ce7b6..00000000 --- a/fluid/internal/proxmox/naming.go +++ /dev/null @@ -1,105 +0,0 @@ -package proxmox - -import ( - "context" - "fmt" - "sync" -) - -// VMResolver resolves VM names to VMIDs and vice versa. -// It caches the VM list and can be refreshed on demand. -type VMResolver struct { - client *Client - mu sync.RWMutex - byName map[string]int - byID map[int]string -} - -// NewVMResolver creates a new VMResolver backed by the given client. -func NewVMResolver(client *Client) *VMResolver { - return &VMResolver{ - client: client, - byName: make(map[string]int), - byID: make(map[int]string), - } -} - -// Refresh reloads the VM list from Proxmox and rebuilds the cache. -func (r *VMResolver) Refresh(ctx context.Context) error { - vms, err := r.client.ListVMs(ctx) - if err != nil { - return fmt.Errorf("refresh VM list: %w", err) - } - - r.mu.Lock() - defer r.mu.Unlock() - - r.byName = make(map[string]int, len(vms)) - r.byID = make(map[int]string, len(vms)) - for _, vm := range vms { - r.byName[vm.Name] = vm.VMID - r.byID[vm.VMID] = vm.Name - } - return nil -} - -// ResolveVMID returns the VMID for a given VM name. -// If the name is not in the cache, it refreshes first. 
-func (r *VMResolver) ResolveVMID(ctx context.Context, name string) (int, error) { - r.mu.RLock() - vmid, ok := r.byName[name] - r.mu.RUnlock() - if ok { - return vmid, nil - } - - // Cache miss - refresh and retry - if err := r.Refresh(ctx); err != nil { - return 0, err - } - - r.mu.RLock() - defer r.mu.RUnlock() - vmid, ok = r.byName[name] - if !ok { - return 0, fmt.Errorf("VM %q not found", name) - } - return vmid, nil -} - -// ResolveName returns the name for a given VMID. -func (r *VMResolver) ResolveName(ctx context.Context, vmid int) (string, error) { - r.mu.RLock() - name, ok := r.byID[vmid] - r.mu.RUnlock() - if ok { - return name, nil - } - - if err := r.Refresh(ctx); err != nil { - return "", err - } - - r.mu.RLock() - defer r.mu.RUnlock() - name, ok = r.byID[vmid] - if !ok { - return "", fmt.Errorf("VMID %d not found", vmid) - } - return name, nil -} - -// ListAll returns all cached VM entries. Refreshes if cache is empty. -func (r *VMResolver) ListAll(ctx context.Context) ([]VMListEntry, error) { - r.mu.RLock() - empty := len(r.byName) == 0 - r.mu.RUnlock() - - if empty { - if err := r.Refresh(ctx); err != nil { - return nil, err - } - } - - return r.client.ListVMs(ctx) -} diff --git a/fluid/internal/proxmox/types.go b/fluid/internal/proxmox/types.go deleted file mode 100644 index b82251ef..00000000 --- a/fluid/internal/proxmox/types.go +++ /dev/null @@ -1,107 +0,0 @@ -package proxmox - -// VMStatus represents the status of a QEMU VM from the Proxmox API. -type VMStatus struct { - VMID int `json:"vmid"` - Name string `json:"name"` - Status string `json:"status"` // "running", "stopped", "paused" - QMPStatus string `json:"qmpstatus,omitempty"` - CPU float64 `json:"cpu"` - Mem int64 `json:"mem"` - MaxMem int64 `json:"maxmem"` - MaxDisk int64 `json:"maxdisk"` - Uptime int64 `json:"uptime"` - PID int `json:"pid,omitempty"` - Template int `json:"template,omitempty"` // 1 if template - Lock string `json:"lock,omitempty"` // "clone", "migrate", etc. 
-} - -// VMConfig represents a VM's configuration from the Proxmox API. -type VMConfig struct { - Name string `json:"name"` - Memory int `json:"memory"` - Cores int `json:"cores"` - Sockets int `json:"sockets"` - CPU string `json:"cpu"` - Net0 string `json:"net0,omitempty"` - IDE2 string `json:"ide2,omitempty"` // cloud-init drive - SCSI0 string `json:"scsi0,omitempty"` - VirtIO0 string `json:"virtio0,omitempty"` - Boot string `json:"boot,omitempty"` - Agent string `json:"agent,omitempty"` // "1" if QEMU guest agent enabled - IPConfig0 string `json:"ipconfig0,omitempty"` - SSHKeys string `json:"sshkeys,omitempty"` - CIUser string `json:"ciuser,omitempty"` -} - -// NodeStatus represents a Proxmox node's resource status. -type NodeStatus struct { - CPU float64 `json:"cpu"` - MaxCPU int `json:"maxcpu"` - Memory MemoryStatus `json:"memory"` - RootFS DiskStatus `json:"rootfs"` - Uptime int64 `json:"uptime"` - KVersion string `json:"kversion"` -} - -// MemoryStatus is memory info from node status. -type MemoryStatus struct { - Total int64 `json:"total"` - Used int64 `json:"used"` - Free int64 `json:"free"` -} - -// DiskStatus is disk info from node status. -type DiskStatus struct { - Total int64 `json:"total"` - Used int64 `json:"used"` - Available int64 `json:"avail"` -} - -// NetworkInterface represents a network interface from the QEMU guest agent. -type NetworkInterface struct { - Name string `json:"name"` - HardwareAddress string `json:"hardware-address"` - IPAddresses []GuestIPAddress `json:"ip-addresses"` -} - -// GuestIPAddress is an IP address from the QEMU guest agent. -type GuestIPAddress struct { - IPAddressType string `json:"ip-address-type"` // "ipv4" or "ipv6" - IPAddress string `json:"ip-address"` - Prefix int `json:"prefix"` -} - -// TaskStatus represents the status of an asynchronous Proxmox task. 
-type TaskStatus struct { - Status string `json:"status"` // "running", "stopped" - ExitStatus string `json:"exitstatus,omitempty"` // "OK" on success - Type string `json:"type"` - ID string `json:"id"` - Node string `json:"node"` - PID int `json:"pid"` - StartTime int64 `json:"starttime"` - EndTime int64 `json:"endtime,omitempty"` -} - -// VMListEntry represents a VM in the list returned by GET /nodes/{node}/qemu. -type VMListEntry struct { - VMID int `json:"vmid"` - Name string `json:"name"` - Status string `json:"status"` - Template int `json:"template,omitempty"` - MaxMem int64 `json:"maxmem"` - MaxDisk int64 `json:"maxdisk"` - CPU float64 `json:"cpu"` - Mem int64 `json:"mem"` - Uptime int64 `json:"uptime"` -} - -// SnapshotEntry represents a snapshot from Proxmox. -type SnapshotEntry struct { - Name string `json:"name"` - Description string `json:"description,omitempty"` - SnapTime int64 `json:"snaptime,omitempty"` - Parent string `json:"parent,omitempty"` - VMState int `json:"vmstate,omitempty"` // 1 if includes RAM state -} diff --git a/fluid/internal/sshca/access.go b/fluid/internal/sshca/access.go deleted file mode 100755 index be1860ef..00000000 --- a/fluid/internal/sshca/access.go +++ /dev/null @@ -1,519 +0,0 @@ -// Package sshca provides SSH Certificate Authority management for ephemeral sandbox access. -package sshca - -import ( - "context" - "crypto/sha256" - "encoding/base64" - "fmt" - "strings" - "sync" - "time" - - "github.com/google/uuid" -) - -// AccessService orchestrates SSH certificate-based access to sandboxes. -// It handles certificate issuance, session tracking, and cleanup. -type AccessService struct { - ca *CA - store CertificateStore - vmLookup VMInfoProvider - timeNowFn func() time.Time - mu sync.RWMutex - - // Configuration - defaultTTL time.Duration - maxTTL time.Duration - sshPort int - username string -} - -// VMInfoProvider defines the interface for looking up VM/sandbox information. 
-type VMInfoProvider interface { - // GetSandboxIP returns the IP address of a sandbox. - GetSandboxIP(ctx context.Context, sandboxID string) (string, error) - - // GetSandboxVMName returns the VM name for a sandbox. - GetSandboxVMName(ctx context.Context, sandboxID string) (string, error) - - // IsSandboxRunning checks if the sandbox is in a running state. - IsSandboxRunning(ctx context.Context, sandboxID string) (bool, error) -} - -// AccessServiceConfig configures the access service. -type AccessServiceConfig struct { - // DefaultTTL is the default certificate lifetime. - DefaultTTL time.Duration - - // MaxTTL is the maximum allowed certificate lifetime. - MaxTTL time.Duration - - // SSHPort is the SSH port on VMs (default 22). - SSHPort int - - // Username is the SSH username (default "sandbox"). - Username string -} - -// DefaultAccessServiceConfig returns sensible defaults. -func DefaultAccessServiceConfig() AccessServiceConfig { - return AccessServiceConfig{ - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - SSHPort: 22, - Username: "sandbox", - } -} - -// AccessServiceOption configures the AccessService. -type AccessServiceOption func(*AccessService) - -// WithAccessTimeNow overrides the clock (useful for tests). -func WithAccessTimeNow(fn func() time.Time) AccessServiceOption { - return func(s *AccessService) { s.timeNowFn = fn } -} - -// NewAccessService creates a new access service. 
-func NewAccessService(ca *CA, store CertificateStore, vmLookup VMInfoProvider, cfg AccessServiceConfig, opts ...AccessServiceOption) *AccessService { - if cfg.DefaultTTL == 0 { - cfg.DefaultTTL = 5 * time.Minute - } - if cfg.MaxTTL == 0 { - cfg.MaxTTL = 10 * time.Minute - } - if cfg.SSHPort == 0 { - cfg.SSHPort = 22 - } - if cfg.Username == "" { - cfg.Username = "sandbox" - } - - s := &AccessService{ - ca: ca, - store: store, - vmLookup: vmLookup, - timeNowFn: time.Now, - defaultTTL: cfg.DefaultTTL, - maxTTL: cfg.MaxTTL, - sshPort: cfg.SSHPort, - username: cfg.Username, - } - - for _, opt := range opts { - opt(s) - } - - return s -} - -// RequestAccess issues a short-lived SSH certificate for sandbox access. -func (s *AccessService) RequestAccess(ctx context.Context, req *AccessRequest) (*AccessResponse, error) { - s.mu.Lock() - defer s.mu.Unlock() - - // Validate request - if req.SandboxID == "" { - return nil, fmt.Errorf("sandbox_id is required") - } - if req.UserID == "" { - return nil, fmt.Errorf("user_id is required") - } - if req.PublicKey == "" { - return nil, fmt.Errorf("public_key is required") - } - - // Determine TTL - ttl := time.Duration(req.TTLMinutes) * time.Minute - if ttl == 0 { - ttl = s.defaultTTL - } - if ttl < time.Minute { - ttl = time.Minute - } - if ttl > s.maxTTL { - ttl = s.maxTTL - } - - // Check if sandbox is running - running, err := s.vmLookup.IsSandboxRunning(ctx, req.SandboxID) - if err != nil { - return nil, fmt.Errorf("check sandbox status: %w", err) - } - if !running { - return nil, fmt.Errorf("sandbox %s is not running", req.SandboxID) - } - - // Get sandbox IP - vmIP, err := s.vmLookup.GetSandboxIP(ctx, req.SandboxID) - if err != nil { - return nil, fmt.Errorf("get sandbox IP: %w", err) - } - if vmIP == "" { - return nil, fmt.Errorf("sandbox %s has no IP address", req.SandboxID) - } - - // Get VM name for certificate identity - vmName, err := s.vmLookup.GetSandboxVMName(ctx, req.SandboxID) - if err != nil { - return nil, 
fmt.Errorf("get sandbox VM name: %w", err) - } - - now := s.timeNowFn() - if req.RequestTime.IsZero() { - req.RequestTime = now - } - - // Issue certificate - certReq := &CertificateRequest{ - UserID: req.UserID, - VMID: vmName, - SandboxID: req.SandboxID, - PublicKey: req.PublicKey, - TTL: ttl, - Principals: []string{s.username}, - SourceIP: req.SourceIP, - RequestTime: req.RequestTime, - } - - cert, err := s.ca.IssueCertificate(ctx, certReq) - if err != nil { - return nil, fmt.Errorf("issue certificate: %w", err) - } - - // Calculate public key fingerprint - fingerprint := s.calculateFingerprint(req.PublicKey) - - // Persist certificate record - record := &CertificateRecord{ - ID: cert.ID, - SandboxID: req.SandboxID, - UserID: req.UserID, - VMID: vmName, - Identity: cert.Identity, - SerialNumber: cert.SerialNumber, - Principals: cert.Principals, - PublicKeyFingerprint: fingerprint, - ValidAfter: cert.ValidAfter, - ValidBefore: cert.ValidBefore, - SourceIP: req.SourceIP, - Status: CertStatusActive, - IssuedAt: now, - } - - if s.store != nil { - if err := s.store.CreateCertificate(ctx, record); err != nil { - return nil, fmt.Errorf("persist certificate: %w", err) - } - } - - // Build response - validUntil := cert.ValidBefore - ttlSeconds := int(validUntil.Sub(now).Seconds()) - - connectCmd := fmt.Sprintf("ssh -i /path/to/key -o CertificateFile=/path/to/key-cert.pub -o StrictHostKeyChecking=no %s@%s", - s.username, vmIP) - - return &AccessResponse{ - CertificateID: cert.ID, - Certificate: cert.Certificate, - VMIPAddress: vmIP, - SSHPort: s.sshPort, - Username: s.username, - ValidUntil: validUntil, - TTLSeconds: ttlSeconds, - ConnectCommand: connectCmd, - }, nil -} - -// RevokeAccess revokes a certificate, immediately terminating access. 
-func (s *AccessService) RevokeAccess(ctx context.Context, certificateID, reason string) error { - s.mu.Lock() - defer s.mu.Unlock() - - if s.store == nil { - return fmt.Errorf("no certificate store configured") - } - - // Get certificate - cert, err := s.store.GetCertificate(ctx, certificateID) - if err != nil { - return fmt.Errorf("get certificate: %w", err) - } - - if cert.Status == CertStatusRevoked { - return ErrCertAlreadyRevoked - } - - // Revoke certificate - if err := s.store.RevokeCertificate(ctx, certificateID, reason); err != nil { - return fmt.Errorf("revoke certificate: %w", err) - } - - // End any active sessions for this certificate - sessions, err := s.store.GetSessionsByCertificate(ctx, certificateID) - if err != nil { - return fmt.Errorf("get sessions: %w", err) - } - - for _, session := range sessions { - if session.Status == SessionStatusActive || session.Status == SessionStatusPending { - now := s.timeNowFn() - if err := s.store.EndSession(ctx, session.ID, now, "certificate revoked: "+reason); err != nil { - // Log but continue - _ = err - } - } - } - - return nil -} - -// RecordSessionStart records the start of an SSH session. 
-func (s *AccessService) RecordSessionStart(ctx context.Context, certificateID, sourceIP string) (string, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.store == nil { - return "", fmt.Errorf("no certificate store configured") - } - - // Get certificate - cert, err := s.store.GetCertificate(ctx, certificateID) - if err != nil { - return "", fmt.Errorf("get certificate: %w", err) - } - - // Validate certificate is still valid - if cert.Status != CertStatusActive { - return "", fmt.Errorf("certificate status is %s, not active", cert.Status) - } - if cert.IsExpired() { - return "", fmt.Errorf("certificate has expired") - } - - // Get VM IP - vmIP, err := s.vmLookup.GetSandboxIP(ctx, cert.SandboxID) - if err != nil { - vmIP = "" // Non-fatal - } - - // Create session record - sessionID := s.generateSessionID() - now := s.timeNowFn() - - session := &AccessSession{ - ID: sessionID, - CertificateID: certificateID, - SandboxID: cert.SandboxID, - UserID: cert.UserID, - VMID: cert.VMID, - VMIPAddress: vmIP, - SourceIP: sourceIP, - Status: SessionStatusActive, - StartedAt: now, - } - - if err := s.store.CreateSession(ctx, session); err != nil { - return "", fmt.Errorf("create session: %w", err) - } - - // Update certificate last used - if err := s.store.UpdateCertificateLastUsed(ctx, certificateID, now); err != nil { - // Non-fatal, just ignore - _ = err - } - - return sessionID, nil -} - -// RecordSessionEnd records the end of an SSH session. -func (s *AccessService) RecordSessionEnd(ctx context.Context, sessionID, reason string) error { - s.mu.Lock() - defer s.mu.Unlock() - - if s.store == nil { - return fmt.Errorf("no certificate store configured") - } - - now := s.timeNowFn() - return s.store.EndSession(ctx, sessionID, now, reason) -} - -// GetCertificate retrieves certificate information. 
-func (s *AccessService) GetCertificate(ctx context.Context, id string) (*CertificateRecord, error) { - if s.store == nil { - return nil, fmt.Errorf("no certificate store configured") - } - return s.store.GetCertificate(ctx, id) -} - -// ListCertificates lists certificates with optional filtering. -func (s *AccessService) ListCertificates(ctx context.Context, filter CertificateFilter, opts *ListOptions) ([]*CertificateRecord, error) { - if s.store == nil { - return nil, fmt.Errorf("no certificate store configured") - } - return s.store.ListCertificates(ctx, filter, opts) -} - -// GetActiveCertificatesForSandbox returns all active certificates for a sandbox. -func (s *AccessService) GetActiveCertificatesForSandbox(ctx context.Context, sandboxID string) ([]*CertificateRecord, error) { - if s.store == nil { - return nil, fmt.Errorf("no certificate store configured") - } - filter := CertificateFilter{ - SandboxID: &sandboxID, - ActiveOnly: true, - } - return s.store.ListCertificates(ctx, filter, nil) -} - -// GetActiveSessionsForSandbox returns all active sessions for a sandbox. -func (s *AccessService) GetActiveSessionsForSandbox(ctx context.Context, sandboxID string) ([]*AccessSession, error) { - if s.store == nil { - return nil, fmt.Errorf("no certificate store configured") - } - filter := SessionFilter{ - SandboxID: &sandboxID, - ActiveOnly: true, - } - return s.store.ListSessions(ctx, filter, nil) -} - -// CleanupExpiredCertificates marks expired certificates and ends associated sessions. 
-func (s *AccessService) CleanupExpiredCertificates(ctx context.Context) (int, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.store == nil { - return 0, fmt.Errorf("no certificate store configured") - } - - // Mark expired certificates - count, err := s.store.ExpireCertificates(ctx) - if err != nil { - return 0, fmt.Errorf("expire certificates: %w", err) - } - - // End sessions for expired certificates - filter := SessionFilter{ - ActiveOnly: true, - } - sessions, err := s.store.ListSessions(ctx, filter, nil) - if err != nil { - return count, fmt.Errorf("list sessions: %w", err) - } - - now := s.timeNowFn() - for _, session := range sessions { - // Check if certificate is expired - cert, err := s.store.GetCertificate(ctx, session.CertificateID) - if err != nil { - continue - } - if cert.IsExpired() || cert.Status == CertStatusExpired { - if err := s.store.EndSession(ctx, session.ID, now, "certificate expired"); err != nil { - // Log but continue - _ = err - } - } - } - - return count, nil -} - -// RevokeAllForSandbox revokes all certificates for a sandbox. -// This is typically called when destroying a sandbox. 
-func (s *AccessService) RevokeAllForSandbox(ctx context.Context, sandboxID, reason string) error { - s.mu.Lock() - defer s.mu.Unlock() - - if s.store == nil { - return nil // No store configured, nothing to revoke - } - - // Get all active certificates for the sandbox - filter := CertificateFilter{ - SandboxID: &sandboxID, - ActiveOnly: true, - } - certs, err := s.store.ListCertificates(ctx, filter, nil) - if err != nil { - return fmt.Errorf("list certificates: %w", err) - } - - // Revoke each certificate - for _, cert := range certs { - if err := s.store.RevokeCertificate(ctx, cert.ID, reason); err != nil { - // Log but continue - _ = err - } - } - - // End all active sessions - sessionFilter := SessionFilter{ - SandboxID: &sandboxID, - ActiveOnly: true, - } - sessions, err := s.store.ListSessions(ctx, sessionFilter, nil) - if err != nil { - return fmt.Errorf("list sessions: %w", err) - } - - now := s.timeNowFn() - for _, session := range sessions { - if err := s.store.EndSession(ctx, session.ID, now, reason); err != nil { - // Log but continue - _ = err - } - } - - return nil -} - -// GetCAPublicKey returns the CA public key for VM configuration. -func (s *AccessService) GetCAPublicKey() (string, error) { - return s.ca.GetPublicKey() -} - -// calculateFingerprint computes the SHA256 fingerprint of a public key. -func (s *AccessService) calculateFingerprint(publicKey string) string { - parts := strings.SplitN(publicKey, " ", 3) - if len(parts) < 2 { - return "" - } - - keyData, err := base64.StdEncoding.DecodeString(parts[1]) - if err != nil { - return "" - } - - hash := sha256.Sum256(keyData) - return fmt.Sprintf("SHA256:%s", base64.StdEncoding.EncodeToString(hash[:])) -} - -// generateSessionID generates a unique session identifier. 
-func (s *AccessService) generateSessionID() string { - id := uuid.NewString() - return fmt.Sprintf("SESS-%s", strings.ToUpper(id[:8])) -} - -// StartCleanupRoutine starts a background goroutine to periodically clean up expired certificates. -func (s *AccessService) StartCleanupRoutine(ctx context.Context, interval time.Duration) { - go func() { - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if _, err := s.CleanupExpiredCertificates(ctx); err != nil { - // Log error but continue - _ = err - } - } - } - }() -} diff --git a/fluid/internal/sshca/ca_test.go b/fluid/internal/sshca/ca_test.go deleted file mode 100755 index 223683f4..00000000 --- a/fluid/internal/sshca/ca_test.go +++ /dev/null @@ -1,567 +0,0 @@ -package sshca - -import ( - "context" - "os" - "path/filepath" - "strings" - "testing" - "time" -) - -func TestGenerateCA(t *testing.T) { - // Create temp directory - tempDir, err := os.MkdirTemp("", "sshca-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "test_ca") - comment := "test-ssh-ca" - - // Generate CA - err = GenerateCA(keyPath, comment) - if err != nil { - t.Fatalf("GenerateCA failed: %v", err) - } - - // Check private key exists - if _, err := os.Stat(keyPath); os.IsNotExist(err) { - t.Error("private key was not created") - } - - // Check public key exists - pubKeyPath := keyPath + ".pub" - if _, err := os.Stat(pubKeyPath); os.IsNotExist(err) { - t.Error("public key was not created") - } - - // Check private key permissions - info, err := os.Stat(keyPath) - if err != nil { - t.Fatalf("failed to stat private key: %v", err) - } - if info.Mode().Perm() != 0o600 { - t.Errorf("private key has wrong permissions: %o, expected 0600", info.Mode().Perm()) - } - - // Check public key content - pubKeyBytes, err := os.ReadFile(pubKeyPath) - if err != nil { - 
t.Fatalf("failed to read public key: %v", err) - } - pubKey := string(pubKeyBytes) - if !strings.HasPrefix(pubKey, "ssh-ed25519 ") { - t.Errorf("public key has wrong format: %s", pubKey[:min(len(pubKey), 50)]) - } - if !strings.Contains(pubKey, comment) { - t.Errorf("public key does not contain comment: %s", pubKey) - } -} - -func TestGenerateUserKeyPair(t *testing.T) { - comment := "test-user-key" - - privateKey, publicKey, err := GenerateUserKeyPair(comment) - if err != nil { - t.Fatalf("GenerateUserKeyPair failed: %v", err) - } - - // Check private key format - if !strings.Contains(privateKey, "OPENSSH PRIVATE KEY") { - t.Error("private key is not in OpenSSH format") - } - - // Check public key format - if !strings.HasPrefix(publicKey, "ssh-ed25519 ") { - t.Errorf("public key has wrong format: %s", publicKey[:min(len(publicKey), 50)]) - } - if !strings.Contains(publicKey, comment) { - t.Errorf("public key does not contain comment") - } -} - -func TestNewCA(t *testing.T) { - cfg := DefaultConfig() - cfg.CAKeyPath = "/nonexistent/path" - cfg.EnforceKeyPermissions = false - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - if ca == nil { - t.Error("NewCA returned nil") - } else if ca.sshKeygen == "" { - t.Error("ssh-keygen path not set") - } -} - -func TestCAInitialize(t *testing.T) { - // Create temp directory with CA keys - tempDir, err := os.MkdirTemp("", "sshca-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "test_ca") - err = GenerateCA(keyPath, "test-ca") - if err != nil { - t.Fatalf("failed to generate CA: %v", err) - } - - // Create CA instance - cfg := Config{ - CAKeyPath: keyPath, - CAPubKeyPath: keyPath + ".pub", - WorkDir: tempDir, - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - DefaultPrincipals: []string{"sandbox"}, - EnforceKeyPermissions: true, - } - - ca, err := NewCA(cfg) - if err != 
nil { - t.Fatalf("NewCA failed: %v", err) - } - - // Initialize CA - err = ca.Initialize(context.Background()) - if err != nil { - t.Fatalf("Initialize failed: %v", err) - } - - if !ca.initialized { - t.Error("CA not marked as initialized") - } - - // Get public key - pubKey, err := ca.GetPublicKey() - if err != nil { - t.Fatalf("GetPublicKey failed: %v", err) - } - if !strings.HasPrefix(pubKey, "ssh-ed25519 ") { - t.Errorf("public key has wrong format: %s", pubKey[:min(len(pubKey), 50)]) - } -} - -func TestCAInitializeNotFound(t *testing.T) { - cfg := Config{ - CAKeyPath: "/nonexistent/path/ssh_ca", - CAPubKeyPath: "/nonexistent/path/ssh_ca.pub", - WorkDir: "/tmp", - EnforceKeyPermissions: false, - } - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - err = ca.Initialize(context.Background()) - if err == nil { - t.Error("Initialize should have failed with nonexistent key") - } -} - -func TestCAIssueCertificate(t *testing.T) { - // Create temp directory with CA keys - tempDir, err := os.MkdirTemp("", "sshca-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "test_ca") - err = GenerateCA(keyPath, "test-ca") - if err != nil { - t.Fatalf("failed to generate CA: %v", err) - } - - // Generate user key - _, userPubKey, err := GenerateUserKeyPair("test-user") - if err != nil { - t.Fatalf("failed to generate user key: %v", err) - } - - // Create and initialize CA - cfg := Config{ - CAKeyPath: keyPath, - CAPubKeyPath: keyPath + ".pub", - WorkDir: tempDir, - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - DefaultPrincipals: []string{"sandbox"}, - EnforceKeyPermissions: true, - } - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - err = ca.Initialize(context.Background()) - if err != nil { - t.Fatalf("Initialize failed: %v", err) - } - - // Issue certificate - req := 
&CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: userPubKey, - TTL: 5 * time.Minute, - Principals: []string{"sandbox"}, - SourceIP: "127.0.0.1", - RequestTime: time.Now(), - } - - cert, err := ca.IssueCertificate(context.Background(), req) - if err != nil { - t.Fatalf("IssueCertificate failed: %v", err) - } - - // Validate certificate - if cert.ID == "" { - t.Error("certificate ID is empty") - } - if cert.Identity == "" { - t.Error("certificate identity is empty") - } - if !strings.Contains(cert.Identity, "test-user") { - t.Errorf("identity should contain user ID: %s", cert.Identity) - } - if !strings.Contains(cert.Identity, "test-vm") { - t.Errorf("identity should contain VM ID: %s", cert.Identity) - } - if cert.Certificate == "" { - t.Error("certificate content is empty") - } - if !strings.Contains(cert.Certificate, "cert-v01@openssh.com") { - t.Error("certificate is not in OpenSSH certificate format") - } - if cert.SerialNumber == 0 { - t.Error("serial number should not be zero") - } - if len(cert.Principals) == 0 { - t.Error("principals should not be empty") - } - if cert.ValidBefore.Before(cert.ValidAfter) { - t.Error("ValidBefore should be after ValidAfter") - } - - // Check certificate info - info := cert.GetCertInfo() - if info.IsExpired { - t.Error("certificate should not be expired immediately after issuance") - } - if info.TimeToExpiry <= 0 { - t.Error("time to expiry should be positive") - } -} - -func TestCAIssueCertificateNotInitialized(t *testing.T) { - cfg := DefaultConfig() - cfg.EnforceKeyPermissions = false - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - req := &CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAITest test", - } - - _, err = ca.IssueCertificate(context.Background(), req) - if err != ErrCANotInitialized { - t.Errorf("expected ErrCANotInitialized, got: %v", 
err) - } -} - -func TestCAValidateRequest(t *testing.T) { - // Create temp directory with CA keys - tempDir, err := os.MkdirTemp("", "sshca-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "test_ca") - err = GenerateCA(keyPath, "test-ca") - if err != nil { - t.Fatalf("failed to generate CA: %v", err) - } - - cfg := Config{ - CAKeyPath: keyPath, - CAPubKeyPath: keyPath + ".pub", - WorkDir: tempDir, - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - DefaultPrincipals: []string{"sandbox"}, - EnforceKeyPermissions: true, - } - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - err = ca.Initialize(context.Background()) - if err != nil { - t.Fatalf("Initialize failed: %v", err) - } - - tests := []struct { - name string - req *CertificateRequest - wantErr bool - }{ - { - name: "missing UserID", - req: &CertificateRequest{ - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAITest test", - }, - wantErr: true, - }, - { - name: "missing VMID", - req: &CertificateRequest{ - UserID: "test-user", - SandboxID: "SBX-123", - PublicKey: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAITest test", - }, - wantErr: true, - }, - { - name: "missing PublicKey", - req: &CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - }, - wantErr: true, - }, - { - name: "invalid PublicKey format", - req: &CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: "not-a-valid-key", - }, - wantErr: true, - }, - { - name: "unsupported key type", - req: &CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: "ssh-dss AAAAB3NzaC1kc3MAAACBA test", - }, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := ca.IssueCertificate(context.Background(), 
tt.req) - if (err != nil) != tt.wantErr { - t.Errorf("IssueCertificate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestCATTLCapping(t *testing.T) { - // Create temp directory with CA keys - tempDir, err := os.MkdirTemp("", "sshca-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "test_ca") - err = GenerateCA(keyPath, "test-ca") - if err != nil { - t.Fatalf("failed to generate CA: %v", err) - } - - _, userPubKey, err := GenerateUserKeyPair("test-user") - if err != nil { - t.Fatalf("failed to generate user key: %v", err) - } - - cfg := Config{ - CAKeyPath: keyPath, - CAPubKeyPath: keyPath + ".pub", - WorkDir: tempDir, - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - DefaultPrincipals: []string{"sandbox"}, - EnforceKeyPermissions: true, - } - - ca, err := NewCA(cfg) - if err != nil { - t.Fatalf("NewCA failed: %v", err) - } - - err = ca.Initialize(context.Background()) - if err != nil { - t.Fatalf("Initialize failed: %v", err) - } - - // Request certificate with TTL exceeding max - req := &CertificateRequest{ - UserID: "test-user", - VMID: "test-vm", - SandboxID: "SBX-123", - PublicKey: userPubKey, - TTL: 30 * time.Minute, // Exceeds MaxTTL - } - - cert, err := ca.IssueCertificate(context.Background(), req) - if err != nil { - t.Fatalf("IssueCertificate failed: %v", err) - } - - // Check that TTL was capped - actualTTL := cert.ValidBefore.Sub(cert.IssuedAt) - // Allow for some clock skew (the cert adds 1 minute before valid_after) - if actualTTL > 11*time.Minute { - t.Errorf("TTL should be capped to MaxTTL (10m), got: %v", actualTTL) - } -} - -func TestCertificateConnectCommand(t *testing.T) { - cert := &Certificate{ - Principals: []string{"sandbox"}, - } - - cmd := cert.SSHConnectCommand("/path/to/key", "/path/to/key-cert.pub", "192.168.1.100", 22) - - if !strings.Contains(cmd, "-i /path/to/key") { - t.Error("command 
should contain private key path") - } - if !strings.Contains(cmd, "CertificateFile=/path/to/key-cert.pub") { - t.Error("command should contain certificate path") - } - if !strings.Contains(cmd, "sandbox@192.168.1.100") { - t.Error("command should contain user@host") - } - if !strings.Contains(cmd, "-p 22") { - t.Error("command should contain port") - } -} - -func TestEnsureSSHCA(t *testing.T) { - t.Run("creates CA when files do not exist", func(t *testing.T) { - tempDir, err := os.MkdirTemp("", "sshca-ensure-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "ssh-ca", "ssh-ca") - pubPath := keyPath + ".pub" - - created, err := EnsureSSHCA(keyPath, pubPath, "test-ca") - if err != nil { - t.Fatalf("EnsureSSHCA failed: %v", err) - } - if !created { - t.Error("expected created=true when files did not exist") - } - - // Verify files exist - if _, err := os.Stat(keyPath); os.IsNotExist(err) { - t.Error("private key was not created") - } - if _, err := os.Stat(pubPath); os.IsNotExist(err) { - t.Error("public key was not created") - } - }) - - t.Run("does not recreate existing CA", func(t *testing.T) { - tempDir, err := os.MkdirTemp("", "sshca-ensure-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "ssh-ca") - pubPath := keyPath + ".pub" - - // Create CA first - if err := GenerateCA(keyPath, "test-ca"); err != nil { - t.Fatalf("failed to generate CA: %v", err) - } - - // Get original file info - origInfo, _ := os.Stat(keyPath) - origModTime := origInfo.ModTime() - - // Call EnsureSSHCA - created, err := EnsureSSHCA(keyPath, pubPath, "test-ca") - if err != nil { - t.Fatalf("EnsureSSHCA failed: %v", err) - } - if created { - t.Error("expected created=false when files already exist") - } - - // Verify file was not modified - newInfo, _ := 
os.Stat(keyPath) - if newInfo.ModTime() != origModTime { - t.Error("existing CA key was modified") - } - }) - - t.Run("errors on inconsistent state", func(t *testing.T) { - tempDir, err := os.MkdirTemp("", "sshca-ensure-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - keyPath := filepath.Join(tempDir, "ssh-ca") - pubPath := keyPath + ".pub" - - // Create only the private key - if err := os.MkdirAll(tempDir, 0o700); err != nil { - t.Fatalf("failed to create dir: %v", err) - } - if err := os.WriteFile(keyPath, []byte("fake-key"), 0o600); err != nil { - t.Fatalf("failed to write key: %v", err) - } - - // EnsureSSHCA should error due to inconsistent state - _, err = EnsureSSHCA(keyPath, pubPath, "test-ca") - if err == nil { - t.Error("expected error for inconsistent state (only private key exists)") - } - if !strings.Contains(err.Error(), "inconsistent") { - t.Errorf("expected inconsistent error, got: %v", err) - } - }) -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/fluid/internal/sshca/memstore.go b/fluid/internal/sshca/memstore.go deleted file mode 100755 index 0c25cf35..00000000 --- a/fluid/internal/sshca/memstore.go +++ /dev/null @@ -1,320 +0,0 @@ -// Package sshca provides SSH Certificate Authority management for ephemeral sandbox access. -package sshca - -import ( - "context" - "sync" - "time" -) - -// MemoryStore provides an in-memory implementation of CertificateStore. -// This is primarily useful for development and testing. -// For production use, implement a database-backed store. -type MemoryStore struct { - mu sync.RWMutex - certificates map[string]*CertificateRecord - sessions map[string]*AccessSession -} - -// NewMemoryStore creates a new in-memory certificate store. 
-func NewMemoryStore() *MemoryStore { - return &MemoryStore{ - certificates: make(map[string]*CertificateRecord), - sessions: make(map[string]*AccessSession), - } -} - -// CreateCertificate persists a new certificate record. -func (s *MemoryStore) CreateCertificate(ctx context.Context, cert *CertificateRecord) error { - s.mu.Lock() - defer s.mu.Unlock() - - s.certificates[cert.ID] = cert - return nil -} - -// GetCertificate retrieves a certificate by ID. -func (s *MemoryStore) GetCertificate(ctx context.Context, id string) (*CertificateRecord, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - cert, ok := s.certificates[id] - if !ok { - return nil, ErrCertNotFound - } - return cert, nil -} - -// GetCertificateBySerial retrieves a certificate by serial number. -func (s *MemoryStore) GetCertificateBySerial(ctx context.Context, serial uint64) (*CertificateRecord, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - for _, cert := range s.certificates { - if cert.SerialNumber == serial { - return cert, nil - } - } - return nil, ErrCertNotFound -} - -// ListCertificates retrieves certificates matching the filter. 
-func (s *MemoryStore) ListCertificates(ctx context.Context, filter CertificateFilter, opts *ListOptions) ([]*CertificateRecord, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - var results []*CertificateRecord - now := time.Now() - - for _, cert := range s.certificates { - // Apply filters - if filter.SandboxID != nil && cert.SandboxID != *filter.SandboxID { - continue - } - if filter.UserID != nil && cert.UserID != *filter.UserID { - continue - } - if filter.VMID != nil && cert.VMID != *filter.VMID { - continue - } - if filter.Status != nil && cert.Status != *filter.Status { - continue - } - if filter.ActiveOnly { - if cert.Status != CertStatusActive || now.After(cert.ValidBefore) { - continue - } - } - if filter.IssuedAfter != nil && cert.IssuedAt.Before(*filter.IssuedAfter) { - continue - } - if filter.IssuedBefore != nil && cert.IssuedAt.After(*filter.IssuedBefore) { - continue - } - - results = append(results, cert) - } - - // Apply pagination - if opts != nil { - if opts.Offset > 0 && opts.Offset < len(results) { - results = results[opts.Offset:] - } else if opts.Offset >= len(results) { - results = nil - } - if opts.Limit > 0 && opts.Limit < len(results) { - results = results[:opts.Limit] - } - } - - return results, nil -} - -// UpdateCertificateStatus updates the status of a certificate. -func (s *MemoryStore) UpdateCertificateStatus(ctx context.Context, id string, status CertStatus) error { - s.mu.Lock() - defer s.mu.Unlock() - - cert, ok := s.certificates[id] - if !ok { - return ErrCertNotFound - } - - cert.Status = status - return nil -} - -// RevokeCertificate marks a certificate as revoked. 
-func (s *MemoryStore) RevokeCertificate(ctx context.Context, id string, reason string) error { - s.mu.Lock() - defer s.mu.Unlock() - - cert, ok := s.certificates[id] - if !ok { - return ErrCertNotFound - } - - if cert.Status == CertStatusRevoked { - return ErrCertAlreadyRevoked - } - - now := time.Now() - cert.Status = CertStatusRevoked - cert.RevokedAt = &now - cert.RevokeReason = reason - return nil -} - -// UpdateCertificateLastUsed updates the last used timestamp. -func (s *MemoryStore) UpdateCertificateLastUsed(ctx context.Context, id string, at time.Time) error { - s.mu.Lock() - defer s.mu.Unlock() - - cert, ok := s.certificates[id] - if !ok { - return ErrCertNotFound - } - - cert.LastUsedAt = &at - return nil -} - -// ExpireCertificates marks all expired certificates as EXPIRED. -func (s *MemoryStore) ExpireCertificates(ctx context.Context) (int, error) { - s.mu.Lock() - defer s.mu.Unlock() - - now := time.Now() - count := 0 - - for _, cert := range s.certificates { - if cert.Status == CertStatusActive && now.After(cert.ValidBefore) { - cert.Status = CertStatusExpired - count++ - } - } - - return count, nil -} - -// DeleteCertificate removes a certificate record. -func (s *MemoryStore) DeleteCertificate(ctx context.Context, id string) error { - s.mu.Lock() - defer s.mu.Unlock() - - delete(s.certificates, id) - return nil -} - -// CreateSession persists a new access session record. -func (s *MemoryStore) CreateSession(ctx context.Context, session *AccessSession) error { - s.mu.Lock() - defer s.mu.Unlock() - - s.sessions[session.ID] = session - return nil -} - -// GetSession retrieves a session by ID. -func (s *MemoryStore) GetSession(ctx context.Context, id string) (*AccessSession, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - session, ok := s.sessions[id] - if !ok { - return nil, ErrSessionNotFound - } - return session, nil -} - -// ListSessions retrieves sessions matching the filter. 
-func (s *MemoryStore) ListSessions(ctx context.Context, filter SessionFilter, opts *ListOptions) ([]*AccessSession, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - var results []*AccessSession - - for _, session := range s.sessions { - // Apply filters - if filter.CertificateID != nil && session.CertificateID != *filter.CertificateID { - continue - } - if filter.SandboxID != nil && session.SandboxID != *filter.SandboxID { - continue - } - if filter.UserID != nil && session.UserID != *filter.UserID { - continue - } - if filter.Status != nil && session.Status != *filter.Status { - continue - } - if filter.ActiveOnly && session.Status != SessionStatusActive && session.Status != SessionStatusPending { - continue - } - if filter.StartedAfter != nil && session.StartedAt.Before(*filter.StartedAfter) { - continue - } - - results = append(results, session) - } - - // Apply pagination - if opts != nil { - if opts.Offset > 0 && opts.Offset < len(results) { - results = results[opts.Offset:] - } else if opts.Offset >= len(results) { - results = nil - } - if opts.Limit > 0 && opts.Limit < len(results) { - results = results[:opts.Limit] - } - } - - return results, nil -} - -// UpdateSessionStatus updates the status of a session. -func (s *MemoryStore) UpdateSessionStatus(ctx context.Context, id string, status SessionStatus, reason string) error { - s.mu.Lock() - defer s.mu.Unlock() - - session, ok := s.sessions[id] - if !ok { - return ErrSessionNotFound - } - - session.Status = status - session.DisconnectReason = reason - return nil -} - -// EndSession marks a session as ended. 
-func (s *MemoryStore) EndSession(ctx context.Context, id string, endedAt time.Time, reason string) error { - s.mu.Lock() - defer s.mu.Unlock() - - session, ok := s.sessions[id] - if !ok { - return ErrSessionNotFound - } - - session.Status = SessionStatusEnded - session.EndedAt = &endedAt - session.DisconnectReason = reason - - duration := int(endedAt.Sub(session.StartedAt).Seconds()) - session.DurationSeconds = &duration - - return nil -} - -// GetActiveSessions returns all currently active sessions. -func (s *MemoryStore) GetActiveSessions(ctx context.Context) ([]*AccessSession, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - var results []*AccessSession - for _, session := range s.sessions { - if session.Status == SessionStatusActive || session.Status == SessionStatusPending { - results = append(results, session) - } - } - return results, nil -} - -// GetSessionsByCertificate returns all sessions for a certificate. -func (s *MemoryStore) GetSessionsByCertificate(ctx context.Context, certID string) ([]*AccessSession, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - var results []*AccessSession - for _, session := range s.sessions { - if session.CertificateID == certID { - results = append(results, session) - } - } - return results, nil -} - -// Verify MemoryStore implements CertificateStore at compile time. -var _ CertificateStore = (*MemoryStore)(nil) diff --git a/fluid/internal/sshca/vmadapter.go b/fluid/internal/sshca/vmadapter.go deleted file mode 100755 index 7ed5d938..00000000 --- a/fluid/internal/sshca/vmadapter.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package sshca provides SSH Certificate Authority management for ephemeral sandbox access. -package sshca - -import ( - "context" - "fmt" - - "github.com/aspectrr/fluid.sh/fluid/internal/store" -) - -// SandboxStore defines the minimal interface needed to look up sandbox information. 
-type SandboxStore interface { - GetSandbox(ctx context.Context, id string) (*store.Sandbox, error) -} - -// VMAdapter implements VMInfoProvider by delegating to the sandbox store. -type VMAdapter struct { - store SandboxStore -} - -// NewVMAdapter creates a new VM adapter. -func NewVMAdapter(st SandboxStore) *VMAdapter { - return &VMAdapter{ - store: st, - } -} - -// GetSandboxIP returns the IP address of a sandbox. -func (a *VMAdapter) GetSandboxIP(ctx context.Context, sandboxID string) (string, error) { - sb, err := a.store.GetSandbox(ctx, sandboxID) - if err != nil { - return "", fmt.Errorf("get sandbox: %w", err) - } - - if sb.IPAddress == nil || *sb.IPAddress == "" { - return "", fmt.Errorf("sandbox %s has no IP address", sandboxID) - } - - return *sb.IPAddress, nil -} - -// GetSandboxVMName returns the VM name for a sandbox. -func (a *VMAdapter) GetSandboxVMName(ctx context.Context, sandboxID string) (string, error) { - sb, err := a.store.GetSandbox(ctx, sandboxID) - if err != nil { - return "", fmt.Errorf("get sandbox: %w", err) - } - - return sb.SandboxName, nil -} - -// IsSandboxRunning checks if the sandbox is in a running state. -func (a *VMAdapter) IsSandboxRunning(ctx context.Context, sandboxID string) (bool, error) { - sb, err := a.store.GetSandbox(ctx, sandboxID) - if err != nil { - return false, fmt.Errorf("get sandbox: %w", err) - } - - return sb.State == store.SandboxStateRunning, nil -} - -// Verify VMAdapter implements VMInfoProvider at compile time. -var _ VMInfoProvider = (*VMAdapter)(nil) diff --git a/fluid/internal/sshkeys/manager_test.go b/fluid/internal/sshkeys/manager_test.go deleted file mode 100755 index ab90adad..00000000 --- a/fluid/internal/sshkeys/manager_test.go +++ /dev/null @@ -1,1013 +0,0 @@ -package sshkeys - -import ( - "context" - "os" - "path/filepath" - "strings" - "sync" - "testing" - "time" - - "github.com/aspectrr/fluid.sh/fluid/internal/sshca" -) - -// testCA creates a real CA for testing. 
-// Returns the CA and a cleanup function. -func testCA(t *testing.T) (*sshca.CA, func()) { - t.Helper() - - tempDir, err := os.MkdirTemp("", "sshkeys-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - - keyPath := filepath.Join(tempDir, "test_ca") - - // Generate CA keypair. - if err := sshca.GenerateCA(keyPath, "test-ca"); err != nil { - _ = os.RemoveAll(tempDir) - t.Fatalf("failed to generate CA: %v", err) - } - - cfg := sshca.Config{ - CAKeyPath: keyPath, - CAPubKeyPath: keyPath + ".pub", - WorkDir: filepath.Join(tempDir, "work"), - DefaultTTL: 5 * time.Minute, - MaxTTL: 10 * time.Minute, - DefaultPrincipals: []string{"sandbox"}, - EnforceKeyPermissions: false, // Disable for tests - } - - ca, err := sshca.NewCA(cfg) - if err != nil { - _ = os.RemoveAll(tempDir) - t.Fatalf("failed to create CA: %v", err) - } - - if err := ca.Initialize(context.Background()); err != nil { - _ = os.RemoveAll(tempDir) - t.Fatalf("failed to initialize CA: %v", err) - } - - return ca, func() { - _ = os.RemoveAll(tempDir) - } -} - -func TestNewKeyManager(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - cfg := Config{ - KeyDir: tempDir, - CertificateTTL: 5 * time.Minute, - RefreshMargin: 30 * time.Second, - DefaultUsername: "sandbox", - } - - km, err := NewKeyManager(ca, cfg, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - if km.ca == nil { - t.Error("CA is nil") - } - if km.cfg.KeyDir != tempDir { - t.Errorf("KeyDir mismatch: got %s, want %s", km.cfg.KeyDir, tempDir) - } -} - -func TestNewKeyManager_NilCA(t *testing.T) { - _, err := NewKeyManager(nil, Config{}, nil) - if err == nil { - t.Error("expected error for nil CA") - } -} - -func TestNewKeyManager_DefaultConfig(t *testing.T) { - ca, cleanup := 
testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - // Empty config should use defaults. - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - defaults := DefaultConfig() - if km.cfg.CertificateTTL != defaults.CertificateTTL { - t.Errorf("CertificateTTL mismatch: got %v, want %v", km.cfg.CertificateTTL, defaults.CertificateTTL) - } - if km.cfg.RefreshMargin != defaults.RefreshMargin { - t.Errorf("RefreshMargin mismatch: got %v, want %v", km.cfg.RefreshMargin, defaults.RefreshMargin) - } - if km.cfg.DefaultUsername != defaults.DefaultUsername { - t.Errorf("DefaultUsername mismatch: got %s, want %s", km.cfg.DefaultUsername, defaults.DefaultUsername) - } -} - -func TestGetCredentials_GeneratesNewKeys(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir, CertificateTTL: 5 * time.Minute}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - creds, err := km.GetCredentials(ctx, "SBX-123", "sandbox") - if err != nil { - t.Fatalf("GetCredentials failed: %v", err) - } - - // Check credentials. 
- if creds.SandboxID != "SBX-123" { - t.Errorf("SandboxID mismatch: got %s, want SBX-123", creds.SandboxID) - } - if creds.Username != "sandbox" { - t.Errorf("Username mismatch: got %s, want sandbox", creds.Username) - } - if creds.PrivateKeyPath == "" { - t.Error("PrivateKeyPath is empty") - } - if creds.CertificatePath == "" { - t.Error("CertificatePath is empty") - } - if creds.ValidUntil.IsZero() { - t.Error("ValidUntil is zero") - } - - // Check files exist. - if _, err := os.Stat(creds.PrivateKeyPath); os.IsNotExist(err) { - t.Error("private key file does not exist") - } - if _, err := os.Stat(creds.CertificatePath); os.IsNotExist(err) { - t.Error("certificate file does not exist") - } -} - -func TestGetCredentials_ReturnsCached(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir, CertificateTTL: 5 * time.Minute}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - - // First call generates. - creds1, err := km.GetCredentials(ctx, "SBX-123", "sandbox") - if err != nil { - t.Fatalf("GetCredentials (1) failed: %v", err) - } - - // Second call should return cached. - creds2, err := km.GetCredentials(ctx, "SBX-123", "sandbox") - if err != nil { - t.Fatalf("GetCredentials (2) failed: %v", err) - } - - // Should be the same credentials. 
- if creds1.PrivateKeyPath != creds2.PrivateKeyPath { - t.Error("expected cached credentials to be returned") - } -} - -func TestGetCredentials_RegeneratesOnExpiry(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{ - KeyDir: tempDir, - CertificateTTL: 5 * time.Minute, - RefreshMargin: 30 * time.Second, - }, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - // First call generates. - creds1, err := km.GetCredentials(ctx, "SBX-123", "sandbox") - if err != nil { - t.Fatalf("GetCredentials (1) failed: %v", err) - } - - // Simulate time passing by modifying cached credentials to be expired. - km.mu.Lock() - for key, creds := range km.credentials { - creds.ValidUntil = time.Now().Add(-1 * time.Minute) // Already expired - km.credentials[key] = creds - } - km.mu.Unlock() - - // Second call should regenerate. - creds2, err := km.GetCredentials(ctx, "SBX-123", "sandbox") - if err != nil { - t.Fatalf("GetCredentials (2) failed: %v", err) - } - - // ValidUntil should be different (new certificate was issued). - // Note: paths are the same because sandbox ID is the same, but the - // certificate content and expiry time will be different. - if creds2.ValidUntil.Before(time.Now()) { - t.Error("expected new credentials with valid expiry after regeneration") - } - // New expiry should be after the old (expired) one. 
- if !creds2.ValidUntil.After(creds1.ValidUntil) { - t.Error("expected new credentials to have later expiry than expired ones") - } -} - -func TestGetCredentials_DefaultUsername(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{ - KeyDir: tempDir, - DefaultUsername: "myuser", - }, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - - // Empty username should use default. - creds, err := km.GetCredentials(ctx, "SBX-123", "") - if err != nil { - t.Fatalf("GetCredentials failed: %v", err) - } - - if creds.Username != "myuser" { - t.Errorf("Username mismatch: got %s, want myuser", creds.Username) - } -} - -func TestGetCredentials_ConcurrentSafety(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - sandboxID := "SBX-CONCURRENT" - - // Launch multiple goroutines requesting the same sandbox's credentials. - var wg sync.WaitGroup - results := make(chan *Credentials, 10) - errors := make(chan error, 10) - - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - creds, err := km.GetCredentials(ctx, sandboxID, "sandbox") - if err != nil { - errors <- err - return - } - results <- creds - }() - } - - wg.Wait() - close(results) - close(errors) - - // Check for errors. 
- for err := range errors { - t.Errorf("GetCredentials error: %v", err) - } - - // All results should have the same private key path (cached). - var firstPath string - for creds := range results { - if firstPath == "" { - firstPath = creds.PrivateKeyPath - } else if creds.PrivateKeyPath != firstPath { - t.Error("concurrent calls returned different credentials") - } - } -} - -func TestCleanupSandbox_RemovesFiles(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - - // Generate credentials. - creds, err := km.GetCredentials(ctx, "SBX-CLEANUP", "sandbox") - if err != nil { - t.Fatalf("GetCredentials failed: %v", err) - } - - // Verify files exist. - if _, err := os.Stat(creds.PrivateKeyPath); os.IsNotExist(err) { - t.Fatal("private key file should exist") - } - - // Cleanup. - if err := km.CleanupSandbox(ctx, "SBX-CLEANUP"); err != nil { - t.Fatalf("CleanupSandbox failed: %v", err) - } - - // Verify files are gone. - if _, err := os.Stat(creds.PrivateKeyPath); !os.IsNotExist(err) { - t.Error("private key file should be deleted") - } - sandboxDir := km.sandboxKeyDir("SBX-CLEANUP") - if _, err := os.Stat(sandboxDir); !os.IsNotExist(err) { - t.Error("sandbox key directory should be deleted") - } - - // Verify cache is cleared. 
- km.mu.RLock() - if len(km.credentials) > 0 { - t.Error("credentials should be cleared from cache") - } - km.mu.RUnlock() -} - -func TestCleanupSandbox_EmptySandboxID(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - err = km.CleanupSandbox(context.Background(), "") - if err == nil { - t.Error("expected error for empty sandboxID") - } -} - -func TestKeyFilePermissions(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - creds, err := km.GetCredentials(ctx, "SBX-PERM", "sandbox") - if err != nil { - t.Fatalf("GetCredentials failed: %v", err) - } - - // Check private key permissions. 
- info, err := os.Stat(creds.PrivateKeyPath) - if err != nil { - t.Fatalf("failed to stat private key: %v", err) - } - perm := info.Mode().Perm() - if perm != 0o600 { - t.Errorf("private key has wrong permissions: %o, expected 0600", perm) - } -} - -func TestCredentials_IsExpired(t *testing.T) { - tests := []struct { - name string - validUntil time.Time - margin time.Duration - want bool - }{ - { - name: "not expired", - validUntil: time.Now().Add(10 * time.Minute), - margin: 30 * time.Second, - want: false, - }, - { - name: "expired", - validUntil: time.Now().Add(-1 * time.Minute), - margin: 30 * time.Second, - want: true, - }, - { - name: "within margin", - validUntil: time.Now().Add(20 * time.Second), - margin: 30 * time.Second, - want: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Credentials{ValidUntil: tt.validUntil} - if got := c.IsExpired(tt.margin); got != tt.want { - t.Errorf("IsExpired() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetSourceVMCredentials_GeneratesNewKeys(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir, CertificateTTL: 5 * time.Minute}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - creds, err := km.GetSourceVMCredentials(ctx, "golden-ubuntu-22.04") - if err != nil { - t.Fatalf("GetSourceVMCredentials failed: %v", err) - } - - // Check credentials. 
- if creds.SandboxID != "golden-ubuntu-22.04" { - t.Errorf("SandboxID mismatch: got %s, want golden-ubuntu-22.04", creds.SandboxID) - } - if creds.Username != "fluid-readonly" { - t.Errorf("Username mismatch: got %s, want fluid-readonly", creds.Username) - } - if creds.PrivateKeyPath == "" { - t.Error("PrivateKeyPath is empty") - } - if creds.CertificatePath == "" { - t.Error("CertificatePath is empty") - } - if creds.ValidUntil.IsZero() { - t.Error("ValidUntil is zero") - } - - // Check files exist. - if _, err := os.Stat(creds.PrivateKeyPath); os.IsNotExist(err) { - t.Error("private key file does not exist") - } - if _, err := os.Stat(creds.CertificatePath); os.IsNotExist(err) { - t.Error("certificate file does not exist") - } -} - -func TestGetSourceVMCredentials_ReturnsCached(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir, CertificateTTL: 5 * time.Minute}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - - // First call generates. - creds1, err := km.GetSourceVMCredentials(ctx, "golden-ubuntu-22.04") - if err != nil { - t.Fatalf("GetSourceVMCredentials (1) failed: %v", err) - } - - // Second call should return cached. - creds2, err := km.GetSourceVMCredentials(ctx, "golden-ubuntu-22.04") - if err != nil { - t.Fatalf("GetSourceVMCredentials (2) failed: %v", err) - } - - // Should be the same credentials. 
- if creds1.PrivateKeyPath != creds2.PrivateKeyPath { - t.Error("expected cached credentials to be returned") - } - if creds1.ValidUntil != creds2.ValidUntil { - t.Error("expected cached credentials to have same expiry") - } -} - -func TestGetSourceVMCredentials_RegeneratesOnExpiry(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{ - KeyDir: tempDir, - CertificateTTL: 5 * time.Minute, - RefreshMargin: 30 * time.Second, - }, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - // First call generates. - creds1, err := km.GetSourceVMCredentials(ctx, "golden-ubuntu-22.04") - if err != nil { - t.Fatalf("GetSourceVMCredentials (1) failed: %v", err) - } - - // Simulate time passing by modifying cached credentials to be expired. - km.mu.Lock() - for key, creds := range km.credentials { - if key == "sourcevm:golden-ubuntu-22.04:fluid-readonly" { - creds.ValidUntil = time.Now().Add(-1 * time.Minute) // Already expired - km.credentials[key] = creds - } - } - km.mu.Unlock() - - // Second call should regenerate. - creds2, err := km.GetSourceVMCredentials(ctx, "golden-ubuntu-22.04") - if err != nil { - t.Fatalf("GetSourceVMCredentials (2) failed: %v", err) - } - - // ValidUntil should be different (new certificate was issued). - if creds2.ValidUntil.Before(time.Now()) { - t.Error("expected new credentials with valid expiry after regeneration") - } - // New expiry should be after the old (expired) one. 
- if !creds2.ValidUntil.After(creds1.ValidUntil) { - t.Error("expected new credentials to have later expiry than expired ones") - } -} - -func TestGetSourceVMCredentials_FilesystemLayout(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - - testCases := []struct { - name string - sourceVMName string - expectedDirName string - }{ - { - name: "simple name", - sourceVMName: "ubuntu-22.04", - expectedDirName: "sourcevm-ubuntu-22_04", - }, - { - name: "name with dots", - sourceVMName: "golden.ubuntu.22.04", - expectedDirName: "sourcevm-golden_ubuntu_22_04", - }, - { - name: "name with hyphens", - sourceVMName: "my-golden-vm", - expectedDirName: "sourcevm-my-golden-vm", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - creds, err := km.GetSourceVMCredentials(ctx, tc.sourceVMName) - if err != nil { - t.Fatalf("GetSourceVMCredentials failed: %v", err) - } - - // Verify the directory structure. - expectedDir := filepath.Join(tempDir, tc.expectedDirName) - expectedKeyPath := filepath.Join(expectedDir, "key") - expectedCertPath := filepath.Join(expectedDir, "key-cert.pub") - - if creds.PrivateKeyPath != expectedKeyPath { - t.Errorf("PrivateKeyPath mismatch: got %s, want %s", creds.PrivateKeyPath, expectedKeyPath) - } - if creds.CertificatePath != expectedCertPath { - t.Errorf("CertificatePath mismatch: got %s, want %s", creds.CertificatePath, expectedCertPath) - } - - // Verify directory exists with correct permissions. 
- info, err := os.Stat(expectedDir) - if err != nil { - t.Fatalf("failed to stat directory: %v", err) - } - if !info.IsDir() { - t.Error("expected directory, got file") - } - perm := info.Mode().Perm() - if perm != 0o700 { - t.Errorf("directory has wrong permissions: %o, expected 0700", perm) - } - - // Verify private key has correct permissions. - keyInfo, err := os.Stat(creds.PrivateKeyPath) - if err != nil { - t.Fatalf("failed to stat private key: %v", err) - } - keyPerm := keyInfo.Mode().Perm() - if keyPerm != 0o600 { - t.Errorf("private key has wrong permissions: %o, expected 0600", keyPerm) - } - }) - } -} - -func TestGetSourceVMCredentials_EmptySourceVMName(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - _, err = km.GetSourceVMCredentials(context.Background(), "") - if err == nil { - t.Error("expected error for empty sourceVMName") - } -} - -func TestGetSourceVMCredentials_ConcurrentSafety(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tempDir, err := os.MkdirTemp("", "keymanager-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tempDir) }() - - km, err := NewKeyManager(ca, Config{KeyDir: tempDir}, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - sourceVMName := "golden-concurrent" - - // Launch multiple goroutines requesting the same source VM's credentials. 
- var wg sync.WaitGroup - results := make(chan *Credentials, 10) - errors := make(chan error, 10) - - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - creds, err := km.GetSourceVMCredentials(ctx, sourceVMName) - if err != nil { - errors <- err - return - } - results <- creds - }() - } - - wg.Wait() - close(results) - close(errors) - - // Check for errors. - for err := range errors { - t.Errorf("GetSourceVMCredentials error: %v", err) - } - - // All results should have the same private key path (cached). - var firstPath string - for creds := range results { - if firstPath == "" { - firstPath = creds.PrivateKeyPath - } else if creds.PrivateKeyPath != firstPath { - t.Error("concurrent calls returned different credentials") - } - } -} - -// TestSanitizeVMName tests the VM name sanitization function. -func TestSanitizeVMName(t *testing.T) { - testCases := []struct { - name string - input string - expected string - }{ - { - name: "simple alphanumeric", - input: "ubuntu2204", - expected: "ubuntu2204", - }, - { - name: "with dots", - input: "ubuntu.22.04", - expected: "ubuntu_22_04", - }, - { - name: "with hyphens", - input: "my-golden-vm", - expected: "my-golden-vm", - }, - { - name: "with underscores", - input: "test_vm_123", - expected: "test_vm_123", - }, - { - name: "path traversal with ..", - input: "../../../etc/passwd", - expected: "_________etc_passwd", - }, - { - name: "path traversal with /", - input: "/etc/passwd", - expected: "_etc_passwd", - }, - { - name: "path traversal complex", - input: "../../vm/../config", - expected: "______vm____config", - }, - { - name: "special characters", - input: "vm@#$%^&*()name", - expected: "vm_________name", - }, - { - name: "spaces", - input: "my vm name", - expected: "my_vm_name", - }, - { - name: "mixed valid and invalid", - input: "vm-123.test/path", - expected: "vm-123_test_path", - }, - { - name: "unicode characters", - input: "vm-名前", - expected: "vm-__", // Each unicode character is replaced 
with underscore - }, - { - name: "backslash (Windows path)", - input: "C:\\Users\\test", - expected: "C__Users_test", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := sanitizeVMName(tc.input) - if result != tc.expected { - t.Errorf("sanitizeVMName(%q) = %q, want %q", tc.input, result, tc.expected) - } - }) - } -} - -// TestSourceVMCredentialsPathTraversal tests that path traversal attacks are prevented. -func TestSourceVMCredentialsPathTraversal(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tmpDir, err := os.MkdirTemp("", "sshkeys-traversal-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tmpDir) }() - - cfg := DefaultConfig() - cfg.KeyDir = tmpDir - cfg.CertificateTTL = 5 * time.Minute - - km, err := NewKeyManager(ca, cfg, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - - // Test cases that attempt path traversal - testCases := []struct { - name string - vmName string - shouldCreate bool // whether the directory should be created - }{ - { - name: "path traversal with ..", - vmName: "../../etc/passwd", - shouldCreate: true, // should create sanitized directory - }, - { - name: "absolute path", - vmName: "/tmp/evil", - shouldCreate: true, - }, - { - name: "complex traversal", - vmName: "../../../root/.ssh/id_rsa", - shouldCreate: true, - }, - { - name: "normal name", - vmName: "ubuntu-22.04", - shouldCreate: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - creds, err := km.GetSourceVMCredentials(ctx, tc.vmName) - if err != nil { - t.Fatalf("GetSourceVMCredentials failed: %v", err) - } - - // Verify that the private key path is within the expected directory - if !strings.HasPrefix(creds.PrivateKeyPath, tmpDir) { - t.Errorf("private key path %q is not within base directory %q", creds.PrivateKeyPath, tmpDir) - } 
- - // Verify that the certificate path is within the expected directory - if !strings.HasPrefix(creds.CertificatePath, tmpDir) { - t.Errorf("certificate path %q is not within base directory %q", creds.CertificatePath, tmpDir) - } - - // Verify that files were actually created - if _, err := os.Stat(creds.PrivateKeyPath); os.IsNotExist(err) { - t.Errorf("private key file does not exist: %s", creds.PrivateKeyPath) - } - if _, err := os.Stat(creds.CertificatePath); os.IsNotExist(err) { - t.Errorf("certificate file does not exist: %s", creds.CertificatePath) - } - - // Verify that the parent directory name contains the sanitized VM name - parentDir := filepath.Base(filepath.Dir(creds.PrivateKeyPath)) - expectedPrefix := "sourcevm-" - if !strings.HasPrefix(parentDir, expectedPrefix) { - t.Errorf("parent directory %q does not start with expected prefix %q", parentDir, expectedPrefix) - } - - // Verify no path traversal occurred - the directory should be directly under tmpDir - expectedDir := filepath.Join(tmpDir, parentDir) - actualDir := filepath.Dir(creds.PrivateKeyPath) - if actualDir != expectedDir { - t.Errorf("directory mismatch: got %q, want %q", actualDir, expectedDir) - } - }) - } -} - -// TestSourceVMCredentialsSanitizationInPath tests that the directory name is sanitized. 
-func TestSourceVMCredentialsSanitizationInPath(t *testing.T) { - ca, cleanup := testCA(t) - defer cleanup() - - tmpDir, err := os.MkdirTemp("", "sshkeys-sanitize-test-") - if err != nil { - t.Fatalf("failed to create temp dir: %v", err) - } - defer func() { _ = os.RemoveAll(tmpDir) }() - - cfg := DefaultConfig() - cfg.KeyDir = tmpDir - cfg.CertificateTTL = 5 * time.Minute - - km, err := NewKeyManager(ca, cfg, nil) - if err != nil { - t.Fatalf("NewKeyManager failed: %v", err) - } - defer func() { _ = km.Close() }() - - ctx := context.Background() - - // Test that the directory name is properly sanitized - vmName := "../../../evil/../../path" - creds, err := km.GetSourceVMCredentials(ctx, vmName) - if err != nil { - t.Fatalf("GetSourceVMCredentials failed: %v", err) - } - - // The directory should be named with the sanitized version - dirName := filepath.Base(filepath.Dir(creds.PrivateKeyPath)) - expectedDirName := "sourcevm-" + sanitizeVMName(vmName) - - if dirName != expectedDirName { - t.Errorf("directory name = %q, want %q", dirName, expectedDirName) - } - - // Verify the directory doesn't contain any path separators - if filepath.Base(dirName) != dirName { - t.Errorf("directory name %q contains path separators", dirName) - } -} diff --git a/fluid/internal/vm/service.go b/fluid/internal/vm/service.go deleted file mode 100755 index 0953260e..00000000 --- a/fluid/internal/vm/service.go +++ /dev/null @@ -1,2156 +0,0 @@ -package vm - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "log/slog" - "os" - "os/exec" - "strings" - "sync" - "time" - - "github.com/google/uuid" - - "github.com/aspectrr/fluid.sh/fluid/internal/config" - "github.com/aspectrr/fluid.sh/fluid/internal/provider" // VM manager interface - "github.com/aspectrr/fluid.sh/fluid/internal/readonly" - "github.com/aspectrr/fluid.sh/fluid/internal/sshkeys" - "github.com/aspectrr/fluid.sh/fluid/internal/store" - "github.com/aspectrr/fluid.sh/fluid/internal/telemetry" -) 
- -// RemoteManagerFactory creates a provider.Manager for a remote host. -// This allows the service to create managers for sandboxes on different hosts -// without depending on a specific provider implementation. -type RemoteManagerFactory func(host config.HostConfig) provider.Manager - -// Service orchestrates VM operations and data persistence. -// It represents the main application layer for sandbox lifecycle, command exec, -// snapshotting, diffing, and artifact generation orchestration. -type Service struct { - mgr provider.Manager - store store.Store - ssh SSHRunner - keyMgr sshkeys.KeyProvider // Optional: manages SSH keys for RunCommand - telemetry telemetry.Service - cfg Config - remoteManagerFactory RemoteManagerFactory // Creates managers for remote hosts - timeNowFn func() time.Time - logger *slog.Logger -} - -// Config controls default VM parameters and timeouts used by the service. -type Config struct { - // Default libvirt network name (e.g., "default") used when creating VMs. - Network string - - // Default shape if not provided by callers. - DefaultVCPUs int - DefaultMemoryMB int - - // CommandTimeout sets a default timeout for RunCommand when caller doesn't provide one. - CommandTimeout time.Duration - - // IPDiscoveryTimeout controls how long StartSandbox waits for the VM IP (when requested). - IPDiscoveryTimeout time.Duration - - // SSHReadinessTimeout controls how long to wait for SSH to become available after IP discovery. - // If zero, SSH readiness check is skipped. Default: 60s - SSHReadinessTimeout time.Duration - - // SSHProxyJump specifies a jump host for SSH connections to VMs. - // Format: "user@host:port" or just "host" for default user/port. - // Required when VMs are on an isolated network not directly reachable. - SSHProxyJump string -} - -// Option configures the Service during construction. -type Option func(*Service) - -// WithSSHRunner overrides the default SSH runner implementation. 
-func WithSSHRunner(r SSHRunner) Option { - return func(s *Service) { s.ssh = r } -} - -// WithTelemetry sets the telemetry service. -func WithTelemetry(t telemetry.Service) Option { - return func(s *Service) { s.telemetry = t } -} - -// WithTimeNow overrides the clock (useful for tests). -func WithTimeNow(fn func() time.Time) Option { - return func(s *Service) { s.timeNowFn = fn } -} - -// WithLogger sets a custom logger for the service. -func WithLogger(l *slog.Logger) Option { - return func(s *Service) { s.logger = l } -} - -// WithKeyManager sets a key manager for managed SSH credentials. -// When set, RunCommand can be called without explicit privateKeyPath. -func WithKeyManager(km sshkeys.KeyProvider) Option { - return func(s *Service) { s.keyMgr = km } -} - -// WithRemoteManagerFactory sets the factory for creating remote managers. -func WithRemoteManagerFactory(f RemoteManagerFactory) Option { - return func(s *Service) { s.remoteManagerFactory = f } -} - -// NewService constructs a VM service with the provided manager, store and config. 
-func NewService(mgr provider.Manager, st store.Store, cfg Config, opts ...Option) *Service { - if cfg.DefaultVCPUs <= 0 { - cfg.DefaultVCPUs = 2 - } - if cfg.DefaultMemoryMB <= 0 { - cfg.DefaultMemoryMB = 2048 - } - if cfg.CommandTimeout <= 0 { - cfg.CommandTimeout = 10 * time.Minute - } - if cfg.IPDiscoveryTimeout <= 0 { - cfg.IPDiscoveryTimeout = 2 * time.Minute - } - if cfg.SSHReadinessTimeout <= 0 { - cfg.SSHReadinessTimeout = 60 * time.Second - } - s := &Service{ - mgr: mgr, - store: st, - cfg: cfg, - ssh: &DefaultSSHRunner{DefaultProxyJump: cfg.SSHProxyJump, Logger: slog.Default()}, - timeNowFn: time.Now, - logger: slog.Default(), - } - for _, o := range opts { - o(s) - } - // Ensure SSH runner uses the same logger - if r, ok := s.ssh.(*DefaultSSHRunner); ok { - r.Logger = s.logger - } - // Default to noop telemetry if not provided - if s.telemetry == nil { - s.telemetry = telemetry.NewNoopService() - } - return s -} - -// getManagerForSandbox returns the appropriate manager for a sandbox. -// If the sandbox was created on a remote host and a remote factory is available, -// returns a remote manager. Otherwise, returns the local manager. -func (s *Service) getManagerForSandbox(sb *store.Sandbox) provider.Manager { - if sb.HostAddress != nil && *sb.HostAddress != "" && s.remoteManagerFactory != nil { - hostName := "" - if sb.HostName != nil { - hostName = *sb.HostName - } - host := config.HostConfig{ - Name: hostName, - Address: *sb.HostAddress, - SSHUser: "root", - SSHPort: 22, - } - return s.remoteManagerFactory(host) - } - return s.mgr -} - -// ResourceValidationResult contains the results of validating resources for sandbox creation. -// If NeedsApproval is true, the caller should request human approval before proceeding. 
-type ResourceValidationResult struct { - Valid bool - NeedsApproval bool - SourceVMValid bool - VMErrors []string - VMWarnings []string - ResourceCheck *provider.ResourceCheckResult -} - -// CheckResourcesForSandbox validates resources without failing. -// Returns a ResourceValidationResult that indicates whether approval is needed. -func (s *Service) CheckResourcesForSandbox(ctx context.Context, mgr provider.Manager, sourceVMName string, cpu, memoryMB int) *ResourceValidationResult { - result := &ResourceValidationResult{ - Valid: true, - NeedsApproval: false, - SourceVMValid: true, - } - - // Apply defaults - if cpu <= 0 { - cpu = s.cfg.DefaultVCPUs - } - if memoryMB <= 0 { - memoryMB = s.cfg.DefaultMemoryMB - } - - // 1. Validate source VM - vmValidation, err := mgr.ValidateSourceVM(ctx, sourceVMName) - if err != nil { - s.logger.Warn("source VM validation failed", "source_vm", sourceVMName, "error", err) - result.SourceVMValid = false - result.VMErrors = append(result.VMErrors, fmt.Sprintf("validation error: %v", err)) - result.Valid = false - } else if !vmValidation.Valid { - result.SourceVMValid = false - result.VMErrors = vmValidation.Errors - result.VMWarnings = vmValidation.Warnings - result.Valid = false - } else { - result.VMWarnings = vmValidation.Warnings - } - - // 2. 
Check host resources - resourceCheck, err := mgr.CheckHostResources(ctx, cpu, memoryMB) - if err != nil { - s.logger.Warn("host resource check failed", "error", err) - // Resource check failed, but this shouldn't block - set needsApproval - result.ResourceCheck = &provider.ResourceCheckResult{ - Valid: false, - RequiredMemoryMB: memoryMB, - RequiredCPUs: cpu, - Errors: []string{fmt.Sprintf("resource check error: %v", err)}, - } - result.NeedsApproval = true - } else { - result.ResourceCheck = resourceCheck - if !resourceCheck.Valid { - // Resources insufficient - needs approval but can proceed - result.NeedsApproval = true - result.Valid = false - } - } - - return result -} - -// GetManager returns the default manager. -func (s *Service) GetManager() provider.Manager { - return s.mgr -} - -// GetRemoteManager returns a manager for a specific remote host. -func (s *Service) GetRemoteManager(host *config.HostConfig) provider.Manager { - if host == nil || s.remoteManagerFactory == nil { - return s.mgr - } - return s.remoteManagerFactory(*host) -} - -// GetDefaultMemory returns the default memory in MB -func (s *Service) GetDefaultMemory() int { - return s.cfg.DefaultMemoryMB -} - -// GetDefaultCPUs returns the default number of CPUs -func (s *Service) GetDefaultCPUs() int { - return s.cfg.DefaultVCPUs -} - -// CreateSandbox clones a VM from an existing VM and persists a Sandbox record. -// -// sourceSandboxName is the name of the existing VM in libvirt to clone from. -// SandboxName is optional; if empty, a name will be generated. -// cpu and memoryMB are optional; if <=0 the service defaults are used. -// ttlSeconds is optional; if provided, sets the TTL for auto garbage collection. -// autoStart if true will start the VM immediately after creation. -// waitForIP if true (and autoStart is true), will wait for IP discovery. -// Returns the sandbox, the discovered IP (if autoStart and waitForIP), and any error. 
-// validateIPUniqueness checks if the given IP is already assigned to another running sandbox. -// Returns an error if the IP is assigned to a different sandbox that is still running. -func (s *Service) validateIPUniqueness(ctx context.Context, currentSandboxID, ip string) error { - // Check both RUNNING and STARTING sandboxes to prevent race conditions - // where two sandboxes might discover the same IP simultaneously - statesToCheck := []store.SandboxState{ - store.SandboxStateRunning, - store.SandboxStateStarting, - } - - for _, state := range statesToCheck { - stateFilter := state - sandboxes, err := s.store.ListSandboxes(ctx, store.SandboxFilter{ - State: &stateFilter, - }, nil) - if err != nil { - return fmt.Errorf("list sandboxes (state=%s) for IP validation: %w", state, err) - } - - for _, sb := range sandboxes { - if sb.ID == currentSandboxID { - continue // Skip the current sandbox - } - if sb.IPAddress != nil && *sb.IPAddress == ip { - s.logger.Error("IP address conflict detected", - "conflict_ip", ip, - "current_sandbox_id", currentSandboxID, - "conflicting_sandbox_id", sb.ID, - "conflicting_sandbox_name", sb.SandboxName, - "conflicting_sandbox_state", sb.State, - ) - return fmt.Errorf("IP %s is already assigned to sandbox %s (vm: %s, state: %s)", ip, sb.ID, sb.SandboxName, sb.State) - } - } - } - return nil -} - -// waitForSSH waits until SSH is accepting connections on the given IP. -// It uses exponential backoff to probe SSH readiness. -// proxyJump is optional - used when the sandbox is on a remote host. 
-func (s *Service) waitForSSH(ctx context.Context, sandboxID, ip, proxyJump string, timeout time.Duration) error { - if timeout <= 0 { - return nil // SSH readiness check disabled - } - - // Skip if no key manager configured - if s.keyMgr == nil { - s.logger.Debug("no key manager configured, skipping SSH readiness check") - return nil - } - - s.logger.Info("waiting for SSH to become ready", - "sandbox_id", sandboxID, - "ip", ip, - "timeout", timeout, - ) - - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - // Get credentials for SSH probe - use default "sandbox" user - creds, err := s.keyMgr.GetCredentials(ctx, sandboxID, "sandbox") - if err != nil { - s.logger.Warn("failed to get SSH credentials for readiness check, skipping", - "sandbox_id", sandboxID, - "error", err, - ) - return nil // Don't fail sandbox creation if we can't get creds - } - - // Use short command timeout for probes - probeTimeout := 10 * time.Second - - // Exponential backoff: 1s, 2s, 4s, 8s, 16s (capped) - initialDelay := 1 * time.Second - maxDelay := 16 * time.Second - attempt := 0 - - for { - select { - case <-ctx.Done(): - return fmt.Errorf("SSH readiness timeout after %v: %w", timeout, ctx.Err()) - default: - } - - // Try to run a simple command - _, _, exitCode, runErr := s.ssh.RunWithCert( - ctx, - ip, - creds.Username, - creds.PrivateKeyPath, - creds.CertificatePath, - "true", // Simple command that succeeds if SSH works - probeTimeout, - nil, - proxyJump, - ) - - if runErr == nil && exitCode == 0 { - s.logger.Info("SSH is ready", - "sandbox_id", sandboxID, - "ip", ip, - "attempts", attempt+1, - ) - return nil - } - - // Calculate backoff delay - delay := initialDelay * time.Duration(1< maxDelay { - delay = maxDelay - } - - s.logger.Debug("SSH not ready, retrying", - "sandbox_id", sandboxID, - "ip", ip, - "attempt", attempt+1, - "delay", delay, - "error", runErr, - ) - - select { - case <-time.After(delay): - attempt++ - case <-ctx.Done(): - return fmt.Errorf("SSH 
readiness timeout after %v: %w", timeout, ctx.Err()) - } - } -} - -func (s *Service) CreateSandbox(ctx context.Context, sourceSandboxName, agentID, sandboxName string, cpu, memoryMB int, ttlSeconds *int, autoStart, waitForIP bool) (*store.Sandbox, string, error) { - if strings.TrimSpace(sourceSandboxName) == "" { - return nil, "", fmt.Errorf("sourceSandboxName is required") - } - if strings.TrimSpace(agentID) == "" { - return nil, "", fmt.Errorf("agentID is required") - } - if cpu <= 0 { - cpu = s.cfg.DefaultVCPUs - } - if memoryMB <= 0 { - memoryMB = s.cfg.DefaultMemoryMB - } - - // Use provided sandbox name or generate one with sbx- prefix - if sandboxName == "" { - sandboxName = fmt.Sprintf("sbx-%s", shortID()) - } - - s.logger.Info("creating sandbox", - "source_vm_name", sourceSandboxName, - "agent_id", agentID, - "sandbox_name", sandboxName, - "cpu", cpu, - "memory_mb", memoryMB, - "auto_start", autoStart, - "wait_for_ip", waitForIP, - ) - - jobID := fmt.Sprintf("JOB-%s", shortID()) - - // Create the VM via libvirt manager by cloning from existing VM - _, err := s.mgr.CloneFromVM(ctx, sourceSandboxName, sandboxName, cpu, memoryMB, s.cfg.Network) - if err != nil { - s.logger.Error("failed to clone VM", - "source_vm_name", sourceSandboxName, - "sandbox_name", sandboxName, - "error", err, - ) - return nil, "", fmt.Errorf("clone vm: %w", err) - } - - sb := &store.Sandbox{ - ID: fmt.Sprintf("SBX-%s", shortID()), - JobID: jobID, - AgentID: agentID, - SandboxName: sandboxName, - BaseImage: sourceSandboxName, // Store the source VM name for reference - Network: s.cfg.Network, - State: store.SandboxStateCreated, - TTLSeconds: ttlSeconds, - VCPUs: cpu, - MemoryMB: memoryMB, - CreatedAt: s.timeNowFn().UTC(), - UpdatedAt: s.timeNowFn().UTC(), - } - if err := s.store.CreateSandbox(ctx, sb); err != nil { - return nil, "", fmt.Errorf("persist sandbox: %w", err) - } - - s.logger.Debug("sandbox cloned successfully", - "sandbox_id", sb.ID, - "sandbox_name", sandboxName, - ) - - 
// If autoStart is requested, start the VM immediately - var ip string - if autoStart { - s.logger.Info("auto-starting sandbox", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - ) - - if err := s.mgr.StartVM(ctx, sb.SandboxName); err != nil { - s.logger.Error("auto-start failed", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateError, nil) - return sb, "", fmt.Errorf("auto-start vm: %w", err) - } - - // Update state -> STARTING - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateStarting, nil); err != nil { - return sb, "", err - } - sb.State = store.SandboxStateStarting - - if waitForIP { - s.logger.Info("waiting for IP address", - "sandbox_id", sb.ID, - "timeout", s.cfg.IPDiscoveryTimeout, - ) - - var mac string - ip, mac, err = s.mgr.GetIPAddress(ctx, sb.SandboxName, s.cfg.IPDiscoveryTimeout) - if err != nil { - s.logger.Warn("IP discovery failed", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "error", err, - ) - // Still mark as running even if we couldn't discover the IP - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil) - sb.State = store.SandboxStateRunning - return sb, "", fmt.Errorf("get ip: %w", err) - } - - // Validate IP uniqueness before storing - if err := s.validateIPUniqueness(ctx, sb.ID, ip); err != nil { - s.logger.Error("IP conflict during sandbox creation", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "ip_address", ip, - "mac_address", mac, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil) - sb.State = store.SandboxStateRunning - return sb, "", fmt.Errorf("ip conflict: %w", err) - } - - // Wait for SSH to become ready before marking as RUNNING - // Local sandbox - no proxy jump needed - if err := s.waitForSSH(ctx, sb.ID, ip, "", s.cfg.SSHReadinessTimeout); err != nil { - s.logger.Warn("SSH readiness check failed", - "sandbox_id", 
sb.ID, - "ip_address", ip, - "error", err, - ) - // Don't fail - sandbox is still usable, just may need retries - } - - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, &ip); err != nil { - return sb, ip, err - } - sb.State = store.SandboxStateRunning - sb.IPAddress = &ip - } else { - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil); err != nil { - return sb, "", err - } - sb.State = store.SandboxStateRunning - } - } - - s.logger.Info("sandbox created", - "sandbox_id", sb.ID, - "state", sb.State, - "ip_address", ip, - ) - - s.telemetry.Track("sandbox_create", map[string]any{ - "sandbox_id": sb.ID, - "base_image": sb.BaseImage, - "cpu": cpu, - "memory_mb": memoryMB, - "auto_start": autoStart, - "wait_for_ip": waitForIP, - "agent_id": agentID, - "success": true, - }) - - return sb, ip, nil -} - -// CreateSandboxOnHost creates a sandbox on a specific remote host. -// This is used when multi-host support is enabled and the source VM is on a remote host. 
-func (s *Service) CreateSandboxOnHost(ctx context.Context, host *config.HostConfig, sourceSandboxName, agentID, sandboxName string, cpu, memoryMB int, ttlSeconds *int, autoStart, waitForIP bool) (*store.Sandbox, string, error) { - if host == nil { - return nil, "", fmt.Errorf("host is required for remote sandbox creation") - } - if strings.TrimSpace(sourceSandboxName) == "" { - return nil, "", fmt.Errorf("sourceSandboxName is required") - } - if strings.TrimSpace(agentID) == "" { - return nil, "", fmt.Errorf("agentID is required") - } - if cpu <= 0 { - cpu = s.cfg.DefaultVCPUs - } - if memoryMB <= 0 { - memoryMB = s.cfg.DefaultMemoryMB - } - - // Use provided sandbox name or generate one with sbx- prefix - if sandboxName == "" { - sandboxName = fmt.Sprintf("sbx-%s", shortID()) - } - - // Create a remote manager for this host - if s.remoteManagerFactory == nil { - return nil, "", fmt.Errorf("remote manager factory not configured") - } - remoteMgr := s.remoteManagerFactory(*host) - - s.logger.Info("creating sandbox on remote host", - "host_name", host.Name, - "host_address", host.Address, - "source_vm_name", sourceSandboxName, - "agent_id", agentID, - "sandbox_name", sandboxName, - "cpu", cpu, - "memory_mb", memoryMB, - "auto_start", autoStart, - "wait_for_ip", waitForIP, - ) - - jobID := fmt.Sprintf("JOB-%s", shortID()) - - // Create the VM via remote libvirt manager - _, err := remoteMgr.CloneFromVM(ctx, sourceSandboxName, sandboxName, cpu, memoryMB, s.cfg.Network) - if err != nil { - s.logger.Error("failed to clone VM on remote host", - "host", host.Name, - "source_vm_name", sourceSandboxName, - "sandbox_name", sandboxName, - "error", err, - ) - return nil, "", fmt.Errorf("clone vm on host %s: %w", host.Name, err) - } - - hostName := host.Name - hostAddr := host.Address - sb := &store.Sandbox{ - ID: fmt.Sprintf("SBX-%s", shortID()), - JobID: jobID, - AgentID: agentID, - SandboxName: sandboxName, - BaseImage: sourceSandboxName, - Network: s.cfg.Network, - State: 
store.SandboxStateCreated, - TTLSeconds: ttlSeconds, - VCPUs: cpu, - MemoryMB: memoryMB, - HostName: &hostName, - HostAddress: &hostAddr, - CreatedAt: s.timeNowFn().UTC(), - UpdatedAt: s.timeNowFn().UTC(), - } - if err := s.store.CreateSandbox(ctx, sb); err != nil { - return nil, "", fmt.Errorf("persist sandbox: %w", err) - } - - s.logger.Debug("sandbox cloned on remote host", - "sandbox_id", sb.ID, - "sandbox_name", sandboxName, - "host", host.Name, - ) - - // If autoStart is requested, start the VM immediately - var ip string - if autoStart { - s.logger.Info("auto-starting sandbox on remote host", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "host", host.Name, - ) - - if err := remoteMgr.StartVM(ctx, sb.SandboxName); err != nil { - s.logger.Error("auto-start failed on remote host", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "host", host.Name, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateError, nil) - return sb, "", fmt.Errorf("auto-start vm on host %s: %w", host.Name, err) - } - - // Update state -> STARTING - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateStarting, nil); err != nil { - return sb, "", err - } - sb.State = store.SandboxStateStarting - - if waitForIP { - s.logger.Info("waiting for IP address on remote host", - "sandbox_id", sb.ID, - "host", host.Name, - "timeout", s.cfg.IPDiscoveryTimeout, - ) - - var mac string - ip, mac, err = remoteMgr.GetIPAddress(ctx, sb.SandboxName, s.cfg.IPDiscoveryTimeout) - if err != nil { - s.logger.Warn("IP discovery failed on remote host", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "host", host.Name, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil) - sb.State = store.SandboxStateRunning - return sb, "", fmt.Errorf("get ip on host %s: %w", host.Name, err) - } - - // Validate IP uniqueness - if err := s.validateIPUniqueness(ctx, sb.ID, ip); err != nil { - 
s.logger.Error("IP conflict on remote host", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "ip_address", ip, - "mac_address", mac, - "host", host.Name, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil) - sb.State = store.SandboxStateRunning - return sb, "", fmt.Errorf("ip conflict on host %s: %w", host.Name, err) - } - - // Wait for SSH to become ready before marking as RUNNING - // Remote sandbox - use host address as proxy jump - proxyJump := fmt.Sprintf("root@%s", host.Address) - if err := s.waitForSSH(ctx, sb.ID, ip, proxyJump, s.cfg.SSHReadinessTimeout); err != nil { - s.logger.Warn("SSH readiness check failed on remote host", - "sandbox_id", sb.ID, - "ip_address", ip, - "host", host.Name, - "error", err, - ) - // Don't fail - sandbox is still usable, just may need retries - } - - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, &ip); err != nil { - return sb, ip, err - } - sb.State = store.SandboxStateRunning - sb.IPAddress = &ip - } else { - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil); err != nil { - return sb, "", err - } - sb.State = store.SandboxStateRunning - } - } - - s.logger.Info("sandbox created on remote host", - "sandbox_id", sb.ID, - "host", host.Name, - "state", sb.State, - "ip_address", ip, - ) - - s.telemetry.Track("sandbox_create", map[string]any{ - "sandbox_id": sb.ID, - "base_image": sb.BaseImage, - "cpu": cpu, - "memory_mb": memoryMB, - "auto_start": autoStart, - "wait_for_ip": waitForIP, - "agent_id": agentID, - "host_name": host.Name, - "host_address": host.Address, - "success": true, - }) - - return sb, ip, nil -} - -func (s *Service) GetSandboxes(ctx context.Context, filter store.SandboxFilter, opts *store.ListOptions) ([]*store.Sandbox, error) { - return s.store.ListSandboxes(ctx, filter, opts) -} - -// GetSandbox retrieves a single sandbox by ID. 
-func (s *Service) GetSandbox(ctx context.Context, sandboxID string) (*store.Sandbox, error) { - if strings.TrimSpace(sandboxID) == "" { - return nil, fmt.Errorf("sandboxID is required") - } - return s.store.GetSandbox(ctx, sandboxID) -} - -// GetSandboxCommands retrieves all commands executed in a sandbox. -func (s *Service) GetSandboxCommands(ctx context.Context, sandboxID string, opts *store.ListOptions) ([]*store.Command, error) { - if strings.TrimSpace(sandboxID) == "" { - return nil, fmt.Errorf("sandboxID is required") - } - // Verify sandbox exists - if _, err := s.store.GetSandbox(ctx, sandboxID); err != nil { - return nil, err - } - return s.store.ListCommands(ctx, sandboxID, opts) -} - -// InjectSSHKey injects a public key for a user into the VM disk prior to boot. -func (s *Service) InjectSSHKey(ctx context.Context, sandboxID, username, publicKey string) error { - if strings.TrimSpace(sandboxID) == "" { - return fmt.Errorf("sandboxID is required") - } - if strings.TrimSpace(username) == "" { - return fmt.Errorf("username is required") - } - if strings.TrimSpace(publicKey) == "" { - return fmt.Errorf("publicKey is required") - } - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return err - } - // Get the appropriate manager (local or remote) for this sandbox - mgr := s.getManagerForSandbox(sb) - if err := mgr.InjectSSHKey(ctx, sb.SandboxName, username, publicKey); err != nil { - return fmt.Errorf("inject ssh key: %w", err) - } - sb.UpdatedAt = s.timeNowFn().UTC() - return s.store.UpdateSandbox(ctx, sb) -} - -// StartSandbox boots the VM and optionally waits for IP discovery. -// Returns the discovered IP if waitForIP is true and discovery succeeds (empty string otherwise). 
-func (s *Service) StartSandbox(ctx context.Context, sandboxID string, waitForIP bool) (string, error) { - if strings.TrimSpace(sandboxID) == "" { - return "", fmt.Errorf("sandboxID is required") - } - - s.logger.Info("starting sandbox", - "sandbox_id", sandboxID, - "wait_for_ip", waitForIP, - ) - - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return "", err - } - - s.logger.Debug("sandbox found", - "sandbox_name", sb.SandboxName, - "current_state", sb.State, - "host_name", sb.HostName, - "host_address", sb.HostAddress, - ) - - // Get the appropriate manager (local or remote) for this sandbox - mgr := s.getManagerForSandbox(sb) - - if err := mgr.StartVM(ctx, sb.SandboxName); err != nil { - s.logger.Error("failed to start VM", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateError, nil) - return "", fmt.Errorf("start vm: %w", err) - } - - // Update state -> STARTING - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateStarting, nil); err != nil { - return "", err - } - - var ip string - if waitForIP { - s.logger.Info("waiting for IP address", - "sandbox_id", sb.ID, - "timeout", s.cfg.IPDiscoveryTimeout, - ) - - var mac string - ip, mac, err = mgr.GetIPAddress(ctx, sb.SandboxName, s.cfg.IPDiscoveryTimeout) - if err != nil { - s.logger.Warn("IP discovery failed", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "error", err, - ) - // Still mark as running even if we couldn't discover the IP - _ = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil) - return "", fmt.Errorf("get ip: %w", err) - } - - // Validate IP uniqueness before storing - if err := s.validateIPUniqueness(ctx, sb.ID, ip); err != nil { - s.logger.Error("IP conflict during sandbox start", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "ip_address", ip, - "mac_address", mac, - "error", err, - ) - _ = s.store.UpdateSandboxState(ctx, sb.ID, 
store.SandboxStateRunning, nil) - return "", fmt.Errorf("ip conflict: %w", err) - } - - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, &ip); err != nil { - return "", err - } - } else { - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, nil); err != nil { - return "", err - } - } - - s.logger.Info("sandbox started", - "sandbox_id", sb.ID, - "ip_address", ip, - ) - - s.telemetry.Track("sandbox_start", map[string]any{ - "sandbox_id": sb.ID, - "wait_for_ip": waitForIP, - "success": true, - }) - - return ip, nil -} - -// DiscoverIP attempts to discover the IP address for a sandbox. -// This is useful for async workflows where wait_for_ip was false during start. -// Returns the discovered IP address, or an error if discovery fails. -func (s *Service) DiscoverIP(ctx context.Context, sandboxID string) (string, error) { - if strings.TrimSpace(sandboxID) == "" { - return "", fmt.Errorf("sandboxID is required") - } - - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return "", err - } - - // Check if VM is in a state where IP discovery makes sense - if sb.State != store.SandboxStateRunning && sb.State != store.SandboxStateStarting { - return "", fmt.Errorf("sandbox is in state %s, must be running or starting for IP discovery", sb.State) - } - - // Get the appropriate manager (local or remote) for this sandbox - mgr := s.getManagerForSandbox(sb) - - s.logger.Info("discovering IP for sandbox", - "sandbox_id", sandboxID, - "sandbox_name", sb.SandboxName, - ) - - ip, mac, err := mgr.GetIPAddress(ctx, sb.SandboxName, s.cfg.IPDiscoveryTimeout) - if err != nil { - return "", fmt.Errorf("ip discovery failed: %w", err) - } - - // Validate IP uniqueness - if err := s.validateIPUniqueness(ctx, sb.ID, ip); err != nil { - s.logger.Warn("IP conflict during discovery", - "sandbox_id", sb.ID, - "ip_address", ip, - "mac_address", mac, - "error", err, - ) - return "", fmt.Errorf("ip conflict: %w", err) - } - - // Update 
the sandbox with the discovered IP - if err := s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateRunning, &ip); err != nil { - return "", fmt.Errorf("persist ip: %w", err) - } - - s.logger.Info("IP discovered and stored", - "sandbox_id", sandboxID, - "ip_address", ip, - "mac_address", mac, - ) - - return ip, nil -} - -// StopSandbox gracefully shuts down the VM or forces if force is true. -func (s *Service) StopSandbox(ctx context.Context, sandboxID string, force bool) error { - if strings.TrimSpace(sandboxID) == "" { - return fmt.Errorf("sandboxID is required") - } - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return err - } - // Get the appropriate manager (local or remote) for this sandbox - mgr := s.getManagerForSandbox(sb) - if err := mgr.StopVM(ctx, sb.SandboxName, force); err != nil { - return fmt.Errorf("stop vm: %w", err) - } - err = s.store.UpdateSandboxState(ctx, sb.ID, store.SandboxStateStopped, sb.IPAddress) - if err == nil { - s.telemetry.Track("sandbox_stop", map[string]any{ - "sandbox_id": sb.ID, - "force": force, - "success": true, - }) - } - return err -} - -// DestroySandbox forcibly destroys and undefines the VM and removes its workspace. -// The sandbox is then soft-deleted from the store. Returns the sandbox info after destruction. 
-func (s *Service) DestroySandbox(ctx context.Context, sandboxID string) (*store.Sandbox, error) { - if strings.TrimSpace(sandboxID) == "" { - return nil, fmt.Errorf("sandboxID is required") - } - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return nil, err - } - - // Cleanup managed SSH keys for this sandbox (non-fatal if it fails) - if s.keyMgr != nil { - if err := s.keyMgr.CleanupSandbox(ctx, sandboxID); err != nil { - s.logger.Warn("failed to cleanup SSH keys", - "sandbox_id", sandboxID, - "error", err, - ) - } - } - - // Get the appropriate manager (local or remote) for this sandbox - mgr := s.getManagerForSandbox(sb) - if err := mgr.DestroyVM(ctx, sb.SandboxName); err != nil { - return nil, fmt.Errorf("destroy vm: %w", err) - } - if err := s.store.DeleteSandbox(ctx, sandboxID); err != nil { - return nil, err - } - // Update state to reflect destruction - sb.State = store.SandboxStateDestroyed - - s.telemetry.Track("sandbox_destroy", map[string]any{ - "sandbox_id": sandboxID, - "success": true, - }) - - return sb, nil -} - -// CreateSnapshot creates a snapshot and persists a Snapshot record. 
-func (s *Service) CreateSnapshot(ctx context.Context, sandboxID, name string, external bool) (*store.Snapshot, error) { - if strings.TrimSpace(sandboxID) == "" || strings.TrimSpace(name) == "" { - return nil, fmt.Errorf("sandboxID and name are required") - } - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return nil, err - } - // Get the appropriate manager (local or remote) for this sandbox - mgr := s.getManagerForSandbox(sb) - ref, err := mgr.CreateSnapshot(ctx, sb.SandboxName, name, external) - if err != nil { - return nil, fmt.Errorf("create snapshot: %w", err) - } - sn := &store.Snapshot{ - ID: fmt.Sprintf("SNP-%s", shortID()), - SandboxID: sb.ID, - Name: ref.Name, - Kind: snapshotKindFromString(ref.Kind), - Ref: ref.Ref, - CreatedAt: s.timeNowFn().UTC(), - } - if err := s.store.CreateSnapshot(ctx, sn); err != nil { - return nil, err - } - - s.telemetry.Track("snapshot_create", map[string]any{ - "sandbox_id": sandboxID, - "snapshot_name": name, - "snapshot_kind": ref.Kind, - "external": external, - "success": true, - }) - - return sn, nil -} - -// DiffSnapshots computes a normalized change set between two snapshots and persists a Diff. -// Note: This implementation currently aggregates command history into CommandsRun and -// leaves file/package/service diffs empty. A dedicated diff engine should populate these fields -// by mounting snapshots and computing differences. -func (s *Service) DiffSnapshots(ctx context.Context, sandboxID, from, to string) (*store.Diff, error) { - if strings.TrimSpace(sandboxID) == "" || strings.TrimSpace(from) == "" || strings.TrimSpace(to) == "" { - return nil, fmt.Errorf("sandboxID, from, to are required") - } - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return nil, err - } - - // Get the appropriate manager (local or remote) for this sandbox - mgr := s.getManagerForSandbox(sb) - - // Best-effort: get a plan (notes/instructions) from manager; ignore failure. 
- _, _ = mgr.DiffSnapshot(ctx, sb.SandboxName, from, to) - - // For now, compose CommandsRun from command history as partial diff signal. - cmds, err := s.store.ListCommands(ctx, sandboxID, &store.ListOptions{OrderBy: "started_at", Asc: true}) - if err != nil && !errors.Is(err, store.ErrNotFound) { - return nil, fmt.Errorf("list commands: %w", err) - } - var cr []store.CommandSummary - for _, c := range cmds { - cr = append(cr, store.CommandSummary{ - Cmd: c.Command, - ExitCode: c.ExitCode, - At: c.EndedAt, - }) - } - - diff := &store.Diff{ - ID: fmt.Sprintf("DIF-%s", shortID()), - SandboxID: sandboxID, - FromSnapshot: from, - ToSnapshot: to, - DiffJSON: store.ChangeDiff{ - FilesModified: []string{}, - FilesAdded: []string{}, - FilesRemoved: []string{}, - PackagesAdded: []store.PackageInfo{}, - PackagesRemoved: []store.PackageInfo{}, - ServicesChanged: []store.ServiceChange{}, - CommandsRun: cr, - }, - CreatedAt: s.timeNowFn().UTC(), - } - if err := s.store.SaveDiff(ctx, diff); err != nil { - return nil, err - } - - s.telemetry.Track("snapshot_diff", map[string]any{ - "sandbox_id": sandboxID, - "from_snapshot": from, - "to_snapshot": to, - "success": true, - }) - - return diff, nil -} - -// RunCommand executes a command inside the sandbox via SSH. -// If privateKeyPath is empty and a key manager is configured, managed credentials will be used. -// Otherwise, username and privateKeyPath are required for SSH auth. -func (s *Service) RunCommand(ctx context.Context, sandboxID, username, privateKeyPath, command string, timeout time.Duration, env map[string]string) (*store.Command, error) { - return s.RunCommandWithCallback(ctx, sandboxID, username, privateKeyPath, command, timeout, env, nil) -} - -// RunCommandWithCallback executes a command inside the sandbox via SSH with optional streaming output. -// If outputCallback is non-nil, it will be called for each chunk of output as it arrives. 
-// The full output is still returned in the Command result after the command completes. -func (s *Service) RunCommandWithCallback(ctx context.Context, sandboxID, username, privateKeyPath, command string, timeout time.Duration, env map[string]string, outputCallback OutputCallback) (*store.Command, error) { - if strings.TrimSpace(sandboxID) == "" { - return nil, fmt.Errorf("sandboxID is required") - } - if strings.TrimSpace(command) == "" { - return nil, fmt.Errorf("command is required") - } - if timeout <= 0 { - timeout = s.cfg.CommandTimeout - } - - // Determine if we're using managed credentials - var useManagedCreds bool - var certPath string - if strings.TrimSpace(privateKeyPath) == "" { - if s.keyMgr == nil { - return nil, fmt.Errorf("privateKeyPath is required (no key manager configured)") - } - useManagedCreds = true - // Default username for managed credentials - if strings.TrimSpace(username) == "" { - username = "sandbox" - } - } else { - // Traditional mode: username is required - if strings.TrimSpace(username) == "" { - return nil, fmt.Errorf("username is required") - } - } - - sb, err := s.store.GetSandbox(ctx, sandboxID) - if err != nil { - return nil, err - } - - // Get the appropriate manager (local or remote) for this sandbox - mgr := s.getManagerForSandbox(sb) - - // Always re-discover IP to ensure we have the correct one for THIS sandbox. - // This is important because: - // 1. Cached IPs might be stale if the VM was restarted - // 2. Another sandbox might have been assigned the same IP erroneously - // 3. 
DHCP leases can change - ip, mac, err := mgr.GetIPAddress(ctx, sb.SandboxName, s.cfg.IPDiscoveryTimeout) - if err != nil { - return nil, fmt.Errorf("discover ip for sandbox %s (vm: %s): %w", sb.ID, sb.SandboxName, err) - } - - // Check if this IP is already assigned to a DIFFERENT running sandbox - if err := s.validateIPUniqueness(ctx, sb.ID, ip); err != nil { - s.logger.Warn("IP conflict detected", - "sandbox_id", sb.ID, - "sandbox_name", sb.SandboxName, - "ip_address", ip, - "mac_address", mac, - "error", err, - ) - return nil, fmt.Errorf("ip conflict: %w", err) - } - - // Update IP if it changed or wasn't set - if sb.IPAddress == nil || *sb.IPAddress != ip { - if err := s.store.UpdateSandboxState(ctx, sb.ID, sb.State, &ip); err != nil { - return nil, fmt.Errorf("persist ip: %w", err) - } - } - - // Get managed credentials if needed - if useManagedCreds { - creds, err := s.keyMgr.GetCredentials(ctx, sandboxID, username) - if err != nil { - return nil, fmt.Errorf("get managed credentials: %w", err) - } - privateKeyPath = creds.PrivateKeyPath - certPath = creds.CertificatePath - username = creds.Username - } - - cmdID := fmt.Sprintf("CMD-%s", shortID()) - now := s.timeNowFn().UTC() - - // Encode environment for persistence. 
- var envJSON *string - if len(env) > 0 { - b, _ := json.Marshal(env) - tmp := string(b) - envJSON = &tmp - } - - // Determine proxy jump - if sandbox is on remote host, use that host as jump - proxyJump := "" - if sb.HostAddress != nil && *sb.HostAddress != "" { - // Format: user@host for SSH ProxyJump - proxyJump = fmt.Sprintf("root@%s", *sb.HostAddress) - } - - // Execute SSH command - var stdout, stderr string - var code int - var runErr error - - if outputCallback != nil { - // Use streaming variant - outputChan := make(chan OutputChunk, 100) - - // Goroutine to forward chunks to callback - go func() { - for chunk := range outputChan { - outputCallback(chunk) - } - }() - - if useManagedCreds { - stdout, stderr, code, runErr = s.ssh.RunWithCertStreaming(ctx, ip, username, privateKeyPath, certPath, commandWithEnv(command, env), timeout, env, proxyJump, outputChan) - } else { - stdout, stderr, code, runErr = s.ssh.RunStreaming(ctx, ip, username, privateKeyPath, commandWithEnv(command, env), timeout, env, proxyJump, outputChan) - } - close(outputChan) - } else { - // Use existing non-streaming variant - if useManagedCreds { - stdout, stderr, code, runErr = s.ssh.RunWithCert(ctx, ip, username, privateKeyPath, certPath, commandWithEnv(command, env), timeout, env, proxyJump) - } else { - stdout, stderr, code, runErr = s.ssh.Run(ctx, ip, username, privateKeyPath, commandWithEnv(command, env), timeout, env, proxyJump) - } - } - - cmd := &store.Command{ - ID: cmdID, - SandboxID: sandboxID, - Command: command, - EnvJSON: envJSON, - Stdout: stdout, - Stderr: stderr, - ExitCode: code, - StartedAt: now, - EndedAt: s.timeNowFn().UTC(), - } - if err := s.store.SaveCommand(ctx, cmd); err != nil { - return nil, fmt.Errorf("save command: %w", err) - } - - s.telemetry.Track("sandbox_command", map[string]any{ - "sandbox_id": sandboxID, - "command_id": cmdID, - "exit_code": code, - "duration_ms": cmd.EndedAt.Sub(cmd.StartedAt).Milliseconds(), - "success": true, - }) - - if runErr 
!= nil { - return cmd, fmt.Errorf("ssh run: %w", runErr) - } - return cmd, nil -} - -// OutputChunk represents a piece of streaming output from a command -type OutputChunk struct { - Data []byte - IsStderr bool - IsRetry bool - Retry *RetryInfo -} - -// RetryInfo contains details about a retry attempt -type RetryInfo struct { - Attempt int - Max int - Delay time.Duration - Error string -} - -// OutputCallback is called for each chunk of streaming output -type OutputCallback func(chunk OutputChunk) - -// SSHRunner executes commands on a remote host via SSH. -type SSHRunner interface { - // Run executes command on user@addr using the provided private key file. - // Returns stdout, stderr, and exit code. Implementations should use StrictHostKeyChecking=no - // or a known_hosts strategy appropriate for ephemeral sandboxes. - // proxyJump is optional - if non-empty, SSH will jump through that host. - Run(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (stdout, stderr string, exitCode int, err error) - - // RunWithCert executes command using certificate-based authentication. - // The certPath should point to the SSH certificate file (key-cert.pub). - // proxyJump is optional - if non-empty, SSH will jump through that host. - RunWithCert(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (stdout, stderr string, exitCode int, err error) - - // RunStreaming executes command with streaming output sent to outputChan. - // proxyJump is optional - if non-empty, SSH will jump through that host. 
- RunStreaming(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string, outputChan chan<- OutputChunk) (stdout, stderr string, exitCode int, err error) - - // RunWithCertStreaming executes command using certificate-based authentication with streaming output. - // proxyJump is optional - if non-empty, SSH will jump through that host. - RunWithCertStreaming(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string, outputChan chan<- OutputChunk) (stdout, stderr string, exitCode int, err error) -} - -// DefaultSSHRunner is a simple implementation backed by the system's ssh binary. -type DefaultSSHRunner struct { - // Logger for retry and connection status - Logger *slog.Logger - - // DefaultProxyJump specifies a default jump host for SSH connections. - // Can be overridden per-call via the proxyJump parameter. - // Format: "user@host:port" or just "host" for default user/port. - DefaultProxyJump string - - // MaxRetries is the maximum number of retry attempts for transient SSH failures. - // Default: 5 - MaxRetries int - - // InitialRetryDelay is the initial delay before the first retry. - // Default: 2s - InitialRetryDelay time.Duration - - // MaxRetryDelay is the maximum delay between retries. - // Default: 30s - MaxRetryDelay time.Duration -} - -// sshRetryConfig returns the retry configuration with defaults applied. 
-func (r *DefaultSSHRunner) sshRetryConfig() (maxRetries int, initialDelay, maxDelay time.Duration) { - maxRetries = r.MaxRetries - if maxRetries <= 0 { - maxRetries = 5 - } - initialDelay = r.InitialRetryDelay - if initialDelay <= 0 { - initialDelay = 2 * time.Second - } - maxDelay = r.MaxRetryDelay - if maxDelay <= 0 { - maxDelay = 30 * time.Second - } - return -} - -// isRetryableSSHError checks if the error indicates a transient SSH failure -// that should be retried (e.g., connection refused, sshd not ready). -func isRetryableSSHError(stderr string, exitCode int) bool { - // Exit code 255 indicates SSH connection failure - if exitCode != 255 { - return false - } - // Check for common transient connection errors - retryablePatterns := []string{ - "Connection refused", - "Connection closed", - "Connection reset", - "Connection timed out", - "No route to host", - "Network is unreachable", - "Host is down", - "port 22: Connection refused", - "port 65535", // Malformed connection error - "UNKNOWN", // SSH parsing error during connection failure - } - stderrLower := strings.ToLower(stderr) - for _, pattern := range retryablePatterns { - if strings.Contains(stderrLower, strings.ToLower(pattern)) { - return true - } - } - return false -} - -// Run implements SSHRunner.Run using the local ssh client. -// It disables strict host key checking and sets a connect timeout. -// It assumes the VM is reachable on the default SSH port (22). -// Includes retry logic with exponential backoff for transient connection failures. 
-func (r *DefaultSSHRunner) Run(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, _ map[string]string, proxyJump string) (string, string, int, error) { - // Pre-flight check: verify the private key file exists and has correct permissions - keyInfo, err := os.Stat(privateKeyPath) - if err != nil { - if os.IsNotExist(err) { - return "", "", 255, fmt.Errorf("ssh key file not found: %s", privateKeyPath) - } - return "", "", 255, fmt.Errorf("ssh key file error: %w", err) - } - // Check permissions - SSH keys should not be world-readable - if keyInfo.Mode().Perm()&0o077 != 0 { - return "", "", 255, fmt.Errorf("ssh key file %s has insecure permissions %o (should be 0600 or stricter)", privateKeyPath, keyInfo.Mode().Perm()) - } - - if _, ok := ctx.Deadline(); !ok && timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - args := []string{ - "-i", privateKeyPath, - "-o", "StrictHostKeyChecking=no", - "-o", "UserKnownHostsFile=/dev/null", - "-o", "ConnectTimeout=15", - "-o", "ServerAliveInterval=30", - "-o", "ServerAliveCountMax=1000", - } - // Add ProxyJump if provided, otherwise use default - effectiveProxyJump := proxyJump - if effectiveProxyJump == "" { - effectiveProxyJump = r.DefaultProxyJump - } - if effectiveProxyJump != "" { - args = append(args, "-J", effectiveProxyJump) - } - args = append(args, - fmt.Sprintf("%s@%s", user, addr), - "--", - command, - ) - - maxRetries, initialDelay, maxDelay := r.sshRetryConfig() - var lastStdout, lastStderr string - var lastExitCode int - var lastErr error - - for attempt := 0; attempt <= maxRetries; attempt++ { - // Check context before each attempt - if ctx.Err() != nil { - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("context cancelled after %d attempts: %w", attempt, ctx.Err()) - } - - var stdout, stderr bytes.Buffer - cmd := exec.CommandContext(ctx, "ssh", args...) 
- cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err = cmd.Run() - exitCode := 0 - if err != nil { - // Best-effort extract exit code - var ee *exec.ExitError - if errors.As(err, &ee) { - exitCode = ee.ExitCode() - } else { - exitCode = 255 - } - stderrStr := stderr.String() - - // Check if this is a retryable error - if attempt < maxRetries && isRetryableSSHError(stderrStr, exitCode) { - // Calculate backoff delay: 2s, 4s, 8s, 16s, 30s (capped) - delay := initialDelay * time.Duration(1< maxDelay { - delay = maxDelay - } - if r.Logger != nil { - r.Logger.Warn("SSH connection failed, retrying", - "attempt", attempt+1, - "max_retries", maxRetries, - "delay", delay, - "addr", addr, - "stderr", stderrStr, - ) - } - select { - case <-time.After(delay): - // Continue to next attempt - case <-ctx.Done(): - return stdout.String(), stderrStr, exitCode, fmt.Errorf("context cancelled during retry backoff: %w", ctx.Err()) - } - lastStdout, lastStderr, lastExitCode, lastErr = stdout.String(), stderrStr, exitCode, err - continue - } - - // Not retryable or max retries exceeded - if stderrStr != "" { - err = fmt.Errorf("%w: %s", err, stderrStr) - } - return stdout.String(), stderrStr, exitCode, err - } - - // Success - if cmd.ProcessState != nil { - exitCode = cmd.ProcessState.ExitCode() - } - return stdout.String(), stderr.String(), exitCode, nil - } - - // Should not reach here, but return last error if we do - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("max retries (%d) exceeded: %w", maxRetries, lastErr) -} - -// RunWithCert implements SSHRunner.RunWithCert using the local ssh client with certificate auth. -// Includes retry logic with exponential backoff for transient connection failures. 
-func (r *DefaultSSHRunner) RunWithCert(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, _ map[string]string, proxyJump string) (string, string, int, error) { - // Pre-flight check: verify the private key file exists and has correct permissions - keyInfo, err := os.Stat(privateKeyPath) - if err != nil { - if os.IsNotExist(err) { - return "", "", 255, fmt.Errorf("ssh key file not found: %s", privateKeyPath) - } - return "", "", 255, fmt.Errorf("ssh key file error: %w", err) - } - if keyInfo.Mode().Perm()&0o077 != 0 { - return "", "", 255, fmt.Errorf("ssh key file %s has insecure permissions %o (should be 0600 or stricter)", privateKeyPath, keyInfo.Mode().Perm()) - } - - // Check certificate file exists - if _, err := os.Stat(certPath); err != nil { - if os.IsNotExist(err) { - return "", "", 255, fmt.Errorf("ssh certificate file not found: %s", certPath) - } - return "", "", 255, fmt.Errorf("ssh certificate file error: %w", err) - } - - if _, ok := ctx.Deadline(); !ok && timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - args := []string{ - "-i", privateKeyPath, - "-o", fmt.Sprintf("CertificateFile=%s", certPath), - "-o", "StrictHostKeyChecking=no", - "-o", "UserKnownHostsFile=/dev/null", - "-o", "ConnectTimeout=15", - "-o", "ServerAliveInterval=30", - "-o", "ServerAliveCountMax=1000", - } - // Add ProxyJump if provided, otherwise use default - effectiveProxyJump := proxyJump - if effectiveProxyJump == "" { - effectiveProxyJump = r.DefaultProxyJump - } - if effectiveProxyJump != "" { - args = append(args, "-J", effectiveProxyJump) - } - args = append(args, - fmt.Sprintf("%s@%s", user, addr), - "--", - command, - ) - - maxRetries, initialDelay, maxDelay := r.sshRetryConfig() - var lastStdout, lastStderr string - var lastExitCode int - var lastErr error - - for attempt := 0; attempt <= maxRetries; attempt++ { - // Check context before each attempt - if 
ctx.Err() != nil { - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("context cancelled after %d attempts: %w", attempt, ctx.Err()) - } - - var stdout, stderr bytes.Buffer - cmd := exec.CommandContext(ctx, "ssh", args...) - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err = cmd.Run() - exitCode := 0 - if err != nil { - var ee *exec.ExitError - if errors.As(err, &ee) { - exitCode = ee.ExitCode() - } else { - exitCode = 255 - } - stderrStr := stderr.String() - - // Check if this is a retryable error - if attempt < maxRetries && isRetryableSSHError(stderrStr, exitCode) { - // Calculate backoff delay: 2s, 4s, 8s, 16s, 30s (capped) - delay := initialDelay * time.Duration(1< maxDelay { - delay = maxDelay - } - if r.Logger != nil { - r.Logger.Warn("SSH connection failed (cert auth), retrying", - "attempt", attempt+1, - "max_retries", maxRetries, - "delay", delay, - "addr", addr, - "stderr", stderrStr, - ) - } - select { - case <-time.After(delay): - // Continue to next attempt - case <-ctx.Done(): - return stdout.String(), stderrStr, exitCode, fmt.Errorf("context cancelled during retry backoff: %w", ctx.Err()) - } - lastStdout, lastStderr, lastExitCode, lastErr = stdout.String(), stderrStr, exitCode, err - continue - } - - // Not retryable or max retries exceeded - if stderrStr != "" { - err = fmt.Errorf("%w: %s", err, stderrStr) - } - return stdout.String(), stderrStr, exitCode, err - } - - // Success - if cmd.ProcessState != nil { - exitCode = cmd.ProcessState.ExitCode() - } - return stdout.String(), stderr.String(), exitCode, nil - } - - // Should not reach here, but return last error if we do - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("max retries (%d) exceeded: %w", maxRetries, lastErr) -} - -// SourceCommandResult holds the output from a read-only command on a source VM. 
-type SourceCommandResult struct { - SourceVM string - ExitCode int - Stdout string - Stderr string -} - -// RunSourceVMCommand executes a validated read-only command on a golden/source VM. -// The command is validated against the readonly allowlist before execution. -// Results are NOT persisted to the store. -func (s *Service) RunSourceVMCommand(ctx context.Context, sourceVMName, command string, timeout time.Duration) (*SourceCommandResult, error) { - return s.RunSourceVMCommandWithCallback(ctx, sourceVMName, command, timeout, nil) -} - -// RunSourceVMCommandWithCallback executes a validated read-only command on a golden/source VM with streaming. -func (s *Service) RunSourceVMCommandWithCallback(ctx context.Context, sourceVMName, command string, timeout time.Duration, outputCallback OutputCallback) (*SourceCommandResult, error) { - if strings.TrimSpace(sourceVMName) == "" { - return nil, fmt.Errorf("sourceVMName is required") - } - if strings.TrimSpace(command) == "" { - return nil, fmt.Errorf("command is required") - } - - // Validate command against the read-only allowlist. - if err := readonly.ValidateCommand(command); err != nil { - s.logger.Warn("source VM command blocked by allowlist", - "source_vm", sourceVMName, - "command", command, - "reason", err.Error(), - ) - s.telemetry.Track("source_vm_command_blocked", map[string]any{ - "source_vm": sourceVMName, - "reason": err.Error(), - }) - return nil, fmt.Errorf("command not allowed in read-only mode: %w", err) - } - - if timeout <= 0 { - timeout = s.cfg.CommandTimeout - } - - // Require key manager for source VM access. - if s.keyMgr == nil { - return nil, fmt.Errorf("key manager is required for source VM access") - } - - // Look up source VM host info from store for remote IP discovery and proxy jump. 
- var remoteHost *config.HostConfig - if s.store != nil { - if svm, err := s.store.GetSourceVM(ctx, sourceVMName); err == nil && svm.HostAddress != nil && *svm.HostAddress != "" { - remoteHost = &config.HostConfig{ - Name: derefStr(svm.HostName), - Address: *svm.HostAddress, - } - } - } - - // Discover VM IP using remote manager if VM is on a remote host. - ipMgr := s.mgr - if remoteHost != nil && s.remoteManagerFactory != nil { - ipMgr = s.remoteManagerFactory(*remoteHost) - } - ip, _, err := ipMgr.GetIPAddress(ctx, sourceVMName, s.cfg.IPDiscoveryTimeout) - if err != nil { - return nil, fmt.Errorf("discover IP for source VM %s: %w", sourceVMName, err) - } - - // Get read-only credentials. - creds, err := s.keyMgr.GetSourceVMCredentials(ctx, sourceVMName) - if err != nil { - return nil, fmt.Errorf("get source VM credentials: %w", err) - } - - // Determine proxy jump. Use remote host as jump host if VM is on a remote network. - proxyJump := s.cfg.SSHProxyJump - if remoteHost != nil && proxyJump == "" { - sshUser := remoteHost.SSHUser - if sshUser == "" { - sshUser = "root" - } - proxyJump = fmt.Sprintf("%s@%s", sshUser, remoteHost.Address) - } - - // Pass command directly - the restricted shell on source VMs handles execution. 
- var stdout, stderr string - var code int - var runErr error - - if outputCallback != nil { - outputChan := make(chan OutputChunk, 100) - go func() { - for chunk := range outputChan { - outputCallback(chunk) - } - }() - stdout, stderr, code, runErr = s.ssh.RunWithCertStreaming(ctx, ip, creds.Username, creds.PrivateKeyPath, creds.CertificatePath, command, timeout, nil, proxyJump, outputChan) - close(outputChan) - } else { - stdout, stderr, code, runErr = s.ssh.RunWithCert(ctx, ip, creds.Username, creds.PrivateKeyPath, creds.CertificatePath, command, timeout, nil, proxyJump) - } - - result := &SourceCommandResult{ - SourceVM: sourceVMName, - ExitCode: code, - Stdout: stdout, - Stderr: stderr, - } - - if code == 126 { - s.logger.Warn("source VM command blocked by restricted shell", - "source_vm", sourceVMName, - "command", command, - "stderr", stderr, - ) - s.telemetry.Track("source_vm_command_blocked", map[string]any{ - "source_vm": sourceVMName, - "reason": "restricted_shell", - }) - } - - if runErr != nil { - return result, fmt.Errorf("ssh run on source VM: %w", runErr) - } - - s.telemetry.Track("source_vm_command", map[string]any{ - "source_vm": sourceVMName, - "exit_code": code, - }) - - return result, nil -} - -// Helpers - -func snapshotKindFromString(k string) store.SnapshotKind { - switch strings.ToUpper(k) { - case "EXTERNAL": - return store.SnapshotKindExternal - default: - return store.SnapshotKindInternal - } -} - -func derefStr(s *string) string { - if s == nil { - return "" - } - return *s -} - -func shortID() string { - id := uuid.NewString() - if i := strings.IndexByte(id, '-'); i > 0 { - return id[:i] - } - return id -} - -func commandWithEnv(cmd string, env map[string]string) string { - if len(env) == 0 { - // Execute in login shell to emulate typical interactive environment - return fmt.Sprintf("bash -lc %q", cmd) - } - var exports []string - for k, v := range env { - exports = append(exports, fmt.Sprintf(`export %s=%s`, safeShellIdent(k), 
shellQuote(v))) - } - preamble := strings.Join(exports, "; ") + "; " - return fmt.Sprintf("bash -lc %q", preamble+cmd) -} - -func shellQuote(s string) string { - // Basic single-quote shell escaping - return "'" + strings.ReplaceAll(s, "'", `'\''`) + "'" -} - -func safeShellIdent(s string) string { - // Allow alnum and underscore, replace others with underscore - var b strings.Builder - for _, r := range s { - if (r >= 'a' && r <= 'z') || - (r >= 'A' && r <= 'Z') || - (r >= '0' && r <= '9') || - r == '_' { - b.WriteRune(r) - } else { - b.WriteRune('_') - } - } - out := b.String() - if out == "" { - return "VAR" - } - return out -} - -// RunStreaming implements SSHRunner.RunStreaming using the local ssh client with streaming output. -// Only streams output on the final retry attempt for cleaner UX. -func (r *DefaultSSHRunner) RunStreaming(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, _ map[string]string, proxyJump string, outputChan chan<- OutputChunk) (string, string, int, error) { - // Pre-flight check: verify the private key file exists and has correct permissions - keyInfo, err := os.Stat(privateKeyPath) - if err != nil { - if os.IsNotExist(err) { - return "", "", 255, fmt.Errorf("ssh key file not found: %s", privateKeyPath) - } - return "", "", 255, fmt.Errorf("ssh key file error: %w", err) - } - if keyInfo.Mode().Perm()&0o077 != 0 { - return "", "", 255, fmt.Errorf("ssh key file %s has insecure permissions %o (should be 0600 or stricter)", privateKeyPath, keyInfo.Mode().Perm()) - } - - if _, ok := ctx.Deadline(); !ok && timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - args := []string{ - "-i", privateKeyPath, - "-o", "StrictHostKeyChecking=no", - "-o", "UserKnownHostsFile=/dev/null", - "-o", "ConnectTimeout=15", - "-o", "ServerAliveInterval=30", - "-o", "ServerAliveCountMax=1000", - } - effectiveProxyJump := proxyJump - if effectiveProxyJump == "" 
{ - effectiveProxyJump = r.DefaultProxyJump - } - if effectiveProxyJump != "" { - args = append(args, "-J", effectiveProxyJump) - } - args = append(args, - fmt.Sprintf("%s@%s", user, addr), - "--", - command, - ) - - maxRetries, initialDelay, maxDelay := r.sshRetryConfig() - var lastStdout, lastStderr string - var lastExitCode int - var lastErr error - - for attempt := 0; attempt <= maxRetries; attempt++ { - if ctx.Err() != nil { - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("context cancelled after %d attempts: %w", attempt, ctx.Err()) - } - - // Signal retry to clear previous output (attempt > 0 means this is a retry) - if attempt > 0 && outputChan != nil { - select { - case outputChan <- OutputChunk{Data: nil, IsStderr: false}: // nil data signals reset - default: - } - } - - // Always stream output - each attempt streams, retries override previous - stdout, stderr, exitCode, err := r.runSingleAttemptStreaming(ctx, args, outputChan) - - if err == nil { - return stdout, stderr, exitCode, nil - } - - // Check if this is a retryable error - isLastAttempt := attempt == maxRetries - if !isLastAttempt && isRetryableSSHError(stderr, exitCode) { - delay := initialDelay * time.Duration(1< maxDelay { - delay = maxDelay - } - if r.Logger != nil { - r.Logger.Warn("SSH connection failed, retrying", - "attempt", attempt+1, - "max_retries", maxRetries, - "delay", delay, - "addr", addr, - "stderr", stderr, - ) - } - - // Send retry notification to TUI - if outputChan != nil { - select { - case outputChan <- OutputChunk{ - IsRetry: true, - Retry: &RetryInfo{ - Attempt: attempt + 1, - Max: maxRetries, - Delay: delay, - Error: stderr, - }, - }: - default: - } - } - - select { - case <-time.After(delay): - case <-ctx.Done(): - return stdout, stderr, exitCode, fmt.Errorf("context cancelled during retry backoff: %w", ctx.Err()) - } - lastStdout, lastStderr, lastExitCode, lastErr = stdout, stderr, exitCode, err - continue - } - - if stderr != "" { - err = fmt.Errorf("%w: 
%s", err, stderr) - } - return stdout, stderr, exitCode, err - } - - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("max retries (%d) exceeded: %w", maxRetries, lastErr) -} - -// RunWithCertStreaming implements SSHRunner.RunWithCertStreaming using certificate-based auth with streaming. -func (r *DefaultSSHRunner) RunWithCertStreaming(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, _ map[string]string, proxyJump string, outputChan chan<- OutputChunk) (string, string, int, error) { - // Pre-flight check: verify the private key file exists and has correct permissions - keyInfo, err := os.Stat(privateKeyPath) - if err != nil { - if os.IsNotExist(err) { - return "", "", 255, fmt.Errorf("ssh key file not found: %s", privateKeyPath) - } - return "", "", 255, fmt.Errorf("ssh key file error: %w", err) - } - if keyInfo.Mode().Perm()&0o077 != 0 { - return "", "", 255, fmt.Errorf("ssh key file %s has insecure permissions %o (should be 0600 or stricter)", privateKeyPath, keyInfo.Mode().Perm()) - } - - if _, err := os.Stat(certPath); err != nil { - if os.IsNotExist(err) { - return "", "", 255, fmt.Errorf("ssh certificate file not found: %s", certPath) - } - return "", "", 255, fmt.Errorf("ssh certificate file error: %w", err) - } - - if _, ok := ctx.Deadline(); !ok && timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - args := []string{ - "-i", privateKeyPath, - "-o", fmt.Sprintf("CertificateFile=%s", certPath), - "-o", "StrictHostKeyChecking=no", - "-o", "UserKnownHostsFile=/dev/null", - "-o", "ConnectTimeout=15", - "-o", "ServerAliveInterval=30", - "-o", "ServerAliveCountMax=1000", - } - effectiveProxyJump := proxyJump - if effectiveProxyJump == "" { - effectiveProxyJump = r.DefaultProxyJump - } - if effectiveProxyJump != "" { - args = append(args, "-J", effectiveProxyJump) - } - args = append(args, - fmt.Sprintf("%s@%s", user, addr), - "--", - 
command, - ) - - maxRetries, initialDelay, maxDelay := r.sshRetryConfig() - var lastStdout, lastStderr string - var lastExitCode int - var lastErr error - - for attempt := 0; attempt <= maxRetries; attempt++ { - if ctx.Err() != nil { - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("context cancelled after %d attempts: %w", attempt, ctx.Err()) - } - - // Signal retry to clear previous output (attempt > 0 means this is a retry) - if attempt > 0 && outputChan != nil { - select { - case outputChan <- OutputChunk{Data: nil, IsStderr: false}: // nil data signals reset - default: - } - } - - // Always stream output - each attempt streams, retries override previous - stdout, stderr, exitCode, err := r.runSingleAttemptStreaming(ctx, args, outputChan) - - if err == nil { - return stdout, stderr, exitCode, nil - } - - // Check if this is a retryable error - isLastAttempt := attempt == maxRetries - if !isLastAttempt && isRetryableSSHError(stderr, exitCode) { - delay := initialDelay * time.Duration(1< maxDelay { - delay = maxDelay - } - if r.Logger != nil { - r.Logger.Warn("SSH connection failed (cert auth), retrying", - "attempt", attempt+1, - "max_retries", maxRetries, - "delay", delay, - "addr", addr, - "stderr", stderr, - ) - } - - // Send retry notification to TUI - if outputChan != nil { - select { - case outputChan <- OutputChunk{ - IsRetry: true, - Retry: &RetryInfo{ - Attempt: attempt + 1, - Max: maxRetries, - Delay: delay, - Error: stderr, - }, - }: - default: - } - } - - select { - case <-time.After(delay): - case <-ctx.Done(): - return stdout, stderr, exitCode, fmt.Errorf("context cancelled during retry backoff: %w", ctx.Err()) - } - lastStdout, lastStderr, lastExitCode, lastErr = stdout, stderr, exitCode, err - continue - } - - if stderr != "" { - err = fmt.Errorf("%w: %s", err, stderr) - } - return stdout, stderr, exitCode, err - } - - return lastStdout, lastStderr, lastExitCode, fmt.Errorf("max retries (%d) exceeded: %w", maxRetries, lastErr) -} - -// 
runSingleAttemptStreaming executes a single SSH attempt with optional streaming output. -func (r *DefaultSSHRunner) runSingleAttemptStreaming(ctx context.Context, args []string, outputChan chan<- OutputChunk) (string, string, int, error) { - cmd := exec.CommandContext(ctx, "ssh", args...) - - stdoutPipe, err := cmd.StdoutPipe() - if err != nil { - return "", "", 255, fmt.Errorf("stdout pipe: %w", err) - } - stderrPipe, err := cmd.StderrPipe() - if err != nil { - return "", "", 255, fmt.Errorf("stderr pipe: %w", err) - } - - var stdoutBuf, stderrBuf bytes.Buffer - - if err := cmd.Start(); err != nil { - return "", "", 255, fmt.Errorf("start command: %w", err) - } - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - defer wg.Done() - streamPipe(stdoutPipe, &stdoutBuf, outputChan, false) - }() - - go func() { - defer wg.Done() - streamPipe(stderrPipe, &stderrBuf, outputChan, true) - }() - - wg.Wait() - - err = cmd.Wait() - exitCode := 0 - if err != nil { - var ee *exec.ExitError - if errors.As(err, &ee) { - exitCode = ee.ExitCode() - } else { - exitCode = 255 - } - } - - return stdoutBuf.String(), stderrBuf.String(), exitCode, err -} - -// streamPipe reads from a pipe and sends chunks to the output channel while accumulating in buffer. 
-func streamPipe(pipe io.Reader, buf *bytes.Buffer, outputChan chan<- OutputChunk, isStderr bool) { - reader := bufio.NewReader(pipe) - chunk := make([]byte, 1024) - - for { - n, err := reader.Read(chunk) - if n > 0 { - data := make([]byte, n) - copy(data, chunk[:n]) - buf.Write(data) - - if outputChan != nil { - select { - case outputChan <- OutputChunk{Data: data, IsStderr: isStderr}: - default: - // Don't block if channel is full - } - } - } - if err != nil { - break - } - } -} diff --git a/fluid/internal/vm/service_test.go b/fluid/internal/vm/service_test.go deleted file mode 100755 index d1edf26d..00000000 --- a/fluid/internal/vm/service_test.go +++ /dev/null @@ -1,1394 +0,0 @@ -package vm - -import ( - "context" - "errors" - "log/slog" - "strings" - "sync" - "testing" - "time" - - "github.com/aspectrr/fluid.sh/fluid/internal/libvirt" - "github.com/aspectrr/fluid.sh/fluid/internal/sshkeys" - "github.com/aspectrr/fluid.sh/fluid/internal/store" - "github.com/aspectrr/fluid.sh/fluid/internal/telemetry" -) - -// mockStore implements store.Store for testing -type mockStore struct { - getSandboxFn func(ctx context.Context, id string) (*store.Sandbox, error) - listCommandsFn func(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Command, error) - listSandboxesFn func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) -} - -func (m *mockStore) Config() store.Config { return store.Config{} } -func (m *mockStore) Ping(ctx context.Context) error { - return nil -} - -func (m *mockStore) WithTx(ctx context.Context, fn func(tx store.DataStore) error) error { - return fn(m) -} -func (m *mockStore) Close() error { return nil } - -func (m *mockStore) CreateSandbox(ctx context.Context, sb *store.Sandbox) error { - return nil -} - -func (m *mockStore) GetSandbox(ctx context.Context, id string) (*store.Sandbox, error) { - if m.getSandboxFn != nil { - return m.getSandboxFn(ctx, id) - } - return nil, 
store.ErrNotFound -} - -func (m *mockStore) GetSandboxByVMName(ctx context.Context, vmName string) (*store.Sandbox, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListSandboxes(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - if m.listSandboxesFn != nil { - return m.listSandboxesFn(ctx, filter, opt) - } - return nil, nil -} - -func (m *mockStore) UpdateSandbox(ctx context.Context, sb *store.Sandbox) error { - return nil -} - -func (m *mockStore) UpdateSandboxState(ctx context.Context, id string, newState store.SandboxState, ipAddr *string) error { - return nil -} - -func (m *mockStore) DeleteSandbox(ctx context.Context, id string) error { - return nil -} - -func (m *mockStore) CreateSnapshot(ctx context.Context, sn *store.Snapshot) error { - return nil -} - -func (m *mockStore) GetSnapshot(ctx context.Context, id string) (*store.Snapshot, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetSnapshotByName(ctx context.Context, sandboxID, name string) (*store.Snapshot, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListSnapshots(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Snapshot, error) { - return nil, nil -} - -func (m *mockStore) SaveCommand(ctx context.Context, cmd *store.Command) error { - return nil -} - -func (m *mockStore) GetCommand(ctx context.Context, id string) (*store.Command, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListCommands(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Command, error) { - if m.listCommandsFn != nil { - return m.listCommandsFn(ctx, sandboxID, opt) - } - return nil, nil -} - -func (m *mockStore) SaveDiff(ctx context.Context, d *store.Diff) error { - return nil -} - -func (m *mockStore) GetDiff(ctx context.Context, id string) (*store.Diff, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetDiffBySnapshots(ctx context.Context, 
sandboxID, fromSnapshot, toSnapshot string) (*store.Diff, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) CreateChangeSet(ctx context.Context, cs *store.ChangeSet) error { - return nil -} - -func (m *mockStore) GetChangeSet(ctx context.Context, id string) (*store.ChangeSet, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetChangeSetByJob(ctx context.Context, jobID string) (*store.ChangeSet, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) CreatePublication(ctx context.Context, p *store.Publication) error { - return nil -} - -func (m *mockStore) UpdatePublicationStatus(ctx context.Context, id string, status store.PublicationStatus, commitSHA, prURL, errMsg *string) error { - return nil -} - -func (m *mockStore) GetPublication(ctx context.Context, id string) (*store.Publication, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) CreatePlaybook(ctx context.Context, pb *store.Playbook) error { - return nil -} - -func (m *mockStore) GetPlaybook(ctx context.Context, id string) (*store.Playbook, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) GetPlaybookByName(ctx context.Context, name string) (*store.Playbook, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListPlaybooks(ctx context.Context, opt *store.ListOptions) ([]*store.Playbook, error) { - return nil, nil -} - -func (m *mockStore) UpdatePlaybook(ctx context.Context, pb *store.Playbook) error { - return nil -} - -func (m *mockStore) DeletePlaybook(ctx context.Context, id string) error { - return nil -} - -func (m *mockStore) CreatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { - return nil -} - -func (m *mockStore) GetPlaybookTask(ctx context.Context, id string) (*store.PlaybookTask, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) ListPlaybookTasks(ctx context.Context, playbookID string, opt *store.ListOptions) ([]*store.PlaybookTask, error) { - return nil, nil -} - 
-func (m *mockStore) UpdatePlaybookTask(ctx context.Context, task *store.PlaybookTask) error { - return nil -} - -func (m *mockStore) DeletePlaybookTask(ctx context.Context, id string) error { - return nil -} - -func (m *mockStore) ReorderPlaybookTasks(ctx context.Context, playbookID string, taskIDs []string) error { - return nil -} - -func (m *mockStore) GetNextTaskPosition(ctx context.Context, playbookID string) (int, error) { - return 0, nil -} - -func (m *mockStore) GetSourceVM(ctx context.Context, name string) (*store.SourceVM, error) { - return nil, store.ErrNotFound -} - -func (m *mockStore) UpsertSourceVM(ctx context.Context, svm *store.SourceVM) error { - return nil -} - -func (m *mockStore) ListSourceVMs(ctx context.Context) ([]*store.SourceVM, error) { - return nil, nil -} - -func TestGetSandbox_Success(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - JobID: "JOB-123", - AgentID: "agent-456", - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - } - - sb, err := svc.GetSandbox(context.Background(), "SBX-123") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if sb == nil { - t.Fatal("expected sandbox, got nil") - } - if sb.ID != "SBX-123" { - t.Errorf("expected ID %q, got %q", "SBX-123", sb.ID) - } - if sb.State != store.SandboxStateRunning { - t.Errorf("expected state %s, got %s", store.SandboxStateRunning, sb.State) - } -} - -func TestGetSandbox_NotFound(t *testing.T) { - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return nil, store.ErrNotFound - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - } - - _, err := 
svc.GetSandbox(context.Background(), "nonexistent-id") - if err == nil { - t.Fatal("expected error, got nil") - } - if !errors.Is(err, store.ErrNotFound) { - t.Errorf("expected ErrNotFound, got %v", err) - } -} - -func TestGetSandbox_EmptyID(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.GetSandbox(context.Background(), "") - if err == nil { - t.Fatal("expected error for empty ID, got nil") - } -} - -func TestGetSandbox_WhitespaceID(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.GetSandbox(context.Background(), " ") - if err == nil { - t.Fatal("expected error for whitespace ID, got nil") - } -} - -func TestGetSandboxCommands_Success(t *testing.T) { - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ID: id}, nil - }, - listCommandsFn: func(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Command, error) { - return []*store.Command{ - { - ID: "CMD-001", - SandboxID: sandboxID, - Command: "ls -la", - Stdout: "total 0\n", - ExitCode: 0, - }, - { - ID: "CMD-002", - SandboxID: sandboxID, - Command: "pwd", - Stdout: "/home/user\n", - ExitCode: 0, - }, - }, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - } - - cmds, err := svc.GetSandboxCommands(context.Background(), "SBX-123", nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if len(cmds) != 2 { - t.Errorf("expected 2 commands, got %d", len(cmds)) - } - if cmds[0].Command != "ls -la" { - t.Errorf("expected command %q, got %q", "ls -la", cmds[0].Command) - } -} - -func TestGetSandboxCommands_SandboxNotFound(t *testing.T) { - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return nil, 
store.ErrNotFound - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - } - - _, err := svc.GetSandboxCommands(context.Background(), "nonexistent-id", nil) - if err == nil { - t.Fatal("expected error, got nil") - } - if !errors.Is(err, store.ErrNotFound) { - t.Errorf("expected ErrNotFound, got %v", err) - } -} - -func TestGetSandboxCommands_EmptyID(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.GetSandboxCommands(context.Background(), "", nil) - if err == nil { - t.Fatal("expected error for empty ID, got nil") - } -} - -func TestGetSandboxCommands_WithListOptions(t *testing.T) { - var capturedOpts *store.ListOptions - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ID: id}, nil - }, - listCommandsFn: func(ctx context.Context, sandboxID string, opt *store.ListOptions) ([]*store.Command, error) { - capturedOpts = opt - return []*store.Command{}, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - } - - opts := &store.ListOptions{Limit: 10, Offset: 5} - _, err := svc.GetSandboxCommands(context.Background(), "SBX-123", opts) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if capturedOpts == nil { - t.Fatal("expected list options to be passed") - } - if capturedOpts.Limit != 10 { - t.Errorf("expected limit %d, got %d", 10, capturedOpts.Limit) - } - if capturedOpts.Offset != 5 { - t.Errorf("expected offset %d, got %d", 5, capturedOpts.Offset) - } -} - -// mockSSHRunner is a mock implementation of SSHRunner for testing -type mockSSHRunner struct { - runFn func(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (stdout, stderr string, exitCode int, err error) - runWithCertFn func(ctx 
context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (stdout, stderr string, exitCode int, err error) -} - -func (m *mockSSHRunner) Run(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - if m.runFn != nil { - return m.runFn(ctx, addr, user, privateKeyPath, command, timeout, env, proxyJump) - } - return "", "", 0, nil -} - -func (m *mockSSHRunner) RunWithCert(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - if m.runWithCertFn != nil { - return m.runWithCertFn(ctx, addr, user, privateKeyPath, certPath, command, timeout, env, proxyJump) - } - // Fall back to runFn if runWithCertFn is not set - if m.runFn != nil { - return m.runFn(ctx, addr, user, privateKeyPath, command, timeout, env, proxyJump) - } - return "", "", 0, nil -} - -func (m *mockSSHRunner) RunStreaming(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string, outputChan chan<- OutputChunk) (string, string, int, error) { - // For tests, just delegate to Run and ignore the output channel - return m.Run(ctx, addr, user, privateKeyPath, command, timeout, env, proxyJump) -} - -func (m *mockSSHRunner) RunWithCertStreaming(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string, outputChan chan<- OutputChunk) (string, string, int, error) { - // For tests, just delegate to RunWithCert and ignore the output channel - return m.RunWithCert(ctx, addr, user, privateKeyPath, certPath, command, timeout, env, proxyJump) -} - -// mockManager is a mock implementation of libvirt.Manager for testing -type mockManager struct { - getIPAddressFn func(ctx 
context.Context, vmName string, timeout time.Duration) (string, string, error) -} - -func (m *mockManager) CloneVM(ctx context.Context, baseImage, newVMName string, cpu, memoryMB int, network string) (libvirt.DomainRef, error) { - return libvirt.DomainRef{}, nil -} - -func (m *mockManager) CloneFromVM(ctx context.Context, sourceVMName, newVMName string, cpu, memoryMB int, network string) (libvirt.DomainRef, error) { - return libvirt.DomainRef{}, nil -} - -func (m *mockManager) InjectSSHKey(ctx context.Context, sandboxName, username, publicKey string) error { - return nil -} - -func (m *mockManager) StartVM(ctx context.Context, vmName string) error { - return nil -} - -func (m *mockManager) StopVM(ctx context.Context, vmName string, force bool) error { - return nil -} - -func (m *mockManager) DestroyVM(ctx context.Context, vmName string) error { - return nil -} - -func (m *mockManager) CreateSnapshot(ctx context.Context, vmName, snapshotName string, external bool) (libvirt.SnapshotRef, error) { - return libvirt.SnapshotRef{}, nil -} - -func (m *mockManager) DiffSnapshot(ctx context.Context, vmName, fromSnapshot, toSnapshot string) (*libvirt.FSComparePlan, error) { - return nil, nil -} - -func (m *mockManager) GetIPAddress(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - if m.getIPAddressFn != nil { - return m.getIPAddressFn(ctx, vmName, timeout) - } - return "192.168.1.100", "52:54:00:12:34:56", nil -} - -func (m *mockManager) GetVMState(ctx context.Context, vmName string) (libvirt.VMState, error) { - return libvirt.VMState("running"), nil -} - -func (m *mockManager) ValidateSourceVM(ctx context.Context, vmName string) (*libvirt.VMValidationResult, error) { - return &libvirt.VMValidationResult{ - VMName: vmName, - Valid: true, - State: libvirt.VMStateRunning, - Warnings: []string{}, - Errors: []string{}, - }, nil -} - -func (m *mockManager) CheckHostResources(ctx context.Context, requiredCPUs, requiredMemoryMB int) 
(*libvirt.ResourceCheckResult, error) { - return &libvirt.ResourceCheckResult{ - Valid: true, - AvailableMemoryMB: 8192, - TotalMemoryMB: 16384, - AvailableCPUs: 8, - AvailableDiskMB: 102400, - RequiredMemoryMB: requiredMemoryMB, - RequiredCPUs: requiredCPUs, - Warnings: []string{}, - Errors: []string{}, - }, nil -} - -func TestRunCommand_Success(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - // Return empty list - no other sandboxes with this IP - return []*store.Sandbox{}, nil - }, - } - - mockSSH := &mockSSHRunner{ - runFn: func(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - return "file1.txt\nfile2.txt\n", "", 0, nil - }, - } - - mockMgr := &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "192.168.1.100", "52:54:00:12:34:56", nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - ssh: mockSSH, - mgr: mockMgr, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - cmd, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "ls -l", 60*time.Second, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if cmd == nil { - t.Fatal("expected command result, got nil") - } - if cmd.Stdout != "file1.txt\nfile2.txt\n" { - t.Errorf("expected stdout %q, got %q", "file1.txt\nfile2.txt\n", cmd.Stdout) - } - if cmd.ExitCode != 0 { - t.Errorf("expected exit code 0, got %d", cmd.ExitCode) - } 
-} - -func TestRunCommand_SSHConnectionFailed(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - return []*store.Sandbox{}, nil - }, - } - - mockSSH := &mockSSHRunner{ - runFn: func(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - return "", "ssh: connect to host 192.168.1.100 port 22: Connection refused", 255, errors.New("exit status 255: ssh: connect to host 192.168.1.100 port 22: Connection refused") - }, - } - - mockMgr := &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "192.168.1.100", "52:54:00:12:34:56", nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - ssh: mockSSH, - mgr: mockMgr, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - cmd, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "ls -l", 60*time.Second, nil) - - // Should return error but also the command with stderr - if err == nil { - t.Fatal("expected error for SSH connection failure") - } - if cmd == nil { - t.Fatal("expected command result with stderr even on SSH failure") - } - if cmd.ExitCode != 255 { - t.Errorf("expected exit code 255, got %d", cmd.ExitCode) - } - if cmd.Stderr != "ssh: connect to host 192.168.1.100 port 22: Connection refused" { - t.Errorf("expected stderr to contain SSH error, got %q", cmd.Stderr) - } -} - -func TestRunCommand_CommandFailed(t *testing.T) { - ip := "192.168.1.100" - mockSt := 
&mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - return []*store.Sandbox{}, nil - }, - } - - mockSSH := &mockSSHRunner{ - runFn: func(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - // Command ran but returned non-zero exit code (not an SSH error) - return "", "ls: cannot access '/nonexistent': No such file or directory", 2, nil - }, - } - - mockMgr := &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "192.168.1.100", "52:54:00:12:34:56", nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - ssh: mockSSH, - mgr: mockMgr, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - cmd, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "ls /nonexistent", 60*time.Second, nil) - if err != nil { - t.Fatalf("unexpected error for command with non-zero exit: %v", err) - } - - if cmd == nil { - t.Fatal("expected command result, got nil") - } - if cmd.ExitCode != 2 { - t.Errorf("expected exit code 2, got %d", cmd.ExitCode) - } - if cmd.Stderr == "" { - t.Error("expected stderr to contain error message") - } -} - -func TestRunCommand_EmptySandboxID(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.RunCommand(context.Background(), "", "ubuntu", "/path/to/key", "ls", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for empty sandbox ID") - } -} - -func 
TestRunCommand_EmptyUsername(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.RunCommand(context.Background(), "SBX-123", "", "/path/to/key", "ls", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for empty username") - } -} - -func TestRunCommand_EmptyPrivateKeyPath(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "", "ls", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for empty private key path") - } -} - -func TestRunCommand_EmptyCommand(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: &mockStore{}, - timeNowFn: time.Now, - } - - _, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for empty command") - } -} - -func TestRunCommand_SandboxNotFound(t *testing.T) { - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return nil, store.ErrNotFound - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - } - - _, err := svc.RunCommand(context.Background(), "nonexistent", "ubuntu", "/path/to/key", "ls", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for sandbox not found") - } - if !errors.Is(err, store.ErrNotFound) { - t.Errorf("expected ErrNotFound, got %v", err) - } -} - -func TestRunCommand_WithEnvironmentVariables(t *testing.T) { - ip := "192.168.1.100" - var capturedEnv map[string]string - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - 
listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - return []*store.Sandbox{}, nil - }, - } - - mockSSH := &mockSSHRunner{ - runFn: func(ctx context.Context, addr, user, privateKeyPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - capturedEnv = env - return "test\n", "", 0, nil - }, - } - - mockMgr := &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "192.168.1.100", "52:54:00:12:34:56", nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - ssh: mockSSH, - mgr: mockMgr, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - env := map[string]string{"MY_VAR": "test_value", "OTHER_VAR": "other"} - _, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "echo $MY_VAR", 60*time.Second, env) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if capturedEnv == nil { - t.Fatal("expected environment variables to be passed") - } - if capturedEnv["MY_VAR"] != "test_value" { - t.Errorf("expected MY_VAR=%q, got %q", "test_value", capturedEnv["MY_VAR"]) - } -} - -func TestRunCommand_IPConflictDetected(t *testing.T) { - ip := "192.168.1.100" - otherIP := "192.168.1.100" // Same IP as another sandbox - mockSt := &mockStore{ - getSandboxFn: func(ctx context.Context, id string) (*store.Sandbox, error) { - return &store.Sandbox{ - ID: id, - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, nil - }, - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - // Return another sandbox with the same IP - simulating a conflict - return []*store.Sandbox{ - { - ID: "SBX-OTHER", - SandboxName: "other-sandbox", - State: 
store.SandboxStateRunning, - IPAddress: &otherIP, - }, - }, nil - }, - } - - mockMgr := &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "192.168.1.100", "52:54:00:12:34:56", nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - mgr: mockMgr, - timeNowFn: time.Now, - logger: slog.Default(), - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - _, err := svc.RunCommand(context.Background(), "SBX-123", "ubuntu", "/path/to/key", "ls -l", 60*time.Second, nil) - if err == nil { - t.Fatal("expected error for IP conflict, got nil") - } - if !strings.Contains(err.Error(), "ip conflict") { - t.Errorf("expected error to contain 'ip conflict', got: %v", err) - } -} - -func TestValidateIPUniqueness_NoConflict(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - // Return a sandbox with a different IP - otherIP := "192.168.1.200" - return []*store.Sandbox{ - { - ID: "SBX-OTHER", - SandboxName: "other-sandbox", - State: store.SandboxStateRunning, - IPAddress: &otherIP, - }, - }, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - logger: slog.Default(), - } - - err := svc.validateIPUniqueness(context.Background(), "SBX-123", ip) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestValidateIPUniqueness_Conflict(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - // Return another sandbox with the same IP - return []*store.Sandbox{ - { - ID: "SBX-OTHER", - SandboxName: "other-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, - }, nil - }, - } 
- - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - logger: slog.Default(), - } - - err := svc.validateIPUniqueness(context.Background(), "SBX-123", ip) - if err == nil { - t.Fatal("expected error for IP conflict, got nil") - } - if !strings.Contains(err.Error(), "already assigned") { - t.Errorf("expected error to contain 'already assigned', got: %v", err) - } -} - -func TestValidateIPUniqueness_SameSandboxIgnored(t *testing.T) { - ip := "192.168.1.100" - mockSt := &mockStore{ - listSandboxesFn: func(ctx context.Context, filter store.SandboxFilter, opt *store.ListOptions) ([]*store.Sandbox, error) { - // Return the same sandbox - should be ignored - return []*store.Sandbox{ - { - ID: "SBX-123", // Same ID as the one being validated - SandboxName: "test-sandbox", - State: store.SandboxStateRunning, - IPAddress: &ip, - }, - }, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - store: mockSt, - timeNowFn: time.Now, - logger: slog.Default(), - } - - err := svc.validateIPUniqueness(context.Background(), "SBX-123", ip) - if err != nil { - t.Fatalf("unexpected error (same sandbox should be ignored): %v", err) - } -} - -// mockKeyManager is a mock implementation of sshkeys.KeyProvider for testing -type mockKeyManager struct { - getSourceVMCredentialsFn func(ctx context.Context, sourceVMName string) (*sshkeys.Credentials, error) -} - -func (m *mockKeyManager) GetCredentials(ctx context.Context, sandboxID, username string) (*sshkeys.Credentials, error) { - return nil, nil -} - -func (m *mockKeyManager) GetSourceVMCredentials(ctx context.Context, sourceVMName string) (*sshkeys.Credentials, error) { - if m.getSourceVMCredentialsFn != nil { - return m.getSourceVMCredentialsFn(ctx, sourceVMName) - } - return nil, nil -} - -func (m *mockKeyManager) CleanupSandbox(ctx context.Context, sandboxID string) error { - return nil -} - -func (m *mockKeyManager) Close() error { - return nil -} - -func 
TestRunSourceVMCommand_AllowlistRejection(t *testing.T) { - mockMgr := &mockManager{} - mockKeyMgr := &mockKeyManager{} - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - mgr: mockMgr, - keyMgr: mockKeyMgr, - timeNowFn: time.Now, - logger: slog.Default(), - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - // Test command not in the readonly allowlist (e.g., "rm" is not allowed) - _, err := svc.RunSourceVMCommand(context.Background(), "source-vm-1", "rm -rf /tmp/test", 60*time.Second) - if err == nil { - t.Fatal("expected error for disallowed command, got nil") - } - if !strings.Contains(err.Error(), "command not allowed in read-only mode") { - t.Errorf("expected 'command not allowed in read-only mode' error, got: %v", err) - } -} - -func TestRunSourceVMCommand_IPDiscoveryFailure(t *testing.T) { - mockMgr := &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "", "", errors.New("VM not found") - }, - } - mockKeyMgr := &mockKeyManager{} - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - mgr: mockMgr, - keyMgr: mockKeyMgr, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - // Use a command that would pass the allowlist check - _, err := svc.RunSourceVMCommand(context.Background(), "source-vm-1", "ls -l", 60*time.Second) - if err == nil { - t.Fatal("expected error for IP discovery failure, got nil") - } - if !strings.Contains(err.Error(), "discover IP for source VM") { - t.Errorf("expected 'discover IP for source VM' error, got: %v", err) - } -} - -func TestRunSourceVMCommand_MissingKeyManager(t *testing.T) { - mockMgr := &mockManager{} - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - mgr: mockMgr, - keyMgr: nil, // No key manager set - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, 
- } - - // Use a command that would pass the allowlist check - _, err := svc.RunSourceVMCommand(context.Background(), "source-vm-1", "ls -l", 60*time.Second) - if err == nil { - t.Fatal("expected error for missing key manager, got nil") - } - if !strings.Contains(err.Error(), "key manager is required for source VM access") { - t.Errorf("expected 'key manager is required for source VM access' error, got: %v", err) - } -} - -func TestRunSourceVMCommand_Success(t *testing.T) { - mockMgr := &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "192.168.1.200", "52:54:00:12:34:57", nil - }, - } - mockKeyMgr := &mockKeyManager{ - getSourceVMCredentialsFn: func(ctx context.Context, sourceVMName string) (*sshkeys.Credentials, error) { - return &sshkeys.Credentials{ - Username: "fluid-readonly", - PrivateKeyPath: "/tmp/source-vm-key", - CertificatePath: "/tmp/source-vm-key-cert.pub", - }, nil - }, - } - mockSSH := &mockSSHRunner{ - runWithCertFn: func(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - return "file1.txt\nfile2.txt\n", "", 0, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - mgr: mockMgr, - keyMgr: mockKeyMgr, - ssh: mockSSH, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - result, err := svc.RunSourceVMCommand(context.Background(), "source-vm-1", "ls -l", 60*time.Second) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if result == nil { - t.Fatal("expected result, got nil") - } - if result.SourceVM != "source-vm-1" { - t.Errorf("expected SourceVM 'source-vm-1', got %q", result.SourceVM) - } - if result.Stdout != "file1.txt\nfile2.txt\n" { - t.Errorf("expected stdout %q, got %q", "file1.txt\nfile2.txt\n", result.Stdout) - } - if result.ExitCode != 0 { - 
t.Errorf("expected exit code 0, got %d", result.ExitCode) - } -} - -func TestRunSourceVMCommandWithCallback_StreamingSuccess(t *testing.T) { - mockMgr := &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "192.168.1.200", "52:54:00:12:34:57", nil - }, - } - mockKeyMgr := &mockKeyManager{ - getSourceVMCredentialsFn: func(ctx context.Context, sourceVMName string) (*sshkeys.Credentials, error) { - return &sshkeys.Credentials{ - Username: "fluid-readonly", - PrivateKeyPath: "/tmp/source-vm-key", - CertificatePath: "/tmp/source-vm-key-cert.pub", - }, nil - }, - } - - mockSSH := &mockSSHRunner{ - runWithCertFn: func(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - return "file1.txt\nfile2.txt\n", "", 0, nil - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - mgr: mockMgr, - keyMgr: mockKeyMgr, - ssh: mockSSH, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - // Provide a simple callback - it won't be invoked with the current mock setup, - // but this test verifies the code path compiles and executes without error. - callback := func(chunk OutputChunk) { - // Callback provided but not invoked by mock - } - - result, err := svc.RunSourceVMCommandWithCallback(context.Background(), "source-vm-1", "ls -l", 60*time.Second, callback) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if result == nil { - t.Fatal("expected result, got nil") - } - if result.SourceVM != "source-vm-1" { - t.Errorf("expected SourceVM 'source-vm-1', got %q", result.SourceVM) - } - // Note: The mock SSH runner delegates streaming to non-streaming, so callback won't be invoked in this test. - // This test verifies the code path compiles and executes without error when a callback is provided. 
-} - -func TestRunSourceVMCommand_EmptySourceVMName(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - _, err := svc.RunSourceVMCommand(context.Background(), "", "ls -l", 60*time.Second) - if err == nil { - t.Fatal("expected error for empty sourceVMName, got nil") - } - if !strings.Contains(err.Error(), "sourceVMName is required") { - t.Errorf("expected 'sourceVMName is required' error, got: %v", err) - } -} - -func TestRunSourceVMCommand_EmptyCommand(t *testing.T) { - svc := &Service{ - telemetry: telemetry.NewNoopService(), - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - _, err := svc.RunSourceVMCommand(context.Background(), "source-vm-1", "", 60*time.Second) - if err == nil { - t.Fatal("expected error for empty command, got nil") - } - if !strings.Contains(err.Error(), "command is required") { - t.Errorf("expected 'command is required' error, got: %v", err) - } -} - -func TestRunSourceVMCommand_GetCredentialsFailure(t *testing.T) { - mockMgr := &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "192.168.1.200", "52:54:00:12:34:57", nil - }, - } - mockKeyMgr := &mockKeyManager{ - getSourceVMCredentialsFn: func(ctx context.Context, sourceVMName string) (*sshkeys.Credentials, error) { - return nil, errors.New("credentials not found") - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - mgr: mockMgr, - keyMgr: mockKeyMgr, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - _, err := svc.RunSourceVMCommand(context.Background(), "source-vm-1", "ls -l", 60*time.Second) - if err == nil { - t.Fatal("expected error for credentials failure, got nil") - } - if !strings.Contains(err.Error(), "get source 
VM credentials") { - t.Errorf("expected 'get source VM credentials' error, got: %v", err) - } -} - -func TestRunSourceVMCommand_SSHRunFailure(t *testing.T) { - mockMgr := &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "192.168.1.200", "52:54:00:12:34:57", nil - }, - } - mockKeyMgr := &mockKeyManager{ - getSourceVMCredentialsFn: func(ctx context.Context, sourceVMName string) (*sshkeys.Credentials, error) { - return &sshkeys.Credentials{ - Username: "fluid-readonly", - PrivateKeyPath: "/tmp/source-vm-key", - CertificatePath: "/tmp/source-vm-key-cert.pub", - }, nil - }, - } - mockSSH := &mockSSHRunner{ - runWithCertFn: func(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - return "", "connection timeout", 1, errors.New("SSH connection failed") - }, - } - - svc := &Service{ - telemetry: telemetry.NewNoopService(), - mgr: mockMgr, - keyMgr: mockKeyMgr, - ssh: mockSSH, - timeNowFn: time.Now, - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - result, err := svc.RunSourceVMCommand(context.Background(), "source-vm-1", "ls -l", 60*time.Second) - if err == nil { - t.Fatal("expected error for SSH failure, got nil") - } - if !strings.Contains(err.Error(), "ssh run on source VM") { - t.Errorf("expected 'ssh run on source VM' error, got: %v", err) - } - // Result should still be returned with exit code and stderr - if result == nil { - t.Fatal("expected result even with error, got nil") - } - if result.ExitCode != 1 { - t.Errorf("expected exit code 1, got %d", result.ExitCode) - } - if result.Stderr != "connection timeout" { - t.Errorf("expected stderr %q, got %q", "connection timeout", result.Stderr) - } -} - -// mockTelemetryService records Track calls for test assertions. 
-type mockTelemetryService struct { - mu sync.Mutex - events []trackedEvent -} - -type trackedEvent struct { - name string - properties map[string]any -} - -func (m *mockTelemetryService) Track(event string, properties map[string]any) { - m.mu.Lock() - defer m.mu.Unlock() - m.events = append(m.events, trackedEvent{name: event, properties: properties}) -} - -func (m *mockTelemetryService) Close() {} - -func (m *mockTelemetryService) getEvents() []trackedEvent { - m.mu.Lock() - defer m.mu.Unlock() - out := make([]trackedEvent, len(m.events)) - copy(out, m.events) - return out -} - -func TestRunSourceVMCommand_TelemetryTracking(t *testing.T) { - t.Run("blocked by allowlist", func(t *testing.T) { - mt := &mockTelemetryService{} - svc := &Service{ - telemetry: mt, - mgr: &mockManager{}, - keyMgr: &mockKeyManager{}, - timeNowFn: time.Now, - logger: slog.Default(), - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - _, err := svc.RunSourceVMCommand(context.Background(), "source-vm-1", "rm -rf /", 60*time.Second) - if err == nil { - t.Fatal("expected error for disallowed command") - } - - events := mt.getEvents() - if len(events) != 1 { - t.Fatalf("expected 1 telemetry event, got %d", len(events)) - } - if events[0].name != "source_vm_command_blocked" { - t.Errorf("expected event 'source_vm_command_blocked', got %q", events[0].name) - } - if events[0].properties["source_vm"] != "source-vm-1" { - t.Errorf("expected source_vm 'source-vm-1', got %v", events[0].properties["source_vm"]) - } - if _, ok := events[0].properties["reason"]; !ok { - t.Error("expected 'reason' property in telemetry event") - } - }) - - t.Run("successful command", func(t *testing.T) { - mt := &mockTelemetryService{} - svc := &Service{ - telemetry: mt, - mgr: &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "192.168.1.200", "52:54:00:12:34:57", nil - }, - }, - keyMgr: 
&mockKeyManager{ - getSourceVMCredentialsFn: func(ctx context.Context, sourceVMName string) (*sshkeys.Credentials, error) { - return &sshkeys.Credentials{ - Username: "fluid-readonly", - PrivateKeyPath: "/tmp/key", - CertificatePath: "/tmp/key-cert.pub", - }, nil - }, - }, - ssh: &mockSSHRunner{ - runWithCertFn: func(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string) (string, string, int, error) { - return "output", "", 0, nil - }, - }, - timeNowFn: time.Now, - logger: slog.Default(), - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - _, err := svc.RunSourceVMCommand(context.Background(), "source-vm-1", "ls -l", 60*time.Second) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - events := mt.getEvents() - if len(events) != 1 { - t.Fatalf("expected 1 telemetry event, got %d", len(events)) - } - if events[0].name != "source_vm_command" { - t.Errorf("expected event 'source_vm_command', got %q", events[0].name) - } - if events[0].properties["exit_code"] != 0 { - t.Errorf("expected exit_code 0, got %v", events[0].properties["exit_code"]) - } - }) - - t.Run("blocked by restricted shell", func(t *testing.T) { - mt := &mockTelemetryService{} - svc := &Service{ - telemetry: mt, - mgr: &mockManager{ - getIPAddressFn: func(ctx context.Context, vmName string, timeout time.Duration) (string, string, error) { - return "192.168.1.200", "52:54:00:12:34:57", nil - }, - }, - keyMgr: &mockKeyManager{ - getSourceVMCredentialsFn: func(ctx context.Context, sourceVMName string) (*sshkeys.Credentials, error) { - return &sshkeys.Credentials{ - Username: "fluid-readonly", - PrivateKeyPath: "/tmp/key", - CertificatePath: "/tmp/key-cert.pub", - }, nil - }, - }, - ssh: &mockSSHRunner{ - runWithCertFn: func(ctx context.Context, addr, user, privateKeyPath, certPath, command string, timeout time.Duration, env map[string]string, proxyJump string) 
(string, string, int, error) { - return "", "command not permitted", 126, nil - }, - }, - timeNowFn: time.Now, - logger: slog.Default(), - cfg: Config{CommandTimeout: 30 * time.Second, IPDiscoveryTimeout: 30 * time.Second}, - } - - result, err := svc.RunSourceVMCommand(context.Background(), "source-vm-1", "ls -l", 60*time.Second) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if result.ExitCode != 126 { - t.Errorf("expected exit code 126, got %d", result.ExitCode) - } - - events := mt.getEvents() - // Should have both: source_vm_command_blocked (restricted_shell) and source_vm_command - if len(events) != 2 { - t.Fatalf("expected 2 telemetry events, got %d", len(events)) - } - if events[0].name != "source_vm_command_blocked" { - t.Errorf("expected first event 'source_vm_command_blocked', got %q", events[0].name) - } - if events[0].properties["reason"] != "restricted_shell" { - t.Errorf("expected reason 'restricted_shell', got %v", events[0].properties["reason"]) - } - if events[1].name != "source_vm_command" { - t.Errorf("expected second event 'source_vm_command', got %q", events[1].name) - } - }) -} diff --git a/fluid/internal/workflow/errors.go b/fluid/internal/workflow/errors.go deleted file mode 100755 index c62acc9f..00000000 --- a/fluid/internal/workflow/errors.go +++ /dev/null @@ -1,151 +0,0 @@ -package workflow - -import ( - "errors" - "fmt" -) - -// Sentinel errors for workflow stages. These allow callers to identify -// which stage failed and take appropriate action. -var ( - // ErrDomainNotFound indicates the requested VM domain does not exist. - ErrDomainNotFound = errors.New("domain not found") - - // ErrDomainTransient indicates the domain is transient (not persistent). - ErrDomainTransient = errors.New("transient domains are not supported") - - // ErrDomainUnsupported indicates the domain configuration is not supported. 
- ErrDomainUnsupported = errors.New("domain configuration not supported") - - // ErrSnapshotFailed indicates snapshot creation failed. - ErrSnapshotFailed = errors.New("snapshot_failed") - - // ErrNBDAttachFailed indicates qemu-nbd attachment failed. - ErrNBDAttachFailed = errors.New("nbd_attach_failed") - - // ErrMountFailed indicates filesystem mount failed. - ErrMountFailed = errors.New("mount_failed") - - // ErrSanitizeFailed indicates filesystem sanitization failed. - ErrSanitizeFailed = errors.New("sanitize_failed") - - // ErrArchiveFailed indicates rootfs archive creation failed. - ErrArchiveFailed = errors.New("archive_failed") - - // ErrImageBuildFailed indicates Podman image build failed. - ErrImageBuildFailed = errors.New("image_build_failed") - - // ErrContainerCreateFailed indicates container creation/start failed. - ErrContainerCreateFailed = errors.New("container_create_failed") - - // ErrRollbackFailed indicates cleanup during rollback encountered errors. - ErrRollbackFailed = errors.New("rollback_failed") -) - -// WorkflowError wraps a stage error with additional context. -type WorkflowError struct { - // Stage identifies which workflow step failed. - Stage string - - // Err is the underlying sentinel or wrapped error. - Err error - - // Detail provides additional context about the failure. - Detail string -} - -// Error implements the error interface. -func (e *WorkflowError) Error() string { - if e.Detail != "" { - return fmt.Sprintf("%s: %s: %s", e.Stage, e.Err.Error(), e.Detail) - } - return fmt.Sprintf("%s: %s", e.Stage, e.Err.Error()) -} - -// Unwrap returns the underlying error for errors.Is/As support. -func (e *WorkflowError) Unwrap() error { - return e.Err -} - -// NewWorkflowError creates a new WorkflowError. -func NewWorkflowError(stage string, err error, detail string) *WorkflowError { - return &WorkflowError{ - Stage: stage, - Err: err, - Detail: detail, - } -} - -// ErrorResponse is the JSON structure returned on API errors. 
-type ErrorResponse struct { - Error string `json:"error"` - Detail string `json:"detail,omitempty"` -} - -// ToErrorResponse converts a WorkflowError to an API error response. -func (e *WorkflowError) ToErrorResponse() ErrorResponse { - return ErrorResponse{ - Error: e.Err.Error(), - Detail: e.Detail, - } -} - -// Stage names for error context. -const ( - StageResolveDomain = "resolve_domain" - StageDetermineMode = "determine_mode" - StageCreateSnapshot = "create_snapshot" - StageMountDisk = "mount_disk" - StageSanitizeFS = "sanitize_filesystem" - StageCreateArchive = "create_archive" - StageBuildImage = "build_image" - StageRunContainer = "run_container" - StageCleanup = "cleanup" -) - -// CleanupFunc is a function that performs cleanup and returns any error encountered. -type CleanupFunc func() error - -// CleanupStack manages a stack of cleanup functions for rollback. -// Cleanups are executed in LIFO order (last registered, first executed). -type CleanupStack struct { - funcs []CleanupFunc -} - -// NewCleanupStack creates a new cleanup stack. -func NewCleanupStack() *CleanupStack { - return &CleanupStack{ - funcs: make([]CleanupFunc, 0), - } -} - -// Push adds a cleanup function to the stack. -func (s *CleanupStack) Push(fn CleanupFunc) { - s.funcs = append(s.funcs, fn) -} - -// ExecuteAll runs all cleanup functions in reverse order. -// It collects all errors and returns a combined error if any occurred. -func (s *CleanupStack) ExecuteAll() error { - var errs []error - for i := len(s.funcs) - 1; i >= 0; i-- { - if err := s.funcs[i](); err != nil { - errs = append(errs, err) - } - } - if len(errs) > 0 { - return fmt.Errorf("%w: %d cleanup(s) failed: %v", ErrRollbackFailed, len(errs), errs) - } - return nil -} - -// Clear removes all cleanup functions without executing them. -// Call this after successful completion to prevent rollback. -func (s *CleanupStack) Clear() { - s.funcs = nil -} - -// Len returns the number of registered cleanup functions. 
-func (s *CleanupStack) Len() int { - return len(s.funcs) -} diff --git a/landing-page/.env.example b/landing-page/.env.example deleted file mode 100644 index edc99647..00000000 --- a/landing-page/.env.example +++ /dev/null @@ -1,3 +0,0 @@ -PUBLIC_POSTHOG_API_KEY= -PUBLIC_POSTHOG_HOST= -PUBLIC_POSTHOG_DEFAULTS= diff --git a/landing-page/.gitignore b/landing-page/.gitignore deleted file mode 100644 index 38f55541..00000000 --- a/landing-page/.gitignore +++ /dev/null @@ -1,28 +0,0 @@ -# build output -dist/ -# generated types -.astro/ - -# dependencies -node_modules/ - -# logs -npm-debug.log* -yarn-debug.log* -yarn-error.log* -pnpm-debug.log* - - -# environment variables -.env -.env.development -.env.production - -# macOS-specific files -.DS_Store - -# jetbrains setting folder -.idea/ - -.eslintcache -.claude diff --git a/landing-page/AGENTS.md b/landing-page/AGENTS.md deleted file mode 100644 index 0797eee4..00000000 --- a/landing-page/AGENTS.md +++ /dev/null @@ -1,168 +0,0 @@ -# Landing Page Development Guidelines - -## Tech Stack - -- **Framework**: Astro 5 -- **Styling**: Tailwind CSS v4 -- **Package Manager**: Bun -- **Analytics**: PostHog - -## Scripts - -```bash -# Install dependencies -bun install - -# Development server (hot reload) -bun run dev - -# Build for production -bun run build - -# Preview production build -bun run preview -``` - -## Project Structure - -``` -landing-page/ -β”œβ”€β”€ src/ -β”‚ β”œβ”€β”€ components/ # Astro components -β”‚ β”œβ”€β”€ content/ # Content collections (blog) -β”‚ β”œβ”€β”€ layouts/ # Base layouts -β”‚ β”œβ”€β”€ pages/ # Routes (index.astro, blog/, etc.) 
-β”‚ └── styles/ # Global styles -β”œβ”€β”€ public/ # Static assets -β”œβ”€β”€ package.json -└── astro.config.mjs -``` - -## Design System - -### Color Palette - -Terminal-inspired dark theme with blue accents: - -| Purpose | Color | Tailwind Class | -| ---------------- | ----------- | ------------------------------------------ | -| Background | Black | `bg-black` | -| Card/Surface | Dark gray | `bg-neutral-900`, `bg-neutral-900/50` | -| Primary text | Light gray | `text-neutral-200`, `text-neutral-300` | -| Secondary text | Muted gray | `text-neutral-400`, `text-neutral-500` | -| Accent | Blue | `text-blue-400`, `border-blue-400` | -| Success | Green | `text-green-400`, `border-green-400` | -| Warning/Playbook | Amber | `text-amber-400`, `border-amber-400` | -| Borders | Subtle gray | `border-neutral-800`, `border-neutral-700` | - -### Typography - -- **Logo/Brand**: `font-logo` (custom) -- **Code/Terminal**: `font-mono` -- **Body**: Default sans-serif - -### Component Patterns - -#### Cards - -```html -
- -
-``` - -#### Terminal Blocks - -```html -
- $ command here -
-``` - -#### Timeline Items (with glow effect) - -```html -
-``` - -#### Glowing Borders - -```css -/* Blue glow */ -shadow-[0_0_8px_2px_rgba(96,165,250,0.6)] - -/* Green glow */ -shadow-[0_0_8px_2px_rgba(74,222,128,0.6)] - -/* Amber glow */ -shadow-[0_0_8px_2px_rgba(251,191,36,0.5)] -``` - -### Animation Patterns - -#### Intersection Observer for scroll-triggered animations - -```javascript -const observer = new IntersectionObserver( - (entries) => { - entries.forEach((entry) => { - if (entry.isIntersecting) { - // Trigger animation - observer.disconnect(); - } - }); - }, - { threshold: 0.3 }, -); -``` - -#### Sequential item reveal - -```javascript -items.forEach((item, index) => { - setTimeout(() => { - item.classList.remove("opacity-0", "translate-y-4"); - item.classList.add("opacity-100", "translate-y-0"); - }, index * 250); -}); -``` - -#### Typing animation with mistakes - -- Use array of `['type', char]`, `['delete', '']`, `['pause', ms]` actions -- Variable delay: `30 + Math.random() * 40` ms between keystrokes -- Faster deletion: `50ms` per character - -### Icon Style - -Terminal-inspired text icons using monospace font: - -| Concept | Icon | -| ------------ | ------- | -| Home/Sandbox | `[~]` | -| List/Explore | `ls` | -| Output/Audit | `>>>` | -| YAML/Config | `.yaml` | -| Prompt | `$` | -| Checkmark | `v` | - -## Key Pages - -| Route | File | Purpose | -| ------------- | -------------------------------- | ----------------------- | -| `/` | `src/pages/index.astro` | Main landing page | -| `/install.sh` | `src/pages/install.sh.ts` | Install script endpoint | -| `/blog/*` | `src/pages/blog/[...slug].astro` | Blog posts | - -## Development Notes - -- Use ` diff --git a/landing-page/src/content/config.ts b/landing-page/src/content/config.ts deleted file mode 100644 index f6ee2819..00000000 --- a/landing-page/src/content/config.ts +++ /dev/null @@ -1,22 +0,0 @@ -import { defineCollection, z } from "astro:content"; - -const blog = defineCollection({ - type: "content", - // Type-check frontmatter using a schema - 
schema: z.object({ - title: z.string(), - description: z.string(), - // Transform string to Date object - pubDate: z.coerce.date(), - updatedDate: z.coerce.date().optional(), - heroImage: z.string().optional(), - // Author info - author: z.string().optional(), - authorImage: z.string().optional(), - authorEmail: z.string().optional(), - authorPhone: z.string().optional(), - authorDiscord: z.string().optional(), - }), -}); - -export const collections = { blog }; diff --git a/landing-page/src/env.d.ts b/landing-page/src/env.d.ts deleted file mode 100644 index 0ae87889..00000000 --- a/landing-page/src/env.d.ts +++ /dev/null @@ -1,9 +0,0 @@ -interface ImportMetaEnv { - readonly PUBLIC_POSTHOG_API_KEY: string; - readonly PUBLIC_POSTHOG_HOST: string; - readonly PUBLIC_POSTHOG_DEFAULTS: string; -} - -interface ImportMeta { - readonly env: ImportMetaEnv; -} diff --git a/landing-page/src/layouts/BaseLayout.astro b/landing-page/src/layouts/BaseLayout.astro deleted file mode 100644 index 0cf82615..00000000 --- a/landing-page/src/layouts/BaseLayout.astro +++ /dev/null @@ -1,90 +0,0 @@ ---- -import Posthog from "../components/posthog.astro"; -import "../styles/global.css"; - -interface Props { - title: string; - description?: string; -} - -const { title, description = "fluid.sh" } = Astro.props; ---- - - - - - - - - - - {title} - - - - - - - - - diff --git a/landing-page/src/middleware/index.ts b/landing-page/src/middleware/index.ts deleted file mode 100644 index 2132d4c0..00000000 --- a/landing-page/src/middleware/index.ts +++ /dev/null @@ -1,18 +0,0 @@ -import type { MiddlewareNext, APIContext } from "astro"; -export const onRequest = async ( - { request }: APIContext, - next: MiddlewareNext, -) => { - const start = Date.now(); - const response = await next(); - console.log( - JSON.stringify({ - type: "http_access", - method: request.method, - path: new URL(request.url).pathname, - status: response.status, - duration_ms: Date.now() - start, - }), - ); - return response; -}; 
diff --git a/landing-page/src/pages/blog/[...slug].astro b/landing-page/src/pages/blog/[...slug].astro deleted file mode 100644 index d8c715d2..00000000 --- a/landing-page/src/pages/blog/[...slug].astro +++ /dev/null @@ -1,242 +0,0 @@ ---- -import { type CollectionEntry, getCollection } from "astro:content"; -import BaseLayout from "../../layouts/BaseLayout.astro"; - -export async function getStaticPaths() { - const posts = await getCollection("blog"); - return posts.map((post) => ({ - params: { slug: post.slug }, - props: post, - })); -} -type Props = CollectionEntry<"blog">; - -const post = Astro.props; -const { Content } = await post.render(); ---- - - -
-
-
- - $ cd .. - - -
-

- # {post.data.title} -

-
- -
- { - post.data.pubDate.toLocaleDateString("en-us", { - year: "numeric", - month: "long", - day: "numeric", - }) - } -
- - {/* Author info */} - {post.data.author && ( -
- {post.data.authorImage ? ( - {post.data.author} - ) : ( -
- {post.data.author.charAt(0)} -
- )} -
-
{post.data.author}
-
- {post.data.authorEmail && ( - - email - - )} - {post.data.authorPhone && ( - - text - - )} - {post.data.authorDiscord && ( - - discord - - )} -
-
-
- )} -
- -
- -
-
-
-
- - diff --git a/landing-page/src/pages/blog/index.astro b/landing-page/src/pages/blog/index.astro deleted file mode 100644 index 321fe125..00000000 --- a/landing-page/src/pages/blog/index.astro +++ /dev/null @@ -1,102 +0,0 @@ ---- -import BaseLayout from "../../layouts/BaseLayout.astro"; -import { getCollection } from "astro:content"; - -const posts = (await getCollection("blog")).sort( - (a, b) => b.data.pubDate.valueOf() - a.data.pubDate.valueOf(), -); ---- - - - - - - diff --git a/landing-page/src/pages/index.astro b/landing-page/src/pages/index.astro deleted file mode 100644 index df5574b0..00000000 --- a/landing-page/src/pages/index.astro +++ /dev/null @@ -1,995 +0,0 @@ ---- -import BaseLayout from "../layouts/BaseLayout.astro"; -import FeatureCard from "../components/FeatureCard.astro"; ---- - - -
-
-
-
-
-

- $ fluid.sh -

-
-
- Blog - GitHub - Discord -
-
-

- Claude Code for Debugging and Managing VMs. -

-

- Fluid enables SREs, Platform Engineers, and DevOps Engineers to fix - multiple issues at once, expand their triage and enable more uptime - across their production environment. -

-

Fluid works in four phases.

- - -

- Debug production VMs with Fluid's read-only mode. Let Fluid - investigate an issue by querying log files, reading systemctl changes, - or accessing config. -

- -

- Edit VM sandboxes with Fluid's edit mode. After Fluid has some context - on the system, it will create a sandbox by cloning the VM. Fluid will - then make changes, edit files, and iterate within the sandbox until - the issue is resolved. -

- -

- Once Fluid has fixed the issue on the sandbox, it will begin to create - an Ansible playbook to reconstruct the fix on production. -

- - -

- After the changes are made, open sandboxes will be deleted when the - Fluid CLI is closed or you can ask Fluid to delete them manually. -

-

- - Installation -

- -

- This will install the $ - terminal agent meant - to be installed on your local workstation. -

-

- If you are instead looking for the $ - agent api, to control - thousands of concurrent agents/sandboxes, navigate to the readme. -

- -
-
- - -
-
- -
-
-
- $ go install - github.com/aspectrr/fluid.sh/fluid/cmd/fluid@latest -
- -
-
- - - - -
- $ fluid -
-
-
- -
-
-
-
-
-

- Built for where you already work -

- - -
- - - - -
- -
-
-

Usage

- - -
- -
-
- - -
-
-
-
- $ - | -
-
- - -
-
-
-
- - -
-
- - -
-
-
-
- - -
-
- - -
-
-
-
- - -
-
- - -
-
-
-
- - -
-
- - -
-
-
-
- - -
-
- - -
-
-
-
- - -
-
- - -
-
-
-
- - -
-
- - -
-
-
-
- - -
-
- - -
-
-
-
- - -
-
- - -
-
-
-
-
- Done! Here's what I accomplished: -
- -
- Sandbox Created -
- ID: SBX-demo1234
-
- IP: 192.168.122.50
-
- -
- Apache HTTP Server -
- Installed and running
-
- - Custom page at /var/www/html/index.html -
-
- - Verified working with curl -
-
- -
- Ansible Playbook: httpd-setup -
- The playbook includes 4 tasks: -
-
1. Update apt cache
-
2. Install Apache
-
- 3. Create custom index.html -
-
- 4. Start and enable Apache service -
-
- -
- You can run this playbook on any Ubuntu server to reproduce - this setup. -
-
-
-
-
- - - - -
-
-
-
diff --git a/landing-page/src/pages/install.sh.ts b/landing-page/src/pages/install.sh.ts deleted file mode 100644 index ca3ddf58..00000000 --- a/landing-page/src/pages/install.sh.ts +++ /dev/null @@ -1,63 +0,0 @@ -import type { APIRoute } from "astro"; -import { PostHog } from "posthog-node"; - -export const GET: APIRoute = async ({ request }) => { - const client = new PostHog(import.meta.env.PUBLIC_POSTHOG_API_KEY, { - host: import.meta.env.PUBLIC_POSTHOG_HOST, - }); - - // Log the install event to PostHog - try { - const userAgent = request.headers.get("user-agent") || "unknown"; - const ip = - request.headers.get("x-forwarded-for") || - request.headers.get("x-real-ip") || - "unknown"; - - client.capture({ - distinctId: ip, - event: "install_script_fetched", - properties: { - $os: userAgent.includes("Darwin") - ? "macOS" - : userAgent.includes("Linux") - ? "Linux" - : "unknown", - $browser: "curl/wget", - user_agent: userAgent, - ip: ip, - }, - }); - - // Send queued events immediately - await client.shutdown(); - } catch (e) { - console.error("Failed to log to PostHog:", e); - } - - const script = `#!/bin/sh -set -e - -# Install Fluid -echo "Installing Fluid..." - -if ! command -v go &> /dev/null; then - echo "Error: 'go' is not installed. Please install Go first: https://go.dev/doc/install" - exit 1 -fi - -echo "Running: go install github.com/aspectrr/fluid.sh/fluid/cmd/fluid@latest" -go install github.com/aspectrr/fluid.sh/fluid/cmd/fluid@latest - -echo "" -echo "Fluid installed successfully!" -echo "Ensure that $(go env GOPATH)/bin is in your PATH." -echo "Run 'fluid --help' to get started." 
-`; - - return new Response(script, { - headers: { - "Content-Type": "text/plain", - }, - }); -}; diff --git a/landing-page/src/styles/global.css b/landing-page/src/styles/global.css deleted file mode 100644 index e33137dc..00000000 --- a/landing-page/src/styles/global.css +++ /dev/null @@ -1,29 +0,0 @@ -@import "tailwindcss"; - -@theme { - --font-logo: "NeueMachina", ui-sans-serif, system-ui; -} - -@font-face { - font-family: "NeueMachina"; - src: url("/fonts/NeueMachina-Regular.otf") format("opentype"); - font-weight: normal; - font-style: normal; - font-display: swap; -} - -@font-face { - font-family: "NeueMachina"; - src: url("/fonts/NeueMachina-Ultrabold.otf") format("opentype"); - font-weight: 800; - font-style: normal; - font-display: swap; -} - -@font-face { - font-family: "NeueMachina"; - src: url("/fonts/NeueMachina-Light.otf") format("opentype"); - font-weight: 300; - font-style: normal; - font-display: swap; -} diff --git a/landing-page/tsconfig.json b/landing-page/tsconfig.json deleted file mode 100644 index 8bf91d3b..00000000 --- a/landing-page/tsconfig.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "extends": "astro/tsconfigs/strict", - "include": [".astro/types.d.ts", "**/*"], - "exclude": ["dist"] -} diff --git a/landing-page/tsconfig.tsbuildinfo b/landing-page/tsconfig.tsbuildinfo deleted file mode 100644 index 6a7680d1..00000000 --- a/landing-page/tsconfig.tsbuildinfo +++ /dev/null @@ -1 +0,0 @@ -{"root":["./.astro/types.d.ts","./astro.config.mjs","./eslint.config.js","./src/env.d.ts","./src/content/config.ts","./src/middleware/index.ts","./src/pages/install.sh.ts"],"version":"5.9.3"} \ No newline at end of file diff --git a/lefthook.yaml b/lefthook.yaml index 0c606c14..3251e2c7 100644 --- a/lefthook.yaml +++ b/lefthook.yaml @@ -2,17 +2,24 @@ pre-commit: parallel: false commands: # Go - fluid CLI - fmt-fluid: + fmt-fluid-cli: glob: "*.go" - root: "fluid" - run: make fmt + root: "fluid-cli" + run: gofumpt -w . 
stage_fixed: true - # Go - fluid-remote API - fmt-fluid-remote: + # Go - fluid daemon + fmt-fluid-daemon: glob: "*.go" - root: "fluid-remote" - run: make fmt + root: "fluid-daemon" + run: go fmt ./... + stage_fixed: true + + # Go - API + fmt-api: + glob: "*.go" + root: "api" + run: go fmt ./... stage_fixed: true # Python SDK @@ -35,46 +42,55 @@ pre-commit: run: bun format stage_fixed: true - # Landing Page - fmt-landing-page: - glob: "*.{ts,tsx,js,jsx,json,css,md,astro}" - root: "landing-page" - run: bun format - stage_fixed: true - pre-push: parallel: true commands: # Go - fluid CLI - vet-fluid: + vet-fluid-cli: + glob: "*.go" + root: "fluid-cli" + run: make vet + + test-fluid-cli: + glob: "*.go" + root: "fluid-cli" + run: make test + + lint-fluid-cli: + glob: "*.go" + root: "fluid-cli" + run: make lint + + # Go - fluid daemon + vet-fluid-daemon: glob: "*.go" - root: "fluid" + root: "fluid-daemon" run: make vet - test-fluid: + test-fluid-daemon: glob: "*.go" - root: "fluid" + root: "fluid-daemon" run: make test - lint-fluid: + lint-fluid-daemon: glob: "*.go" - root: "fluid" + root: "fluid-daemon" run: make lint - # Go - fluid-remote API - vet-fluid-remote: + # Go - API + vet-api: glob: "*.go" - root: "fluid-remote" + root: "api" run: make vet - test-fluid-remote: + test-api: glob: "*.go" - root: "fluid-remote" + root: "api" run: make test - lint-fluid-remote: + lint-api: glob: "*.go" - root: "fluid-remote" + root: "api" run: make lint # Python SDK @@ -93,19 +109,3 @@ pre-push: glob: "*.ts" root: "web" run: bun typecheck - - # Landing Page - lint-landing-page: - glob: "*.{ts,tsx,js,jsx,css,astro}" - root: "landing-page" - run: bun lint - - typecheck-landing-page: - glob: "*.{ts,tsx,js,jsx,css,astro}" - root: "landing-page" - run: bun typecheck - - check-landing-page: - glob: "*.{ts,tsx,js,jsx,css,astro}" - root: "landing-page" - run: bun check diff --git a/mprocs.yaml b/mprocs.yaml index 18896662..62dc3ea2 100644 --- a/mprocs.yaml +++ b/mprocs.yaml @@ -6,15 +6,20 
@@ procs: stop: "SIGTERM" api: - cwd: "./fluid" + cwd: "./api" autostart: false - shell: 'go build --tags libvirt -ldflags="-s -w" -o bin/fluid ./cmd/fluid && bin/fluid' + shell: "make build && bin/api" frontend: cwd: "./web" autostart: false shell: "bun dev" + stripe-webhooks: + cwd: "." + autostart: false + shell: "stripe listen --forward-to localhost:8080/v1/webhooks/stripe" + list-sandboxes: cwd: "." autostart: false diff --git a/proto/buf.gen.yaml b/proto/buf.gen.yaml new file mode 100644 index 00000000..29dac446 --- /dev/null +++ b/proto/buf.gen.yaml @@ -0,0 +1,10 @@ +version: v2 +plugins: + - remote: buf.build/protocolbuffers/go + out: gen/go + opt: + - paths=source_relative + - remote: buf.build/grpc/go + out: gen/go + opt: + - paths=source_relative diff --git a/proto/buf.yaml b/proto/buf.yaml new file mode 100644 index 00000000..ed2df56c --- /dev/null +++ b/proto/buf.yaml @@ -0,0 +1,12 @@ +version: v2 +modules: + - path: . + name: buf.build/fluid/api +deps: + - buf.build/googleapis/googleapis +lint: + use: + - STANDARD +breaking: + use: + - FILE diff --git a/proto/fluid/v1/daemon.proto b/proto/fluid/v1/daemon.proto new file mode 100644 index 00000000..bf1b3ff5 --- /dev/null +++ b/proto/fluid/v1/daemon.proto @@ -0,0 +1,116 @@ +syntax = "proto3"; + +package fluid.v1; + +option go_package = "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1;fluidv1"; + +import "fluid/v1/sandbox.proto"; +import "fluid/v1/source.proto"; +import "fluid/v1/host.proto"; + +// DaemonService is served by the fluid-daemon for direct CLI access. +// Unlike HostService (bidirectional streaming for control plane), +// this uses standard unary RPCs for CLI-to-daemon communication. 
+service DaemonService { + // Sandbox lifecycle + rpc CreateSandbox(CreateSandboxCommand) returns (SandboxCreated); + rpc GetSandbox(GetSandboxRequest) returns (SandboxInfo); + rpc ListSandboxes(ListSandboxesRequest) returns (ListSandboxesResponse); + rpc DestroySandbox(DestroySandboxCommand) returns (SandboxDestroyed); + rpc StartSandbox(StartSandboxCommand) returns (SandboxStarted); + rpc StopSandbox(StopSandboxCommand) returns (SandboxStopped); + + // Command execution + rpc RunCommand(RunCommandCommand) returns (CommandResult); + + // Snapshots + rpc CreateSnapshot(SnapshotCommand) returns (SnapshotCreated); + + // Source VM operations + rpc ListSourceVMs(ListSourceVMsCommand) returns (SourceVMsList); + rpc ValidateSourceVM(ValidateSourceVMCommand) returns (SourceVMValidation); + rpc PrepareSourceVM(PrepareSourceVMCommand) returns (SourceVMPrepared); + rpc RunSourceCommand(RunSourceCommandCommand) returns (SourceCommandResult); + rpc ReadSourceFile(ReadSourceFileCommand) returns (SourceFileResult); + + // Host info + rpc GetHostInfo(GetHostInfoRequest) returns (HostInfoResponse); + rpc Health(HealthRequest) returns (HealthResponse); + + // Host discovery + rpc DiscoverHosts(DiscoverHostsCommand) returns (DiscoverHostsResult); +} + +// GetSandboxRequest requests details for a single sandbox. +message GetSandboxRequest { + string sandbox_id = 1; +} + +// SandboxInfo contains full details about a sandbox. +message SandboxInfo { + string sandbox_id = 1; + string name = 2; + string state = 3; + string ip_address = 4; + string base_image = 5; + string agent_id = 6; + int32 vcpus = 7; + int32 memory_mb = 8; + string created_at = 9; +} + +// ListSandboxesRequest requests all sandboxes. +message ListSandboxesRequest {} + +// ListSandboxesResponse contains a list of sandboxes. +message ListSandboxesResponse { + repeated SandboxInfo sandboxes = 1; + int32 count = 2; +} + +// GetHostInfoRequest requests host information. 
+message GetHostInfoRequest {} + +// HostInfoResponse contains host resource and capability information. +message HostInfoResponse { + string host_id = 1; + string hostname = 2; + string version = 3; + int32 total_cpus = 4; + int64 total_memory_mb = 5; + int32 active_sandboxes = 6; + repeated string base_images = 7; +} + +// HealthRequest is an empty health check request. +message HealthRequest {} + +// HealthResponse indicates daemon health status. +message HealthResponse { + string status = 1; +} + +// DiscoverHostsCommand requests the daemon to parse SSH config and probe hosts. +message DiscoverHostsCommand { + // ssh_config_content is the raw SSH config text to parse. + string ssh_config_content = 1; +} + +// DiscoveredHost describes a single probed host. +message DiscoveredHost { + string name = 1; + string hostname = 2; + string user = 3; + int32 port = 4; + string identity_file = 5; + bool reachable = 6; + bool has_libvirt = 7; + bool has_proxmox = 8; + repeated string vms = 9; + string error = 10; +} + +// DiscoverHostsResult is the response containing all probed hosts. +message DiscoverHostsResult { + repeated DiscoveredHost hosts = 1; +} diff --git a/proto/fluid/v1/host.proto b/proto/fluid/v1/host.proto new file mode 100644 index 00000000..591c5592 --- /dev/null +++ b/proto/fluid/v1/host.proto @@ -0,0 +1,102 @@ +syntax = "proto3"; + +package fluid.v1; + +option go_package = "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1;fluidv1"; + +// HostRegistration is sent as the first message from a sandbox host when it +// connects to the control plane. +message HostRegistration { + // host_id is a persistent identifier for this host (generated on first run). + string host_id = 1; + + // hostname is the human-readable name of the host machine. + string hostname = 2; + + // version is the sandbox-host daemon version. + string version = 3; + + // Total resources available on this host. 
+ int32 total_cpus = 10; + int64 total_memory_mb = 11; + int64 total_disk_mb = 12; + + // Available resources (after existing sandboxes). + int32 available_cpus = 13; + int64 available_memory_mb = 14; + int64 available_disk_mb = 15; + + // base_images lists the QCOW2 base images available on this host. + repeated string base_images = 20; + + // source_vms lists source VMs visible to this host via libvirt. + repeated SourceVMInfo source_vms = 21; + + // bridges lists network bridges available on this host. + repeated BridgeInfo bridges = 22; +} + +// BridgeInfo describes a network bridge available on a sandbox host. +message BridgeInfo { + string name = 1; + string subnet = 2; +} + +// SourceVMInfo describes a source VM visible to a sandbox host. +message SourceVMInfo { + string name = 1; + string state = 2; + string ip_address = 3; + bool prepared = 4; +} + +// RegistrationAck is the control plane's response to a host registration. +message RegistrationAck { + bool accepted = 1; + string reason = 2; + // assigned_host_id may differ from the one sent if the control plane + // overrides it (e.g., first registration). + string assigned_host_id = 3; +} + +// Heartbeat is sent periodically by the sandbox host (every 30s) to report +// health and resource availability. +message Heartbeat { + int32 active_sandboxes = 1; + int32 available_cpus = 2; + int64 available_memory_mb = 3; + int64 available_disk_mb = 4; + int32 source_vm_count = 5; +} + +// ResourceReport is a full resource snapshot sent on reconnection or on demand. +message ResourceReport { + int32 total_cpus = 1; + int64 total_memory_mb = 2; + int64 total_disk_mb = 3; + int32 available_cpus = 4; + int64 available_memory_mb = 5; + int64 available_disk_mb = 6; + + repeated string base_images = 10; + repeated SourceVMInfo source_vms = 11; + repeated BridgeInfo bridges = 12; + + // Per-sandbox status. 
+ repeated SandboxStatus sandbox_statuses = 20; +} + +// SandboxStatus reports the current state of a sandbox on the host. +message SandboxStatus { + string sandbox_id = 1; + string state = 2; + string ip_address = 3; + int32 pid = 4; +} + +// ErrorReport reports an error that occurred on the host. +message ErrorReport { + string error = 1; + string sandbox_id = 2; + string context = 3; +} diff --git a/proto/fluid/v1/sandbox.proto b/proto/fluid/v1/sandbox.proto new file mode 100644 index 00000000..ccb4fdd3 --- /dev/null +++ b/proto/fluid/v1/sandbox.proto @@ -0,0 +1,153 @@ +syntax = "proto3"; + +package fluid.v1; + +option go_package = "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1;fluidv1"; + +// SnapshotMode controls whether to use a cached image or take a fresh snapshot. +enum SnapshotMode { + SNAPSHOT_MODE_CACHED = 0; + SNAPSHOT_MODE_FRESH = 1; +} + +// SourceHostConnection carries the credentials needed to connect to a source host. +message SourceHostConnection { + // type is "libvirt" or "proxmox". + string type = 1; + string ssh_host = 2; + int32 ssh_port = 3; + string ssh_user = 4; + string ssh_identity_file = 5; + string proxmox_host = 6; + string proxmox_token_id = 7; + string proxmox_secret = 8; + string proxmox_node = 9; + bool proxmox_verify_ssl = 10; +} + +// CreateSandboxCommand instructs a sandbox host to create a new microVM sandbox. +message CreateSandboxCommand { + // sandbox_id is assigned by the control plane. + string sandbox_id = 1; + + // base_image is the QCOW2 base image filename to use as backing file. + string base_image = 2; + + // name is the human-readable sandbox name. + string name = 3; + + // vcpus is the number of virtual CPUs to allocate. + int32 vcpus = 4; + + // memory_mb is the amount of memory in megabytes. + int32 memory_mb = 5; + + // ttl_seconds is the time-to-live before automatic cleanup. 0 = no TTL. + int32 ttl_seconds = 6; + + // agent_id identifies the agent that requested this sandbox. 
+ string agent_id = 7; + + // network optionally overrides bridge selection. If empty, the host + // resolves the bridge from the source VM's network or default. + string network = 8; + + // source_vm is the source VM name used for network resolution. + string source_vm = 9; + + // ssh_public_key is injected into the sandbox for SSH access. + string ssh_public_key = 10; + + // snapshot_mode controls cached vs fresh snapshot behavior. + SnapshotMode snapshot_mode = 11; + + // source_host_connection carries credentials for the remote source host. + SourceHostConnection source_host_connection = 12; + + // live controls whether to clone from the VM's current live state (true) + // or use a cached image if available (false, default). + bool live = 13; +} + +// SandboxCreated is sent by the host after successfully creating a sandbox. +message SandboxCreated { + string sandbox_id = 1; + string name = 2; + string state = 3; + string ip_address = 4; + string mac_address = 5; + string bridge = 6; + int32 pid = 7; +} + +// DestroySandboxCommand instructs the host to destroy a sandbox. +message DestroySandboxCommand { + string sandbox_id = 1; +} + +// SandboxDestroyed confirms a sandbox has been destroyed. +message SandboxDestroyed { + string sandbox_id = 1; +} + +// StartSandboxCommand instructs the host to start a stopped sandbox. +message StartSandboxCommand { + string sandbox_id = 1; +} + +// SandboxStarted confirms a sandbox has been started. +message SandboxStarted { + string sandbox_id = 1; + string state = 2; + string ip_address = 3; +} + +// StopSandboxCommand instructs the host to stop a running sandbox. +message StopSandboxCommand { + string sandbox_id = 1; + bool force = 2; +} + +// SandboxStopped confirms a sandbox has been stopped. +message SandboxStopped { + string sandbox_id = 1; + string state = 2; +} + +// SandboxStateChanged reports any sandbox state transition. 
+message SandboxStateChanged { + string sandbox_id = 1; + string previous_state = 2; + string new_state = 3; + string reason = 4; +} + +// RunCommandCommand instructs the host to execute a command in a sandbox via SSH. +message RunCommandCommand { + string sandbox_id = 1; + string command = 2; + int32 timeout_seconds = 3; + map env = 4; +} + +// CommandResult returns the output of a command execution. +message CommandResult { + string sandbox_id = 1; + string stdout = 2; + string stderr = 3; + int32 exit_code = 4; + int64 duration_ms = 5; +} + +// SnapshotCommand instructs the host to snapshot a sandbox. +message SnapshotCommand { + string sandbox_id = 1; + string snapshot_name = 2; +} + +// SnapshotCreated confirms a snapshot was taken. +message SnapshotCreated { + string sandbox_id = 1; + string snapshot_id = 2; + string snapshot_name = 3; +} diff --git a/proto/fluid/v1/source.proto b/proto/fluid/v1/source.proto new file mode 100644 index 00000000..0f733e1e --- /dev/null +++ b/proto/fluid/v1/source.proto @@ -0,0 +1,88 @@ +syntax = "proto3"; + +package fluid.v1; + +option go_package = "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1;fluidv1"; + +// PrepareSourceVMCommand instructs the host to prepare a source VM for +// read-only access (install restricted shell, create fluid-readonly user, etc.). +message PrepareSourceVMCommand { + string source_vm = 1; + string ssh_user = 2; + string ssh_key_path = 3; +} + +// SourceVMPrepared reports the result of preparing a source VM. +message SourceVMPrepared { + string source_vm = 1; + string ip_address = 2; + bool prepared = 3; + bool user_created = 4; + bool shell_installed = 5; + bool ca_key_installed = 6; + bool sshd_configured = 7; + bool principals_created = 8; + bool sshd_restarted = 9; +} + +// RunSourceCommandCommand instructs the host to run a read-only command +// on a source VM via the fluid-readonly user. 
+message RunSourceCommandCommand { + string source_vm = 1; + string command = 2; + int32 timeout_seconds = 3; +} + +// SourceCommandResult returns the output of a source VM command. +message SourceCommandResult { + string source_vm = 1; + int32 exit_code = 2; + string stdout = 3; + string stderr = 4; +} + +// ReadSourceFileCommand instructs the host to read a file from a source VM. +message ReadSourceFileCommand { + string source_vm = 1; + string path = 2; +} + +// SourceFileResult returns the content of a file from a source VM. +message SourceFileResult { + string source_vm = 1; + string path = 2; + string content = 3; +} + +// ListSourceVMsCommand instructs the host to list available source VMs. +message ListSourceVMsCommand {} + +// SourceVMsList returns the list of source VMs on a host. +message SourceVMsList { + repeated SourceVMListEntry vms = 1; +} + +message SourceVMListEntry { + string name = 1; + string state = 2; + string ip_address = 3; + bool prepared = 4; +} + +// ValidateSourceVMCommand instructs the host to validate a source VM's +// readiness for read-only access. +message ValidateSourceVMCommand { + string source_vm = 1; +} + +// SourceVMValidation returns the validation result for a source VM. 
+message SourceVMValidation { + string source_vm = 1; + bool valid = 2; + string state = 3; + string mac_address = 4; + string ip_address = 5; + bool has_network = 6; + repeated string warnings = 7; + repeated string errors = 8; +} diff --git a/proto/fluid/v1/stream.proto b/proto/fluid/v1/stream.proto new file mode 100644 index 00000000..05f14c7d --- /dev/null +++ b/proto/fluid/v1/stream.proto @@ -0,0 +1,83 @@ +syntax = "proto3"; + +package fluid.v1; + +option go_package = "github.com/aspectrr/fluid.sh/proto/gen/go/fluid/v1;fluidv1"; + +import "fluid/v1/host.proto"; +import "fluid/v1/sandbox.proto"; +import "fluid/v1/source.proto"; +import "fluid/v1/daemon.proto"; + +// HostService is the bidirectional streaming service between sandbox hosts +// and the control plane. The sandbox host connects OUT to the control plane +// (NAT-friendly). Both sides exchange typed envelope messages with request_id +// for correlation. +service HostService { + // Connect opens a bidirectional stream. The host sends a HostRegistration + // as its first message and receives a RegistrationAck in response. + // After registration, both sides exchange messages asynchronously. + rpc Connect(stream HostMessage) returns (stream ControlMessage); +} + +// HostMessage is the envelope for all messages sent from sandbox host to control plane. +message HostMessage { + // request_id correlates responses to requests. 
+ string request_id = 1; + + oneof payload { + // Registration and health + HostRegistration registration = 10; + Heartbeat heartbeat = 11; + ResourceReport resource_report = 12; + ErrorReport error_report = 13; + + // Sandbox lifecycle responses + SandboxCreated sandbox_created = 20; + SandboxDestroyed sandbox_destroyed = 21; + SandboxStateChanged state_changed = 22; + SandboxStarted sandbox_started = 23; + SandboxStopped sandbox_stopped = 24; + CommandResult command_result = 25; + SnapshotCreated snapshot_created = 26; + + // Source VM responses + SourceVMPrepared source_vm_prepared = 30; + SourceCommandResult source_command_result = 31; + SourceFileResult source_file_result = 32; + SourceVMsList source_vms_list = 33; + SourceVMValidation source_vm_validation = 34; + + // Host discovery responses + DiscoverHostsResult discover_hosts_result = 40; + } +} + +// ControlMessage is the envelope for all messages sent from control plane to sandbox host. +message ControlMessage { + // request_id correlates requests to responses. 
+ string request_id = 1; + + oneof payload { + // Registration response + RegistrationAck registration_ack = 10; + + // Sandbox lifecycle commands + CreateSandboxCommand create_sandbox = 20; + DestroySandboxCommand destroy_sandbox = 21; + StartSandboxCommand start_sandbox = 22; + StopSandboxCommand stop_sandbox = 23; + RunCommandCommand run_command = 24; + SnapshotCommand create_snapshot = 25; + + // Source VM commands + PrepareSourceVMCommand prepare_source_vm = 30; + RunSourceCommandCommand run_source_command = 31; + ReadSourceFileCommand read_source_file = 32; + ListSourceVMsCommand list_source_vms = 33; + ValidateSourceVMCommand validate_source_vm = 34; + + // Host discovery commands + DiscoverHostsCommand discover_hosts = 40; + } +} diff --git a/proto/gen/go/fluid/v1/daemon.pb.go b/proto/gen/go/fluid/v1/daemon.pb.go new file mode 100644 index 00000000..0aee6ec5 --- /dev/null +++ b/proto/gen/go/fluid/v1/daemon.pb.go @@ -0,0 +1,881 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: fluid/v1/daemon.proto + +package fluidv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// GetSandboxRequest requests details for a single sandbox. 
+type GetSandboxRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetSandboxRequest) Reset() { + *x = GetSandboxRequest{} + mi := &file_fluid_v1_daemon_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSandboxRequest) ProtoMessage() {} + +func (x *GetSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_daemon_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSandboxRequest.ProtoReflect.Descriptor instead. +func (*GetSandboxRequest) Descriptor() ([]byte, []int) { + return file_fluid_v1_daemon_proto_rawDescGZIP(), []int{0} +} + +func (x *GetSandboxRequest) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +// SandboxInfo contains full details about a sandbox. 
+type SandboxInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` + IpAddress string `protobuf:"bytes,4,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + BaseImage string `protobuf:"bytes,5,opt,name=base_image,json=baseImage,proto3" json:"base_image,omitempty"` + AgentId string `protobuf:"bytes,6,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` + Vcpus int32 `protobuf:"varint,7,opt,name=vcpus,proto3" json:"vcpus,omitempty"` + MemoryMb int32 `protobuf:"varint,8,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb,omitempty"` + CreatedAt string `protobuf:"bytes,9,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SandboxInfo) Reset() { + *x = SandboxInfo{} + mi := &file_fluid_v1_daemon_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SandboxInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxInfo) ProtoMessage() {} + +func (x *SandboxInfo) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_daemon_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxInfo.ProtoReflect.Descriptor instead. 
+func (*SandboxInfo) Descriptor() ([]byte, []int) { + return file_fluid_v1_daemon_proto_rawDescGZIP(), []int{1} +} + +func (x *SandboxInfo) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *SandboxInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SandboxInfo) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *SandboxInfo) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *SandboxInfo) GetBaseImage() string { + if x != nil { + return x.BaseImage + } + return "" +} + +func (x *SandboxInfo) GetAgentId() string { + if x != nil { + return x.AgentId + } + return "" +} + +func (x *SandboxInfo) GetVcpus() int32 { + if x != nil { + return x.Vcpus + } + return 0 +} + +func (x *SandboxInfo) GetMemoryMb() int32 { + if x != nil { + return x.MemoryMb + } + return 0 +} + +func (x *SandboxInfo) GetCreatedAt() string { + if x != nil { + return x.CreatedAt + } + return "" +} + +// ListSandboxesRequest requests all sandboxes. +type ListSandboxesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListSandboxesRequest) Reset() { + *x = ListSandboxesRequest{} + mi := &file_fluid_v1_daemon_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSandboxesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSandboxesRequest) ProtoMessage() {} + +func (x *ListSandboxesRequest) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_daemon_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSandboxesRequest.ProtoReflect.Descriptor instead. 
+func (*ListSandboxesRequest) Descriptor() ([]byte, []int) { + return file_fluid_v1_daemon_proto_rawDescGZIP(), []int{2} +} + +// ListSandboxesResponse contains a list of sandboxes. +type ListSandboxesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Sandboxes []*SandboxInfo `protobuf:"bytes,1,rep,name=sandboxes,proto3" json:"sandboxes,omitempty"` + Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListSandboxesResponse) Reset() { + *x = ListSandboxesResponse{} + mi := &file_fluid_v1_daemon_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSandboxesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSandboxesResponse) ProtoMessage() {} + +func (x *ListSandboxesResponse) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_daemon_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSandboxesResponse.ProtoReflect.Descriptor instead. +func (*ListSandboxesResponse) Descriptor() ([]byte, []int) { + return file_fluid_v1_daemon_proto_rawDescGZIP(), []int{3} +} + +func (x *ListSandboxesResponse) GetSandboxes() []*SandboxInfo { + if x != nil { + return x.Sandboxes + } + return nil +} + +func (x *ListSandboxesResponse) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +// GetHostInfoRequest requests host information. 
+type GetHostInfoRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetHostInfoRequest) Reset() { + *x = GetHostInfoRequest{} + mi := &file_fluid_v1_daemon_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetHostInfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetHostInfoRequest) ProtoMessage() {} + +func (x *GetHostInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_daemon_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetHostInfoRequest.ProtoReflect.Descriptor instead. +func (*GetHostInfoRequest) Descriptor() ([]byte, []int) { + return file_fluid_v1_daemon_proto_rawDescGZIP(), []int{4} +} + +// HostInfoResponse contains host resource and capability information. 
+type HostInfoResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"` + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"` + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + TotalCpus int32 `protobuf:"varint,4,opt,name=total_cpus,json=totalCpus,proto3" json:"total_cpus,omitempty"` + TotalMemoryMb int64 `protobuf:"varint,5,opt,name=total_memory_mb,json=totalMemoryMb,proto3" json:"total_memory_mb,omitempty"` + ActiveSandboxes int32 `protobuf:"varint,6,opt,name=active_sandboxes,json=activeSandboxes,proto3" json:"active_sandboxes,omitempty"` + BaseImages []string `protobuf:"bytes,7,rep,name=base_images,json=baseImages,proto3" json:"base_images,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HostInfoResponse) Reset() { + *x = HostInfoResponse{} + mi := &file_fluid_v1_daemon_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HostInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HostInfoResponse) ProtoMessage() {} + +func (x *HostInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_daemon_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HostInfoResponse.ProtoReflect.Descriptor instead. 
+func (*HostInfoResponse) Descriptor() ([]byte, []int) { + return file_fluid_v1_daemon_proto_rawDescGZIP(), []int{5} +} + +func (x *HostInfoResponse) GetHostId() string { + if x != nil { + return x.HostId + } + return "" +} + +func (x *HostInfoResponse) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *HostInfoResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *HostInfoResponse) GetTotalCpus() int32 { + if x != nil { + return x.TotalCpus + } + return 0 +} + +func (x *HostInfoResponse) GetTotalMemoryMb() int64 { + if x != nil { + return x.TotalMemoryMb + } + return 0 +} + +func (x *HostInfoResponse) GetActiveSandboxes() int32 { + if x != nil { + return x.ActiveSandboxes + } + return 0 +} + +func (x *HostInfoResponse) GetBaseImages() []string { + if x != nil { + return x.BaseImages + } + return nil +} + +// HealthRequest is an empty health check request. +type HealthRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthRequest) Reset() { + *x = HealthRequest{} + mi := &file_fluid_v1_daemon_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthRequest) ProtoMessage() {} + +func (x *HealthRequest) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_daemon_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthRequest.ProtoReflect.Descriptor instead. +func (*HealthRequest) Descriptor() ([]byte, []int) { + return file_fluid_v1_daemon_proto_rawDescGZIP(), []int{6} +} + +// HealthResponse indicates daemon health status. 
+type HealthResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthResponse) Reset() { + *x = HealthResponse{} + mi := &file_fluid_v1_daemon_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthResponse) ProtoMessage() {} + +func (x *HealthResponse) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_daemon_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead. +func (*HealthResponse) Descriptor() ([]byte, []int) { + return file_fluid_v1_daemon_proto_rawDescGZIP(), []int{7} +} + +func (x *HealthResponse) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +// DiscoverHostsCommand requests the daemon to parse SSH config and probe hosts. +type DiscoverHostsCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + // ssh_config_content is the raw SSH config text to parse. 
+ SshConfigContent string `protobuf:"bytes,1,opt,name=ssh_config_content,json=sshConfigContent,proto3" json:"ssh_config_content,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DiscoverHostsCommand) Reset() { + *x = DiscoverHostsCommand{} + mi := &file_fluid_v1_daemon_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DiscoverHostsCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiscoverHostsCommand) ProtoMessage() {} + +func (x *DiscoverHostsCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_daemon_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiscoverHostsCommand.ProtoReflect.Descriptor instead. +func (*DiscoverHostsCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_daemon_proto_rawDescGZIP(), []int{8} +} + +func (x *DiscoverHostsCommand) GetSshConfigContent() string { + if x != nil { + return x.SshConfigContent + } + return "" +} + +// DiscoveredHost describes a single probed host. 
+type DiscoveredHost struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"` + User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"` + Port int32 `protobuf:"varint,4,opt,name=port,proto3" json:"port,omitempty"` + IdentityFile string `protobuf:"bytes,5,opt,name=identity_file,json=identityFile,proto3" json:"identity_file,omitempty"` + Reachable bool `protobuf:"varint,6,opt,name=reachable,proto3" json:"reachable,omitempty"` + HasLibvirt bool `protobuf:"varint,7,opt,name=has_libvirt,json=hasLibvirt,proto3" json:"has_libvirt,omitempty"` + HasProxmox bool `protobuf:"varint,8,opt,name=has_proxmox,json=hasProxmox,proto3" json:"has_proxmox,omitempty"` + Vms []string `protobuf:"bytes,9,rep,name=vms,proto3" json:"vms,omitempty"` + Error string `protobuf:"bytes,10,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DiscoveredHost) Reset() { + *x = DiscoveredHost{} + mi := &file_fluid_v1_daemon_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DiscoveredHost) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiscoveredHost) ProtoMessage() {} + +func (x *DiscoveredHost) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_daemon_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiscoveredHost.ProtoReflect.Descriptor instead. 
+func (*DiscoveredHost) Descriptor() ([]byte, []int) { + return file_fluid_v1_daemon_proto_rawDescGZIP(), []int{9} +} + +func (x *DiscoveredHost) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DiscoveredHost) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *DiscoveredHost) GetUser() string { + if x != nil { + return x.User + } + return "" +} + +func (x *DiscoveredHost) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *DiscoveredHost) GetIdentityFile() string { + if x != nil { + return x.IdentityFile + } + return "" +} + +func (x *DiscoveredHost) GetReachable() bool { + if x != nil { + return x.Reachable + } + return false +} + +func (x *DiscoveredHost) GetHasLibvirt() bool { + if x != nil { + return x.HasLibvirt + } + return false +} + +func (x *DiscoveredHost) GetHasProxmox() bool { + if x != nil { + return x.HasProxmox + } + return false +} + +func (x *DiscoveredHost) GetVms() []string { + if x != nil { + return x.Vms + } + return nil +} + +func (x *DiscoveredHost) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +// DiscoverHostsResult is the response containing all probed hosts. 
+type DiscoverHostsResult struct { + state protoimpl.MessageState `protogen:"open.v1"` + Hosts []*DiscoveredHost `protobuf:"bytes,1,rep,name=hosts,proto3" json:"hosts,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DiscoverHostsResult) Reset() { + *x = DiscoverHostsResult{} + mi := &file_fluid_v1_daemon_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DiscoverHostsResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiscoverHostsResult) ProtoMessage() {} + +func (x *DiscoverHostsResult) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_daemon_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiscoverHostsResult.ProtoReflect.Descriptor instead. +func (*DiscoverHostsResult) Descriptor() ([]byte, []int) { + return file_fluid_v1_daemon_proto_rawDescGZIP(), []int{10} +} + +func (x *DiscoverHostsResult) GetHosts() []*DiscoveredHost { + if x != nil { + return x.Hosts + } + return nil +} + +var File_fluid_v1_daemon_proto protoreflect.FileDescriptor + +const file_fluid_v1_daemon_proto_rawDesc = "" + + "\n" + + "\x15fluid/v1/daemon.proto\x12\bfluid.v1\x1a\x16fluid/v1/sandbox.proto\x1a\x15fluid/v1/source.proto\x1a\x13fluid/v1/host.proto\"2\n" + + "\x11GetSandboxRequest\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\"\x81\x02\n" + + "\vSandboxInfo\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12\x14\n" + + "\x05state\x18\x03 \x01(\tR\x05state\x12\x1d\n" + + "\n" + + "ip_address\x18\x04 \x01(\tR\tipAddress\x12\x1d\n" + + "\n" + + "base_image\x18\x05 \x01(\tR\tbaseImage\x12\x19\n" + + "\bagent_id\x18\x06 \x01(\tR\aagentId\x12\x14\n" + + "\x05vcpus\x18\a 
\x01(\x05R\x05vcpus\x12\x1b\n" + + "\tmemory_mb\x18\b \x01(\x05R\bmemoryMb\x12\x1d\n" + + "\n" + + "created_at\x18\t \x01(\tR\tcreatedAt\"\x16\n" + + "\x14ListSandboxesRequest\"b\n" + + "\x15ListSandboxesResponse\x123\n" + + "\tsandboxes\x18\x01 \x03(\v2\x15.fluid.v1.SandboxInfoR\tsandboxes\x12\x14\n" + + "\x05count\x18\x02 \x01(\x05R\x05count\"\x14\n" + + "\x12GetHostInfoRequest\"\xf4\x01\n" + + "\x10HostInfoResponse\x12\x17\n" + + "\ahost_id\x18\x01 \x01(\tR\x06hostId\x12\x1a\n" + + "\bhostname\x18\x02 \x01(\tR\bhostname\x12\x18\n" + + "\aversion\x18\x03 \x01(\tR\aversion\x12\x1d\n" + + "\n" + + "total_cpus\x18\x04 \x01(\x05R\ttotalCpus\x12&\n" + + "\x0ftotal_memory_mb\x18\x05 \x01(\x03R\rtotalMemoryMb\x12)\n" + + "\x10active_sandboxes\x18\x06 \x01(\x05R\x0factiveSandboxes\x12\x1f\n" + + "\vbase_images\x18\a \x03(\tR\n" + + "baseImages\"\x0f\n" + + "\rHealthRequest\"(\n" + + "\x0eHealthResponse\x12\x16\n" + + "\x06status\x18\x01 \x01(\tR\x06status\"D\n" + + "\x14DiscoverHostsCommand\x12,\n" + + "\x12ssh_config_content\x18\x01 \x01(\tR\x10sshConfigContent\"\x95\x02\n" + + "\x0eDiscoveredHost\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1a\n" + + "\bhostname\x18\x02 \x01(\tR\bhostname\x12\x12\n" + + "\x04user\x18\x03 \x01(\tR\x04user\x12\x12\n" + + "\x04port\x18\x04 \x01(\x05R\x04port\x12#\n" + + "\ridentity_file\x18\x05 \x01(\tR\fidentityFile\x12\x1c\n" + + "\treachable\x18\x06 \x01(\bR\treachable\x12\x1f\n" + + "\vhas_libvirt\x18\a \x01(\bR\n" + + "hasLibvirt\x12\x1f\n" + + "\vhas_proxmox\x18\b \x01(\bR\n" + + "hasProxmox\x12\x10\n" + + "\x03vms\x18\t \x03(\tR\x03vms\x12\x14\n" + + "\x05error\x18\n" + + " \x01(\tR\x05error\"E\n" + + "\x13DiscoverHostsResult\x12.\n" + + "\x05hosts\x18\x01 \x03(\v2\x18.fluid.v1.DiscoveredHostR\x05hosts2\xc4\t\n" + + "\rDaemonService\x12I\n" + + "\rCreateSandbox\x12\x1e.fluid.v1.CreateSandboxCommand\x1a\x18.fluid.v1.SandboxCreated\x12@\n" + + "\n" + + 
"GetSandbox\x12\x1b.fluid.v1.GetSandboxRequest\x1a\x15.fluid.v1.SandboxInfo\x12P\n" + + "\rListSandboxes\x12\x1e.fluid.v1.ListSandboxesRequest\x1a\x1f.fluid.v1.ListSandboxesResponse\x12M\n" + + "\x0eDestroySandbox\x12\x1f.fluid.v1.DestroySandboxCommand\x1a\x1a.fluid.v1.SandboxDestroyed\x12G\n" + + "\fStartSandbox\x12\x1d.fluid.v1.StartSandboxCommand\x1a\x18.fluid.v1.SandboxStarted\x12E\n" + + "\vStopSandbox\x12\x1c.fluid.v1.StopSandboxCommand\x1a\x18.fluid.v1.SandboxStopped\x12B\n" + + "\n" + + "RunCommand\x12\x1b.fluid.v1.RunCommandCommand\x1a\x17.fluid.v1.CommandResult\x12F\n" + + "\x0eCreateSnapshot\x12\x19.fluid.v1.SnapshotCommand\x1a\x19.fluid.v1.SnapshotCreated\x12H\n" + + "\rListSourceVMs\x12\x1e.fluid.v1.ListSourceVMsCommand\x1a\x17.fluid.v1.SourceVMsList\x12S\n" + + "\x10ValidateSourceVM\x12!.fluid.v1.ValidateSourceVMCommand\x1a\x1c.fluid.v1.SourceVMValidation\x12O\n" + + "\x0fPrepareSourceVM\x12 .fluid.v1.PrepareSourceVMCommand\x1a\x1a.fluid.v1.SourceVMPrepared\x12T\n" + + "\x10RunSourceCommand\x12!.fluid.v1.RunSourceCommandCommand\x1a\x1d.fluid.v1.SourceCommandResult\x12M\n" + + "\x0eReadSourceFile\x12\x1f.fluid.v1.ReadSourceFileCommand\x1a\x1a.fluid.v1.SourceFileResult\x12G\n" + + "\vGetHostInfo\x12\x1c.fluid.v1.GetHostInfoRequest\x1a\x1a.fluid.v1.HostInfoResponse\x12;\n" + + "\x06Health\x12\x17.fluid.v1.HealthRequest\x1a\x18.fluid.v1.HealthResponse\x12N\n" + + "\rDiscoverHosts\x12\x1e.fluid.v1.DiscoverHostsCommand\x1a\x1d.fluid.v1.DiscoverHostsResultB fluid.v1.SandboxInfo + 9, // 1: fluid.v1.DiscoverHostsResult.hosts:type_name -> fluid.v1.DiscoveredHost + 11, // 2: fluid.v1.DaemonService.CreateSandbox:input_type -> fluid.v1.CreateSandboxCommand + 0, // 3: fluid.v1.DaemonService.GetSandbox:input_type -> fluid.v1.GetSandboxRequest + 2, // 4: fluid.v1.DaemonService.ListSandboxes:input_type -> fluid.v1.ListSandboxesRequest + 12, // 5: fluid.v1.DaemonService.DestroySandbox:input_type -> fluid.v1.DestroySandboxCommand + 13, // 6: 
fluid.v1.DaemonService.StartSandbox:input_type -> fluid.v1.StartSandboxCommand + 14, // 7: fluid.v1.DaemonService.StopSandbox:input_type -> fluid.v1.StopSandboxCommand + 15, // 8: fluid.v1.DaemonService.RunCommand:input_type -> fluid.v1.RunCommandCommand + 16, // 9: fluid.v1.DaemonService.CreateSnapshot:input_type -> fluid.v1.SnapshotCommand + 17, // 10: fluid.v1.DaemonService.ListSourceVMs:input_type -> fluid.v1.ListSourceVMsCommand + 18, // 11: fluid.v1.DaemonService.ValidateSourceVM:input_type -> fluid.v1.ValidateSourceVMCommand + 19, // 12: fluid.v1.DaemonService.PrepareSourceVM:input_type -> fluid.v1.PrepareSourceVMCommand + 20, // 13: fluid.v1.DaemonService.RunSourceCommand:input_type -> fluid.v1.RunSourceCommandCommand + 21, // 14: fluid.v1.DaemonService.ReadSourceFile:input_type -> fluid.v1.ReadSourceFileCommand + 4, // 15: fluid.v1.DaemonService.GetHostInfo:input_type -> fluid.v1.GetHostInfoRequest + 6, // 16: fluid.v1.DaemonService.Health:input_type -> fluid.v1.HealthRequest + 8, // 17: fluid.v1.DaemonService.DiscoverHosts:input_type -> fluid.v1.DiscoverHostsCommand + 22, // 18: fluid.v1.DaemonService.CreateSandbox:output_type -> fluid.v1.SandboxCreated + 1, // 19: fluid.v1.DaemonService.GetSandbox:output_type -> fluid.v1.SandboxInfo + 3, // 20: fluid.v1.DaemonService.ListSandboxes:output_type -> fluid.v1.ListSandboxesResponse + 23, // 21: fluid.v1.DaemonService.DestroySandbox:output_type -> fluid.v1.SandboxDestroyed + 24, // 22: fluid.v1.DaemonService.StartSandbox:output_type -> fluid.v1.SandboxStarted + 25, // 23: fluid.v1.DaemonService.StopSandbox:output_type -> fluid.v1.SandboxStopped + 26, // 24: fluid.v1.DaemonService.RunCommand:output_type -> fluid.v1.CommandResult + 27, // 25: fluid.v1.DaemonService.CreateSnapshot:output_type -> fluid.v1.SnapshotCreated + 28, // 26: fluid.v1.DaemonService.ListSourceVMs:output_type -> fluid.v1.SourceVMsList + 29, // 27: fluid.v1.DaemonService.ValidateSourceVM:output_type -> fluid.v1.SourceVMValidation + 30, // 28: 
fluid.v1.DaemonService.PrepareSourceVM:output_type -> fluid.v1.SourceVMPrepared + 31, // 29: fluid.v1.DaemonService.RunSourceCommand:output_type -> fluid.v1.SourceCommandResult + 32, // 30: fluid.v1.DaemonService.ReadSourceFile:output_type -> fluid.v1.SourceFileResult + 5, // 31: fluid.v1.DaemonService.GetHostInfo:output_type -> fluid.v1.HostInfoResponse + 7, // 32: fluid.v1.DaemonService.Health:output_type -> fluid.v1.HealthResponse + 10, // 33: fluid.v1.DaemonService.DiscoverHosts:output_type -> fluid.v1.DiscoverHostsResult + 18, // [18:34] is the sub-list for method output_type + 2, // [2:18] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_fluid_v1_daemon_proto_init() } +func file_fluid_v1_daemon_proto_init() { + if File_fluid_v1_daemon_proto != nil { + return + } + file_fluid_v1_sandbox_proto_init() + file_fluid_v1_source_proto_init() + file_fluid_v1_host_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_fluid_v1_daemon_proto_rawDesc), len(file_fluid_v1_daemon_proto_rawDesc)), + NumEnums: 0, + NumMessages: 11, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_fluid_v1_daemon_proto_goTypes, + DependencyIndexes: file_fluid_v1_daemon_proto_depIdxs, + MessageInfos: file_fluid_v1_daemon_proto_msgTypes, + }.Build() + File_fluid_v1_daemon_proto = out.File + file_fluid_v1_daemon_proto_goTypes = nil + file_fluid_v1_daemon_proto_depIdxs = nil +} diff --git a/proto/gen/go/fluid/v1/daemon_grpc.pb.go b/proto/gen/go/fluid/v1/daemon_grpc.pb.go new file mode 100644 index 00000000..afe0a960 --- /dev/null +++ b/proto/gen/go/fluid/v1/daemon_grpc.pb.go @@ -0,0 +1,711 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.6.1 +// - protoc (unknown) +// source: fluid/v1/daemon.proto + +package fluidv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + DaemonService_CreateSandbox_FullMethodName = "/fluid.v1.DaemonService/CreateSandbox" + DaemonService_GetSandbox_FullMethodName = "/fluid.v1.DaemonService/GetSandbox" + DaemonService_ListSandboxes_FullMethodName = "/fluid.v1.DaemonService/ListSandboxes" + DaemonService_DestroySandbox_FullMethodName = "/fluid.v1.DaemonService/DestroySandbox" + DaemonService_StartSandbox_FullMethodName = "/fluid.v1.DaemonService/StartSandbox" + DaemonService_StopSandbox_FullMethodName = "/fluid.v1.DaemonService/StopSandbox" + DaemonService_RunCommand_FullMethodName = "/fluid.v1.DaemonService/RunCommand" + DaemonService_CreateSnapshot_FullMethodName = "/fluid.v1.DaemonService/CreateSnapshot" + DaemonService_ListSourceVMs_FullMethodName = "/fluid.v1.DaemonService/ListSourceVMs" + DaemonService_ValidateSourceVM_FullMethodName = "/fluid.v1.DaemonService/ValidateSourceVM" + DaemonService_PrepareSourceVM_FullMethodName = "/fluid.v1.DaemonService/PrepareSourceVM" + DaemonService_RunSourceCommand_FullMethodName = "/fluid.v1.DaemonService/RunSourceCommand" + DaemonService_ReadSourceFile_FullMethodName = "/fluid.v1.DaemonService/ReadSourceFile" + DaemonService_GetHostInfo_FullMethodName = "/fluid.v1.DaemonService/GetHostInfo" + DaemonService_Health_FullMethodName = "/fluid.v1.DaemonService/Health" + DaemonService_DiscoverHosts_FullMethodName = "/fluid.v1.DaemonService/DiscoverHosts" +) + +// DaemonServiceClient is the client API for DaemonService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// DaemonService is served by the fluid-daemon for direct CLI access. +// Unlike HostService (bidirectional streaming for control plane), +// this uses standard unary RPCs for CLI-to-daemon communication. +type DaemonServiceClient interface { + // Sandbox lifecycle + CreateSandbox(ctx context.Context, in *CreateSandboxCommand, opts ...grpc.CallOption) (*SandboxCreated, error) + GetSandbox(ctx context.Context, in *GetSandboxRequest, opts ...grpc.CallOption) (*SandboxInfo, error) + ListSandboxes(ctx context.Context, in *ListSandboxesRequest, opts ...grpc.CallOption) (*ListSandboxesResponse, error) + DestroySandbox(ctx context.Context, in *DestroySandboxCommand, opts ...grpc.CallOption) (*SandboxDestroyed, error) + StartSandbox(ctx context.Context, in *StartSandboxCommand, opts ...grpc.CallOption) (*SandboxStarted, error) + StopSandbox(ctx context.Context, in *StopSandboxCommand, opts ...grpc.CallOption) (*SandboxStopped, error) + // Command execution + RunCommand(ctx context.Context, in *RunCommandCommand, opts ...grpc.CallOption) (*CommandResult, error) + // Snapshots + CreateSnapshot(ctx context.Context, in *SnapshotCommand, opts ...grpc.CallOption) (*SnapshotCreated, error) + // Source VM operations + ListSourceVMs(ctx context.Context, in *ListSourceVMsCommand, opts ...grpc.CallOption) (*SourceVMsList, error) + ValidateSourceVM(ctx context.Context, in *ValidateSourceVMCommand, opts ...grpc.CallOption) (*SourceVMValidation, error) + PrepareSourceVM(ctx context.Context, in *PrepareSourceVMCommand, opts ...grpc.CallOption) (*SourceVMPrepared, error) + RunSourceCommand(ctx context.Context, in *RunSourceCommandCommand, opts ...grpc.CallOption) (*SourceCommandResult, error) + ReadSourceFile(ctx context.Context, in *ReadSourceFileCommand, opts ...grpc.CallOption) (*SourceFileResult, error) + // Host info + 
GetHostInfo(ctx context.Context, in *GetHostInfoRequest, opts ...grpc.CallOption) (*HostInfoResponse, error) + Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) + // Host discovery + DiscoverHosts(ctx context.Context, in *DiscoverHostsCommand, opts ...grpc.CallOption) (*DiscoverHostsResult, error) +} + +type daemonServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewDaemonServiceClient(cc grpc.ClientConnInterface) DaemonServiceClient { + return &daemonServiceClient{cc} +} + +func (c *daemonServiceClient) CreateSandbox(ctx context.Context, in *CreateSandboxCommand, opts ...grpc.CallOption) (*SandboxCreated, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SandboxCreated) + err := c.cc.Invoke(ctx, DaemonService_CreateSandbox_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) GetSandbox(ctx context.Context, in *GetSandboxRequest, opts ...grpc.CallOption) (*SandboxInfo, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SandboxInfo) + err := c.cc.Invoke(ctx, DaemonService_GetSandbox_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) ListSandboxes(ctx context.Context, in *ListSandboxesRequest, opts ...grpc.CallOption) (*ListSandboxesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListSandboxesResponse) + err := c.cc.Invoke(ctx, DaemonService_ListSandboxes_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) DestroySandbox(ctx context.Context, in *DestroySandboxCommand, opts ...grpc.CallOption) (*SandboxDestroyed, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(SandboxDestroyed) + err := c.cc.Invoke(ctx, DaemonService_DestroySandbox_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) StartSandbox(ctx context.Context, in *StartSandboxCommand, opts ...grpc.CallOption) (*SandboxStarted, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SandboxStarted) + err := c.cc.Invoke(ctx, DaemonService_StartSandbox_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) StopSandbox(ctx context.Context, in *StopSandboxCommand, opts ...grpc.CallOption) (*SandboxStopped, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SandboxStopped) + err := c.cc.Invoke(ctx, DaemonService_StopSandbox_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) RunCommand(ctx context.Context, in *RunCommandCommand, opts ...grpc.CallOption) (*CommandResult, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CommandResult) + err := c.cc.Invoke(ctx, DaemonService_RunCommand_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) CreateSnapshot(ctx context.Context, in *SnapshotCommand, opts ...grpc.CallOption) (*SnapshotCreated, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SnapshotCreated) + err := c.cc.Invoke(ctx, DaemonService_CreateSnapshot_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) ListSourceVMs(ctx context.Context, in *ListSourceVMsCommand, opts ...grpc.CallOption) (*SourceVMsList, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(SourceVMsList) + err := c.cc.Invoke(ctx, DaemonService_ListSourceVMs_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) ValidateSourceVM(ctx context.Context, in *ValidateSourceVMCommand, opts ...grpc.CallOption) (*SourceVMValidation, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SourceVMValidation) + err := c.cc.Invoke(ctx, DaemonService_ValidateSourceVM_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) PrepareSourceVM(ctx context.Context, in *PrepareSourceVMCommand, opts ...grpc.CallOption) (*SourceVMPrepared, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SourceVMPrepared) + err := c.cc.Invoke(ctx, DaemonService_PrepareSourceVM_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) RunSourceCommand(ctx context.Context, in *RunSourceCommandCommand, opts ...grpc.CallOption) (*SourceCommandResult, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SourceCommandResult) + err := c.cc.Invoke(ctx, DaemonService_RunSourceCommand_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) ReadSourceFile(ctx context.Context, in *ReadSourceFileCommand, opts ...grpc.CallOption) (*SourceFileResult, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SourceFileResult) + err := c.cc.Invoke(ctx, DaemonService_ReadSourceFile_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) GetHostInfo(ctx context.Context, in *GetHostInfoRequest, opts ...grpc.CallOption) (*HostInfoResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HostInfoResponse) + err := c.cc.Invoke(ctx, DaemonService_GetHostInfo_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HealthResponse) + err := c.cc.Invoke(ctx, DaemonService_Health_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) DiscoverHosts(ctx context.Context, in *DiscoverHostsCommand, opts ...grpc.CallOption) (*DiscoverHostsResult, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(DiscoverHostsResult) + err := c.cc.Invoke(ctx, DaemonService_DiscoverHosts_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DaemonServiceServer is the server API for DaemonService service. +// All implementations must embed UnimplementedDaemonServiceServer +// for forward compatibility. +// +// DaemonService is served by the fluid-daemon for direct CLI access. +// Unlike HostService (bidirectional streaming for control plane), +// this uses standard unary RPCs for CLI-to-daemon communication. 
+type DaemonServiceServer interface { + // Sandbox lifecycle + CreateSandbox(context.Context, *CreateSandboxCommand) (*SandboxCreated, error) + GetSandbox(context.Context, *GetSandboxRequest) (*SandboxInfo, error) + ListSandboxes(context.Context, *ListSandboxesRequest) (*ListSandboxesResponse, error) + DestroySandbox(context.Context, *DestroySandboxCommand) (*SandboxDestroyed, error) + StartSandbox(context.Context, *StartSandboxCommand) (*SandboxStarted, error) + StopSandbox(context.Context, *StopSandboxCommand) (*SandboxStopped, error) + // Command execution + RunCommand(context.Context, *RunCommandCommand) (*CommandResult, error) + // Snapshots + CreateSnapshot(context.Context, *SnapshotCommand) (*SnapshotCreated, error) + // Source VM operations + ListSourceVMs(context.Context, *ListSourceVMsCommand) (*SourceVMsList, error) + ValidateSourceVM(context.Context, *ValidateSourceVMCommand) (*SourceVMValidation, error) + PrepareSourceVM(context.Context, *PrepareSourceVMCommand) (*SourceVMPrepared, error) + RunSourceCommand(context.Context, *RunSourceCommandCommand) (*SourceCommandResult, error) + ReadSourceFile(context.Context, *ReadSourceFileCommand) (*SourceFileResult, error) + // Host info + GetHostInfo(context.Context, *GetHostInfoRequest) (*HostInfoResponse, error) + Health(context.Context, *HealthRequest) (*HealthResponse, error) + // Host discovery + DiscoverHosts(context.Context, *DiscoverHostsCommand) (*DiscoverHostsResult, error) + mustEmbedUnimplementedDaemonServiceServer() +} + +// UnimplementedDaemonServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedDaemonServiceServer struct{} + +func (UnimplementedDaemonServiceServer) CreateSandbox(context.Context, *CreateSandboxCommand) (*SandboxCreated, error) { + return nil, status.Error(codes.Unimplemented, "method CreateSandbox not implemented") +} +func (UnimplementedDaemonServiceServer) GetSandbox(context.Context, *GetSandboxRequest) (*SandboxInfo, error) { + return nil, status.Error(codes.Unimplemented, "method GetSandbox not implemented") +} +func (UnimplementedDaemonServiceServer) ListSandboxes(context.Context, *ListSandboxesRequest) (*ListSandboxesResponse, error) { + return nil, status.Error(codes.Unimplemented, "method ListSandboxes not implemented") +} +func (UnimplementedDaemonServiceServer) DestroySandbox(context.Context, *DestroySandboxCommand) (*SandboxDestroyed, error) { + return nil, status.Error(codes.Unimplemented, "method DestroySandbox not implemented") +} +func (UnimplementedDaemonServiceServer) StartSandbox(context.Context, *StartSandboxCommand) (*SandboxStarted, error) { + return nil, status.Error(codes.Unimplemented, "method StartSandbox not implemented") +} +func (UnimplementedDaemonServiceServer) StopSandbox(context.Context, *StopSandboxCommand) (*SandboxStopped, error) { + return nil, status.Error(codes.Unimplemented, "method StopSandbox not implemented") +} +func (UnimplementedDaemonServiceServer) RunCommand(context.Context, *RunCommandCommand) (*CommandResult, error) { + return nil, status.Error(codes.Unimplemented, "method RunCommand not implemented") +} +func (UnimplementedDaemonServiceServer) CreateSnapshot(context.Context, *SnapshotCommand) (*SnapshotCreated, error) { + return nil, status.Error(codes.Unimplemented, "method CreateSnapshot not implemented") +} +func (UnimplementedDaemonServiceServer) ListSourceVMs(context.Context, *ListSourceVMsCommand) (*SourceVMsList, error) { + return nil, status.Error(codes.Unimplemented, "method ListSourceVMs not implemented") +} +func (UnimplementedDaemonServiceServer) 
ValidateSourceVM(context.Context, *ValidateSourceVMCommand) (*SourceVMValidation, error) { + return nil, status.Error(codes.Unimplemented, "method ValidateSourceVM not implemented") +} +func (UnimplementedDaemonServiceServer) PrepareSourceVM(context.Context, *PrepareSourceVMCommand) (*SourceVMPrepared, error) { + return nil, status.Error(codes.Unimplemented, "method PrepareSourceVM not implemented") +} +func (UnimplementedDaemonServiceServer) RunSourceCommand(context.Context, *RunSourceCommandCommand) (*SourceCommandResult, error) { + return nil, status.Error(codes.Unimplemented, "method RunSourceCommand not implemented") +} +func (UnimplementedDaemonServiceServer) ReadSourceFile(context.Context, *ReadSourceFileCommand) (*SourceFileResult, error) { + return nil, status.Error(codes.Unimplemented, "method ReadSourceFile not implemented") +} +func (UnimplementedDaemonServiceServer) GetHostInfo(context.Context, *GetHostInfoRequest) (*HostInfoResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetHostInfo not implemented") +} +func (UnimplementedDaemonServiceServer) Health(context.Context, *HealthRequest) (*HealthResponse, error) { + return nil, status.Error(codes.Unimplemented, "method Health not implemented") +} +func (UnimplementedDaemonServiceServer) DiscoverHosts(context.Context, *DiscoverHostsCommand) (*DiscoverHostsResult, error) { + return nil, status.Error(codes.Unimplemented, "method DiscoverHosts not implemented") +} +func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} +func (UnimplementedDaemonServiceServer) testEmbeddedByValue() {} + +// UnsafeDaemonServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DaemonServiceServer will +// result in compilation errors. 
+type UnsafeDaemonServiceServer interface { + mustEmbedUnimplementedDaemonServiceServer() +} + +func RegisterDaemonServiceServer(s grpc.ServiceRegistrar, srv DaemonServiceServer) { + // If the following call panics, it indicates UnimplementedDaemonServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&DaemonService_ServiceDesc, srv) +} + +func _DaemonService_CreateSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSandboxCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).CreateSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_CreateSandbox_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).CreateSandbox(ctx, req.(*CreateSandboxCommand)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_GetSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).GetSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_GetSandbox_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).GetSandbox(ctx, req.(*GetSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_DaemonService_ListSandboxes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSandboxesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).ListSandboxes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_ListSandboxes_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).ListSandboxes(ctx, req.(*ListSandboxesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_DestroySandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DestroySandboxCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).DestroySandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_DestroySandbox_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).DestroySandbox(ctx, req.(*DestroySandboxCommand)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_StartSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartSandboxCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).StartSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_StartSandbox_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).StartSandbox(ctx, req.(*StartSandboxCommand)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _DaemonService_StopSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopSandboxCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).StopSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_StopSandbox_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).StopSandbox(ctx, req.(*StopSandboxCommand)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_RunCommand_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunCommandCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).RunCommand(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_RunCommand_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).RunCommand(ctx, req.(*RunCommandCommand)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SnapshotCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).CreateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_CreateSnapshot_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).CreateSnapshot(ctx, req.(*SnapshotCommand)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _DaemonService_ListSourceVMs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSourceVMsCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).ListSourceVMs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_ListSourceVMs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).ListSourceVMs(ctx, req.(*ListSourceVMsCommand)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_ValidateSourceVM_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateSourceVMCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).ValidateSourceVM(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_ValidateSourceVM_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).ValidateSourceVM(ctx, req.(*ValidateSourceVMCommand)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_PrepareSourceVM_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareSourceVMCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).PrepareSourceVM(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_PrepareSourceVM_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(DaemonServiceServer).PrepareSourceVM(ctx, req.(*PrepareSourceVMCommand)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_RunSourceCommand_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunSourceCommandCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).RunSourceCommand(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_RunSourceCommand_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).RunSourceCommand(ctx, req.(*RunSourceCommandCommand)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_ReadSourceFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadSourceFileCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).ReadSourceFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_ReadSourceFile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).ReadSourceFile(ctx, req.(*ReadSourceFileCommand)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_GetHostInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetHostInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).GetHostInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_GetHostInfo_FullMethodName, + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).GetHostInfo(ctx, req.(*GetHostInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_Health_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).Health(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_Health_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).Health(ctx, req.(*HealthRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_DiscoverHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DiscoverHostsCommand) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).DiscoverHosts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DaemonService_DiscoverHosts_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).DiscoverHosts(ctx, req.(*DiscoverHostsCommand)) + } + return interceptor(ctx, in, info, handler) +} + +// DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var DaemonService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "fluid.v1.DaemonService", + HandlerType: (*DaemonServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSandbox", + Handler: _DaemonService_CreateSandbox_Handler, + }, + { + MethodName: "GetSandbox", + Handler: _DaemonService_GetSandbox_Handler, + }, + { + MethodName: "ListSandboxes", + Handler: _DaemonService_ListSandboxes_Handler, + }, + { + MethodName: "DestroySandbox", + Handler: _DaemonService_DestroySandbox_Handler, + }, + { + MethodName: "StartSandbox", + Handler: _DaemonService_StartSandbox_Handler, + }, + { + MethodName: "StopSandbox", + Handler: _DaemonService_StopSandbox_Handler, + }, + { + MethodName: "RunCommand", + Handler: _DaemonService_RunCommand_Handler, + }, + { + MethodName: "CreateSnapshot", + Handler: _DaemonService_CreateSnapshot_Handler, + }, + { + MethodName: "ListSourceVMs", + Handler: _DaemonService_ListSourceVMs_Handler, + }, + { + MethodName: "ValidateSourceVM", + Handler: _DaemonService_ValidateSourceVM_Handler, + }, + { + MethodName: "PrepareSourceVM", + Handler: _DaemonService_PrepareSourceVM_Handler, + }, + { + MethodName: "RunSourceCommand", + Handler: _DaemonService_RunSourceCommand_Handler, + }, + { + MethodName: "ReadSourceFile", + Handler: _DaemonService_ReadSourceFile_Handler, + }, + { + MethodName: "GetHostInfo", + Handler: _DaemonService_GetHostInfo_Handler, + }, + { + MethodName: "Health", + Handler: _DaemonService_Health_Handler, + }, + { + MethodName: "DiscoverHosts", + Handler: _DaemonService_DiscoverHosts_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "fluid/v1/daemon.proto", +} diff --git a/proto/gen/go/fluid/v1/host.pb.go b/proto/gen/go/fluid/v1/host.pb.go new file mode 100644 index 00000000..31c33731 --- /dev/null +++ b/proto/gen/go/fluid/v1/host.pb.go @@ -0,0 +1,805 @@ +// Code generated by 
protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: fluid/v1/host.proto + +package fluidv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// HostRegistration is sent as the first message from a sandbox host when it +// connects to the control plane. +type HostRegistration struct { + state protoimpl.MessageState `protogen:"open.v1"` + // host_id is a persistent identifier for this host (generated on first run). + HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"` + // hostname is the human-readable name of the host machine. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"` + // version is the sandbox-host daemon version. + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + // Total resources available on this host. + TotalCpus int32 `protobuf:"varint,10,opt,name=total_cpus,json=totalCpus,proto3" json:"total_cpus,omitempty"` + TotalMemoryMb int64 `protobuf:"varint,11,opt,name=total_memory_mb,json=totalMemoryMb,proto3" json:"total_memory_mb,omitempty"` + TotalDiskMb int64 `protobuf:"varint,12,opt,name=total_disk_mb,json=totalDiskMb,proto3" json:"total_disk_mb,omitempty"` + // Available resources (after existing sandboxes). 
+ AvailableCpus int32 `protobuf:"varint,13,opt,name=available_cpus,json=availableCpus,proto3" json:"available_cpus,omitempty"` + AvailableMemoryMb int64 `protobuf:"varint,14,opt,name=available_memory_mb,json=availableMemoryMb,proto3" json:"available_memory_mb,omitempty"` + AvailableDiskMb int64 `protobuf:"varint,15,opt,name=available_disk_mb,json=availableDiskMb,proto3" json:"available_disk_mb,omitempty"` + // base_images lists the QCOW2 base images available on this host. + BaseImages []string `protobuf:"bytes,20,rep,name=base_images,json=baseImages,proto3" json:"base_images,omitempty"` + // source_vms lists source VMs visible to this host via libvirt. + SourceVms []*SourceVMInfo `protobuf:"bytes,21,rep,name=source_vms,json=sourceVms,proto3" json:"source_vms,omitempty"` + // bridges lists network bridges available on this host. + Bridges []*BridgeInfo `protobuf:"bytes,22,rep,name=bridges,proto3" json:"bridges,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HostRegistration) Reset() { + *x = HostRegistration{} + mi := &file_fluid_v1_host_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HostRegistration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HostRegistration) ProtoMessage() {} + +func (x *HostRegistration) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_host_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HostRegistration.ProtoReflect.Descriptor instead. 
+func (*HostRegistration) Descriptor() ([]byte, []int) { + return file_fluid_v1_host_proto_rawDescGZIP(), []int{0} +} + +func (x *HostRegistration) GetHostId() string { + if x != nil { + return x.HostId + } + return "" +} + +func (x *HostRegistration) GetHostname() string { + if x != nil { + return x.Hostname + } + return "" +} + +func (x *HostRegistration) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *HostRegistration) GetTotalCpus() int32 { + if x != nil { + return x.TotalCpus + } + return 0 +} + +func (x *HostRegistration) GetTotalMemoryMb() int64 { + if x != nil { + return x.TotalMemoryMb + } + return 0 +} + +func (x *HostRegistration) GetTotalDiskMb() int64 { + if x != nil { + return x.TotalDiskMb + } + return 0 +} + +func (x *HostRegistration) GetAvailableCpus() int32 { + if x != nil { + return x.AvailableCpus + } + return 0 +} + +func (x *HostRegistration) GetAvailableMemoryMb() int64 { + if x != nil { + return x.AvailableMemoryMb + } + return 0 +} + +func (x *HostRegistration) GetAvailableDiskMb() int64 { + if x != nil { + return x.AvailableDiskMb + } + return 0 +} + +func (x *HostRegistration) GetBaseImages() []string { + if x != nil { + return x.BaseImages + } + return nil +} + +func (x *HostRegistration) GetSourceVms() []*SourceVMInfo { + if x != nil { + return x.SourceVms + } + return nil +} + +func (x *HostRegistration) GetBridges() []*BridgeInfo { + if x != nil { + return x.Bridges + } + return nil +} + +// BridgeInfo describes a network bridge available on a sandbox host. 
+type BridgeInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Subnet string `protobuf:"bytes,2,opt,name=subnet,proto3" json:"subnet,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BridgeInfo) Reset() { + *x = BridgeInfo{} + mi := &file_fluid_v1_host_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BridgeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BridgeInfo) ProtoMessage() {} + +func (x *BridgeInfo) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_host_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BridgeInfo.ProtoReflect.Descriptor instead. +func (*BridgeInfo) Descriptor() ([]byte, []int) { + return file_fluid_v1_host_proto_rawDescGZIP(), []int{1} +} + +func (x *BridgeInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *BridgeInfo) GetSubnet() string { + if x != nil { + return x.Subnet + } + return "" +} + +// SourceVMInfo describes a source VM visible to a sandbox host. 
+type SourceVMInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + Prepared bool `protobuf:"varint,4,opt,name=prepared,proto3" json:"prepared,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceVMInfo) Reset() { + *x = SourceVMInfo{} + mi := &file_fluid_v1_host_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceVMInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceVMInfo) ProtoMessage() {} + +func (x *SourceVMInfo) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_host_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceVMInfo.ProtoReflect.Descriptor instead. +func (*SourceVMInfo) Descriptor() ([]byte, []int) { + return file_fluid_v1_host_proto_rawDescGZIP(), []int{2} +} + +func (x *SourceVMInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SourceVMInfo) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *SourceVMInfo) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *SourceVMInfo) GetPrepared() bool { + if x != nil { + return x.Prepared + } + return false +} + +// RegistrationAck is the control plane's response to a host registration. 
+type RegistrationAck struct { + state protoimpl.MessageState `protogen:"open.v1"` + Accepted bool `protobuf:"varint,1,opt,name=accepted,proto3" json:"accepted,omitempty"` + Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` + // assigned_host_id may differ from the one sent if the control plane + // overrides it (e.g., first registration). + AssignedHostId string `protobuf:"bytes,3,opt,name=assigned_host_id,json=assignedHostId,proto3" json:"assigned_host_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RegistrationAck) Reset() { + *x = RegistrationAck{} + mi := &file_fluid_v1_host_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RegistrationAck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegistrationAck) ProtoMessage() {} + +func (x *RegistrationAck) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_host_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegistrationAck.ProtoReflect.Descriptor instead. +func (*RegistrationAck) Descriptor() ([]byte, []int) { + return file_fluid_v1_host_proto_rawDescGZIP(), []int{3} +} + +func (x *RegistrationAck) GetAccepted() bool { + if x != nil { + return x.Accepted + } + return false +} + +func (x *RegistrationAck) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *RegistrationAck) GetAssignedHostId() string { + if x != nil { + return x.AssignedHostId + } + return "" +} + +// Heartbeat is sent periodically by the sandbox host (every 30s) to report +// health and resource availability. 
+type Heartbeat struct { + state protoimpl.MessageState `protogen:"open.v1"` + ActiveSandboxes int32 `protobuf:"varint,1,opt,name=active_sandboxes,json=activeSandboxes,proto3" json:"active_sandboxes,omitempty"` + AvailableCpus int32 `protobuf:"varint,2,opt,name=available_cpus,json=availableCpus,proto3" json:"available_cpus,omitempty"` + AvailableMemoryMb int64 `protobuf:"varint,3,opt,name=available_memory_mb,json=availableMemoryMb,proto3" json:"available_memory_mb,omitempty"` + AvailableDiskMb int64 `protobuf:"varint,4,opt,name=available_disk_mb,json=availableDiskMb,proto3" json:"available_disk_mb,omitempty"` + SourceVmCount int32 `protobuf:"varint,5,opt,name=source_vm_count,json=sourceVmCount,proto3" json:"source_vm_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Heartbeat) Reset() { + *x = Heartbeat{} + mi := &file_fluid_v1_host_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Heartbeat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Heartbeat) ProtoMessage() {} + +func (x *Heartbeat) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_host_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Heartbeat.ProtoReflect.Descriptor instead. 
+func (*Heartbeat) Descriptor() ([]byte, []int) { + return file_fluid_v1_host_proto_rawDescGZIP(), []int{4} +} + +func (x *Heartbeat) GetActiveSandboxes() int32 { + if x != nil { + return x.ActiveSandboxes + } + return 0 +} + +func (x *Heartbeat) GetAvailableCpus() int32 { + if x != nil { + return x.AvailableCpus + } + return 0 +} + +func (x *Heartbeat) GetAvailableMemoryMb() int64 { + if x != nil { + return x.AvailableMemoryMb + } + return 0 +} + +func (x *Heartbeat) GetAvailableDiskMb() int64 { + if x != nil { + return x.AvailableDiskMb + } + return 0 +} + +func (x *Heartbeat) GetSourceVmCount() int32 { + if x != nil { + return x.SourceVmCount + } + return 0 +} + +// ResourceReport is a full resource snapshot sent on reconnection or on demand. +type ResourceReport struct { + state protoimpl.MessageState `protogen:"open.v1"` + TotalCpus int32 `protobuf:"varint,1,opt,name=total_cpus,json=totalCpus,proto3" json:"total_cpus,omitempty"` + TotalMemoryMb int64 `protobuf:"varint,2,opt,name=total_memory_mb,json=totalMemoryMb,proto3" json:"total_memory_mb,omitempty"` + TotalDiskMb int64 `protobuf:"varint,3,opt,name=total_disk_mb,json=totalDiskMb,proto3" json:"total_disk_mb,omitempty"` + AvailableCpus int32 `protobuf:"varint,4,opt,name=available_cpus,json=availableCpus,proto3" json:"available_cpus,omitempty"` + AvailableMemoryMb int64 `protobuf:"varint,5,opt,name=available_memory_mb,json=availableMemoryMb,proto3" json:"available_memory_mb,omitempty"` + AvailableDiskMb int64 `protobuf:"varint,6,opt,name=available_disk_mb,json=availableDiskMb,proto3" json:"available_disk_mb,omitempty"` + BaseImages []string `protobuf:"bytes,10,rep,name=base_images,json=baseImages,proto3" json:"base_images,omitempty"` + SourceVms []*SourceVMInfo `protobuf:"bytes,11,rep,name=source_vms,json=sourceVms,proto3" json:"source_vms,omitempty"` + Bridges []*BridgeInfo `protobuf:"bytes,12,rep,name=bridges,proto3" json:"bridges,omitempty"` + // Per-sandbox status. 
+ SandboxStatuses []*SandboxStatus `protobuf:"bytes,20,rep,name=sandbox_statuses,json=sandboxStatuses,proto3" json:"sandbox_statuses,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ResourceReport) Reset() { + *x = ResourceReport{} + mi := &file_fluid_v1_host_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ResourceReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceReport) ProtoMessage() {} + +func (x *ResourceReport) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_host_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceReport.ProtoReflect.Descriptor instead. +func (*ResourceReport) Descriptor() ([]byte, []int) { + return file_fluid_v1_host_proto_rawDescGZIP(), []int{5} +} + +func (x *ResourceReport) GetTotalCpus() int32 { + if x != nil { + return x.TotalCpus + } + return 0 +} + +func (x *ResourceReport) GetTotalMemoryMb() int64 { + if x != nil { + return x.TotalMemoryMb + } + return 0 +} + +func (x *ResourceReport) GetTotalDiskMb() int64 { + if x != nil { + return x.TotalDiskMb + } + return 0 +} + +func (x *ResourceReport) GetAvailableCpus() int32 { + if x != nil { + return x.AvailableCpus + } + return 0 +} + +func (x *ResourceReport) GetAvailableMemoryMb() int64 { + if x != nil { + return x.AvailableMemoryMb + } + return 0 +} + +func (x *ResourceReport) GetAvailableDiskMb() int64 { + if x != nil { + return x.AvailableDiskMb + } + return 0 +} + +func (x *ResourceReport) GetBaseImages() []string { + if x != nil { + return x.BaseImages + } + return nil +} + +func (x *ResourceReport) GetSourceVms() []*SourceVMInfo { + if x != nil { + return x.SourceVms + } + return nil +} + +func (x *ResourceReport) GetBridges() []*BridgeInfo { 
+ if x != nil { + return x.Bridges + } + return nil +} + +func (x *ResourceReport) GetSandboxStatuses() []*SandboxStatus { + if x != nil { + return x.SandboxStatuses + } + return nil +} + +// SandboxStatus reports the current state of a sandbox on the host. +type SandboxStatus struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + Pid int32 `protobuf:"varint,4,opt,name=pid,proto3" json:"pid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SandboxStatus) Reset() { + *x = SandboxStatus{} + mi := &file_fluid_v1_host_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SandboxStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxStatus) ProtoMessage() {} + +func (x *SandboxStatus) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_host_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxStatus.ProtoReflect.Descriptor instead. 
+func (*SandboxStatus) Descriptor() ([]byte, []int) { + return file_fluid_v1_host_proto_rawDescGZIP(), []int{6} +} + +func (x *SandboxStatus) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *SandboxStatus) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *SandboxStatus) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *SandboxStatus) GetPid() int32 { + if x != nil { + return x.Pid + } + return 0 +} + +// ErrorReport reports an error that occurred on the host. +type ErrorReport struct { + state protoimpl.MessageState `protogen:"open.v1"` + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + SandboxId string `protobuf:"bytes,2,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Context string `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ErrorReport) Reset() { + *x = ErrorReport{} + mi := &file_fluid_v1_host_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ErrorReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorReport) ProtoMessage() {} + +func (x *ErrorReport) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_host_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorReport.ProtoReflect.Descriptor instead. 
+func (*ErrorReport) Descriptor() ([]byte, []int) { + return file_fluid_v1_host_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorReport) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *ErrorReport) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *ErrorReport) GetContext() string { + if x != nil { + return x.Context + } + return "" +} + +var File_fluid_v1_host_proto protoreflect.FileDescriptor + +const file_fluid_v1_host_proto_rawDesc = "" + + "\n" + + "\x13fluid/v1/host.proto\x12\bfluid.v1\"\xd7\x03\n" + + "\x10HostRegistration\x12\x17\n" + + "\ahost_id\x18\x01 \x01(\tR\x06hostId\x12\x1a\n" + + "\bhostname\x18\x02 \x01(\tR\bhostname\x12\x18\n" + + "\aversion\x18\x03 \x01(\tR\aversion\x12\x1d\n" + + "\n" + + "total_cpus\x18\n" + + " \x01(\x05R\ttotalCpus\x12&\n" + + "\x0ftotal_memory_mb\x18\v \x01(\x03R\rtotalMemoryMb\x12\"\n" + + "\rtotal_disk_mb\x18\f \x01(\x03R\vtotalDiskMb\x12%\n" + + "\x0eavailable_cpus\x18\r \x01(\x05R\ravailableCpus\x12.\n" + + "\x13available_memory_mb\x18\x0e \x01(\x03R\x11availableMemoryMb\x12*\n" + + "\x11available_disk_mb\x18\x0f \x01(\x03R\x0favailableDiskMb\x12\x1f\n" + + "\vbase_images\x18\x14 \x03(\tR\n" + + "baseImages\x125\n" + + "\n" + + "source_vms\x18\x15 \x03(\v2\x16.fluid.v1.SourceVMInfoR\tsourceVms\x12.\n" + + "\abridges\x18\x16 \x03(\v2\x14.fluid.v1.BridgeInfoR\abridges\"8\n" + + "\n" + + "BridgeInfo\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06subnet\x18\x02 \x01(\tR\x06subnet\"s\n" + + "\fSourceVMInfo\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x14\n" + + "\x05state\x18\x02 \x01(\tR\x05state\x12\x1d\n" + + "\n" + + "ip_address\x18\x03 \x01(\tR\tipAddress\x12\x1a\n" + + "\bprepared\x18\x04 \x01(\bR\bprepared\"o\n" + + "\x0fRegistrationAck\x12\x1a\n" + + "\baccepted\x18\x01 \x01(\bR\baccepted\x12\x16\n" + + "\x06reason\x18\x02 \x01(\tR\x06reason\x12(\n" + + "\x10assigned_host_id\x18\x03 
\x01(\tR\x0eassignedHostId\"\xe1\x01\n" + + "\tHeartbeat\x12)\n" + + "\x10active_sandboxes\x18\x01 \x01(\x05R\x0factiveSandboxes\x12%\n" + + "\x0eavailable_cpus\x18\x02 \x01(\x05R\ravailableCpus\x12.\n" + + "\x13available_memory_mb\x18\x03 \x01(\x03R\x11availableMemoryMb\x12*\n" + + "\x11available_disk_mb\x18\x04 \x01(\x03R\x0favailableDiskMb\x12&\n" + + "\x0fsource_vm_count\x18\x05 \x01(\x05R\rsourceVmCount\"\xca\x03\n" + + "\x0eResourceReport\x12\x1d\n" + + "\n" + + "total_cpus\x18\x01 \x01(\x05R\ttotalCpus\x12&\n" + + "\x0ftotal_memory_mb\x18\x02 \x01(\x03R\rtotalMemoryMb\x12\"\n" + + "\rtotal_disk_mb\x18\x03 \x01(\x03R\vtotalDiskMb\x12%\n" + + "\x0eavailable_cpus\x18\x04 \x01(\x05R\ravailableCpus\x12.\n" + + "\x13available_memory_mb\x18\x05 \x01(\x03R\x11availableMemoryMb\x12*\n" + + "\x11available_disk_mb\x18\x06 \x01(\x03R\x0favailableDiskMb\x12\x1f\n" + + "\vbase_images\x18\n" + + " \x03(\tR\n" + + "baseImages\x125\n" + + "\n" + + "source_vms\x18\v \x03(\v2\x16.fluid.v1.SourceVMInfoR\tsourceVms\x12.\n" + + "\abridges\x18\f \x03(\v2\x14.fluid.v1.BridgeInfoR\abridges\x12B\n" + + "\x10sandbox_statuses\x18\x14 \x03(\v2\x17.fluid.v1.SandboxStatusR\x0fsandboxStatuses\"u\n" + + "\rSandboxStatus\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x14\n" + + "\x05state\x18\x02 \x01(\tR\x05state\x12\x1d\n" + + "\n" + + "ip_address\x18\x03 \x01(\tR\tipAddress\x12\x10\n" + + "\x03pid\x18\x04 \x01(\x05R\x03pid\"\\\n" + + "\vErrorReport\x12\x14\n" + + "\x05error\x18\x01 \x01(\tR\x05error\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x02 \x01(\tR\tsandboxId\x12\x18\n" + + "\acontext\x18\x03 \x01(\tR\acontextB fluid.v1.SourceVMInfo + 1, // 1: fluid.v1.HostRegistration.bridges:type_name -> fluid.v1.BridgeInfo + 2, // 2: fluid.v1.ResourceReport.source_vms:type_name -> fluid.v1.SourceVMInfo + 1, // 3: fluid.v1.ResourceReport.bridges:type_name -> fluid.v1.BridgeInfo + 6, // 4: fluid.v1.ResourceReport.sandbox_statuses:type_name -> fluid.v1.SandboxStatus + 5, // [5:5] 
is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_fluid_v1_host_proto_init() } +func file_fluid_v1_host_proto_init() { + if File_fluid_v1_host_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_fluid_v1_host_proto_rawDesc), len(file_fluid_v1_host_proto_rawDesc)), + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_fluid_v1_host_proto_goTypes, + DependencyIndexes: file_fluid_v1_host_proto_depIdxs, + MessageInfos: file_fluid_v1_host_proto_msgTypes, + }.Build() + File_fluid_v1_host_proto = out.File + file_fluid_v1_host_proto_goTypes = nil + file_fluid_v1_host_proto_depIdxs = nil +} diff --git a/proto/gen/go/fluid/v1/sandbox.pb.go b/proto/gen/go/fluid/v1/sandbox.pb.go new file mode 100644 index 00000000..c78f1a5e --- /dev/null +++ b/proto/gen/go/fluid/v1/sandbox.pb.go @@ -0,0 +1,1241 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: fluid/v1/sandbox.proto + +package fluidv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// SnapshotMode controls whether to use a cached image or take a fresh snapshot. 
+type SnapshotMode int32 + +const ( + SnapshotMode_SNAPSHOT_MODE_CACHED SnapshotMode = 0 + SnapshotMode_SNAPSHOT_MODE_FRESH SnapshotMode = 1 +) + +// Enum value maps for SnapshotMode. +var ( + SnapshotMode_name = map[int32]string{ + 0: "SNAPSHOT_MODE_CACHED", + 1: "SNAPSHOT_MODE_FRESH", + } + SnapshotMode_value = map[string]int32{ + "SNAPSHOT_MODE_CACHED": 0, + "SNAPSHOT_MODE_FRESH": 1, + } +) + +func (x SnapshotMode) Enum() *SnapshotMode { + p := new(SnapshotMode) + *p = x + return p +} + +func (x SnapshotMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SnapshotMode) Descriptor() protoreflect.EnumDescriptor { + return file_fluid_v1_sandbox_proto_enumTypes[0].Descriptor() +} + +func (SnapshotMode) Type() protoreflect.EnumType { + return &file_fluid_v1_sandbox_proto_enumTypes[0] +} + +func (x SnapshotMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SnapshotMode.Descriptor instead. +func (SnapshotMode) EnumDescriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{0} +} + +// SourceHostConnection carries the credentials needed to connect to a source host. +type SourceHostConnection struct { + state protoimpl.MessageState `protogen:"open.v1"` + // type is "libvirt" or "proxmox". 
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + SshHost string `protobuf:"bytes,2,opt,name=ssh_host,json=sshHost,proto3" json:"ssh_host,omitempty"` + SshPort int32 `protobuf:"varint,3,opt,name=ssh_port,json=sshPort,proto3" json:"ssh_port,omitempty"` + SshUser string `protobuf:"bytes,4,opt,name=ssh_user,json=sshUser,proto3" json:"ssh_user,omitempty"` + SshIdentityFile string `protobuf:"bytes,5,opt,name=ssh_identity_file,json=sshIdentityFile,proto3" json:"ssh_identity_file,omitempty"` + ProxmoxHost string `protobuf:"bytes,6,opt,name=proxmox_host,json=proxmoxHost,proto3" json:"proxmox_host,omitempty"` + ProxmoxTokenId string `protobuf:"bytes,7,opt,name=proxmox_token_id,json=proxmoxTokenId,proto3" json:"proxmox_token_id,omitempty"` + ProxmoxSecret string `protobuf:"bytes,8,opt,name=proxmox_secret,json=proxmoxSecret,proto3" json:"proxmox_secret,omitempty"` + ProxmoxNode string `protobuf:"bytes,9,opt,name=proxmox_node,json=proxmoxNode,proto3" json:"proxmox_node,omitempty"` + ProxmoxVerifySsl bool `protobuf:"varint,10,opt,name=proxmox_verify_ssl,json=proxmoxVerifySsl,proto3" json:"proxmox_verify_ssl,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceHostConnection) Reset() { + *x = SourceHostConnection{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceHostConnection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceHostConnection) ProtoMessage() {} + +func (x *SourceHostConnection) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceHostConnection.ProtoReflect.Descriptor instead. 
+func (*SourceHostConnection) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{0} +} + +func (x *SourceHostConnection) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *SourceHostConnection) GetSshHost() string { + if x != nil { + return x.SshHost + } + return "" +} + +func (x *SourceHostConnection) GetSshPort() int32 { + if x != nil { + return x.SshPort + } + return 0 +} + +func (x *SourceHostConnection) GetSshUser() string { + if x != nil { + return x.SshUser + } + return "" +} + +func (x *SourceHostConnection) GetSshIdentityFile() string { + if x != nil { + return x.SshIdentityFile + } + return "" +} + +func (x *SourceHostConnection) GetProxmoxHost() string { + if x != nil { + return x.ProxmoxHost + } + return "" +} + +func (x *SourceHostConnection) GetProxmoxTokenId() string { + if x != nil { + return x.ProxmoxTokenId + } + return "" +} + +func (x *SourceHostConnection) GetProxmoxSecret() string { + if x != nil { + return x.ProxmoxSecret + } + return "" +} + +func (x *SourceHostConnection) GetProxmoxNode() string { + if x != nil { + return x.ProxmoxNode + } + return "" +} + +func (x *SourceHostConnection) GetProxmoxVerifySsl() bool { + if x != nil { + return x.ProxmoxVerifySsl + } + return false +} + +// CreateSandboxCommand instructs a sandbox host to create a new microVM sandbox. +type CreateSandboxCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + // sandbox_id is assigned by the control plane. + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + // base_image is the QCOW2 base image filename to use as backing file. + BaseImage string `protobuf:"bytes,2,opt,name=base_image,json=baseImage,proto3" json:"base_image,omitempty"` + // name is the human-readable sandbox name. + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // vcpus is the number of virtual CPUs to allocate. 
+ Vcpus int32 `protobuf:"varint,4,opt,name=vcpus,proto3" json:"vcpus,omitempty"` + // memory_mb is the amount of memory in megabytes. + MemoryMb int32 `protobuf:"varint,5,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb,omitempty"` + // ttl_seconds is the time-to-live before automatic cleanup. 0 = no TTL. + TtlSeconds int32 `protobuf:"varint,6,opt,name=ttl_seconds,json=ttlSeconds,proto3" json:"ttl_seconds,omitempty"` + // agent_id identifies the agent that requested this sandbox. + AgentId string `protobuf:"bytes,7,opt,name=agent_id,json=agentId,proto3" json:"agent_id,omitempty"` + // network optionally overrides bridge selection. If empty, the host + // resolves the bridge from the source VM's network or default. + Network string `protobuf:"bytes,8,opt,name=network,proto3" json:"network,omitempty"` + // source_vm is the source VM name used for network resolution. + SourceVm string `protobuf:"bytes,9,opt,name=source_vm,json=sourceVm,proto3" json:"source_vm,omitempty"` + // ssh_public_key is injected into the sandbox for SSH access. + SshPublicKey string `protobuf:"bytes,10,opt,name=ssh_public_key,json=sshPublicKey,proto3" json:"ssh_public_key,omitempty"` + // snapshot_mode controls cached vs fresh snapshot behavior. + SnapshotMode SnapshotMode `protobuf:"varint,11,opt,name=snapshot_mode,json=snapshotMode,proto3,enum=fluid.v1.SnapshotMode" json:"snapshot_mode,omitempty"` + // source_host_connection carries credentials for the remote source host. + SourceHostConnection *SourceHostConnection `protobuf:"bytes,12,opt,name=source_host_connection,json=sourceHostConnection,proto3" json:"source_host_connection,omitempty"` + // live controls whether to clone from the VM's current live state (true) + // or use a cached image if available (false, default). 
+ Live bool `protobuf:"varint,13,opt,name=live,proto3" json:"live,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSandboxCommand) Reset() { + *x = CreateSandboxCommand{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSandboxCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSandboxCommand) ProtoMessage() {} + +func (x *CreateSandboxCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSandboxCommand.ProtoReflect.Descriptor instead. +func (*CreateSandboxCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateSandboxCommand) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *CreateSandboxCommand) GetBaseImage() string { + if x != nil { + return x.BaseImage + } + return "" +} + +func (x *CreateSandboxCommand) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateSandboxCommand) GetVcpus() int32 { + if x != nil { + return x.Vcpus + } + return 0 +} + +func (x *CreateSandboxCommand) GetMemoryMb() int32 { + if x != nil { + return x.MemoryMb + } + return 0 +} + +func (x *CreateSandboxCommand) GetTtlSeconds() int32 { + if x != nil { + return x.TtlSeconds + } + return 0 +} + +func (x *CreateSandboxCommand) GetAgentId() string { + if x != nil { + return x.AgentId + } + return "" +} + +func (x *CreateSandboxCommand) GetNetwork() string { + if x != nil { + return x.Network + } + return "" +} + +func (x *CreateSandboxCommand) GetSourceVm() string { + if x != nil { + return x.SourceVm + } + return "" 
+} + +func (x *CreateSandboxCommand) GetSshPublicKey() string { + if x != nil { + return x.SshPublicKey + } + return "" +} + +func (x *CreateSandboxCommand) GetSnapshotMode() SnapshotMode { + if x != nil { + return x.SnapshotMode + } + return SnapshotMode_SNAPSHOT_MODE_CACHED +} + +func (x *CreateSandboxCommand) GetSourceHostConnection() *SourceHostConnection { + if x != nil { + return x.SourceHostConnection + } + return nil +} + +func (x *CreateSandboxCommand) GetLive() bool { + if x != nil { + return x.Live + } + return false +} + +// SandboxCreated is sent by the host after successfully creating a sandbox. +type SandboxCreated struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` + IpAddress string `protobuf:"bytes,4,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + MacAddress string `protobuf:"bytes,5,opt,name=mac_address,json=macAddress,proto3" json:"mac_address,omitempty"` + Bridge string `protobuf:"bytes,6,opt,name=bridge,proto3" json:"bridge,omitempty"` + Pid int32 `protobuf:"varint,7,opt,name=pid,proto3" json:"pid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SandboxCreated) Reset() { + *x = SandboxCreated{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SandboxCreated) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxCreated) ProtoMessage() {} + +func (x *SandboxCreated) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxCreated.ProtoReflect.Descriptor instead. +func (*SandboxCreated) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{2} +} + +func (x *SandboxCreated) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *SandboxCreated) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SandboxCreated) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *SandboxCreated) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *SandboxCreated) GetMacAddress() string { + if x != nil { + return x.MacAddress + } + return "" +} + +func (x *SandboxCreated) GetBridge() string { + if x != nil { + return x.Bridge + } + return "" +} + +func (x *SandboxCreated) GetPid() int32 { + if x != nil { + return x.Pid + } + return 0 +} + +// DestroySandboxCommand instructs the host to destroy a sandbox. 
+type DestroySandboxCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DestroySandboxCommand) Reset() { + *x = DestroySandboxCommand{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DestroySandboxCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DestroySandboxCommand) ProtoMessage() {} + +func (x *DestroySandboxCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DestroySandboxCommand.ProtoReflect.Descriptor instead. +func (*DestroySandboxCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{3} +} + +func (x *DestroySandboxCommand) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +// SandboxDestroyed confirms a sandbox has been destroyed. 
+type SandboxDestroyed struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SandboxDestroyed) Reset() { + *x = SandboxDestroyed{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SandboxDestroyed) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxDestroyed) ProtoMessage() {} + +func (x *SandboxDestroyed) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxDestroyed.ProtoReflect.Descriptor instead. +func (*SandboxDestroyed) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{4} +} + +func (x *SandboxDestroyed) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +// StartSandboxCommand instructs the host to start a stopped sandbox. 
+type StartSandboxCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartSandboxCommand) Reset() { + *x = StartSandboxCommand{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartSandboxCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartSandboxCommand) ProtoMessage() {} + +func (x *StartSandboxCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartSandboxCommand.ProtoReflect.Descriptor instead. +func (*StartSandboxCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{5} +} + +func (x *StartSandboxCommand) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +// SandboxStarted confirms a sandbox has been started. 
+type SandboxStarted struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SandboxStarted) Reset() { + *x = SandboxStarted{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SandboxStarted) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxStarted) ProtoMessage() {} + +func (x *SandboxStarted) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxStarted.ProtoReflect.Descriptor instead. +func (*SandboxStarted) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{6} +} + +func (x *SandboxStarted) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *SandboxStarted) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *SandboxStarted) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +// StopSandboxCommand instructs the host to stop a running sandbox. 
+type StopSandboxCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StopSandboxCommand) Reset() { + *x = StopSandboxCommand{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StopSandboxCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopSandboxCommand) ProtoMessage() {} + +func (x *StopSandboxCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopSandboxCommand.ProtoReflect.Descriptor instead. +func (*StopSandboxCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{7} +} + +func (x *StopSandboxCommand) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *StopSandboxCommand) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +// SandboxStopped confirms a sandbox has been stopped. 
+type SandboxStopped struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SandboxStopped) Reset() { + *x = SandboxStopped{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SandboxStopped) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxStopped) ProtoMessage() {} + +func (x *SandboxStopped) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxStopped.ProtoReflect.Descriptor instead. +func (*SandboxStopped) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{8} +} + +func (x *SandboxStopped) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *SandboxStopped) GetState() string { + if x != nil { + return x.State + } + return "" +} + +// SandboxStateChanged reports any sandbox state transition. 
+type SandboxStateChanged struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + PreviousState string `protobuf:"bytes,2,opt,name=previous_state,json=previousState,proto3" json:"previous_state,omitempty"` + NewState string `protobuf:"bytes,3,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SandboxStateChanged) Reset() { + *x = SandboxStateChanged{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SandboxStateChanged) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxStateChanged) ProtoMessage() {} + +func (x *SandboxStateChanged) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxStateChanged.ProtoReflect.Descriptor instead. +func (*SandboxStateChanged) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{9} +} + +func (x *SandboxStateChanged) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *SandboxStateChanged) GetPreviousState() string { + if x != nil { + return x.PreviousState + } + return "" +} + +func (x *SandboxStateChanged) GetNewState() string { + if x != nil { + return x.NewState + } + return "" +} + +func (x *SandboxStateChanged) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +// RunCommandCommand instructs the host to execute a command in a sandbox via SSH. 
+type RunCommandCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Command string `protobuf:"bytes,2,opt,name=command,proto3" json:"command,omitempty"` + TimeoutSeconds int32 `protobuf:"varint,3,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` + Env map[string]string `protobuf:"bytes,4,rep,name=env,proto3" json:"env,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RunCommandCommand) Reset() { + *x = RunCommandCommand{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RunCommandCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunCommandCommand) ProtoMessage() {} + +func (x *RunCommandCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunCommandCommand.ProtoReflect.Descriptor instead. +func (*RunCommandCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{10} +} + +func (x *RunCommandCommand) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *RunCommandCommand) GetCommand() string { + if x != nil { + return x.Command + } + return "" +} + +func (x *RunCommandCommand) GetTimeoutSeconds() int32 { + if x != nil { + return x.TimeoutSeconds + } + return 0 +} + +func (x *RunCommandCommand) GetEnv() map[string]string { + if x != nil { + return x.Env + } + return nil +} + +// CommandResult returns the output of a command execution. 
+type CommandResult struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Stdout string `protobuf:"bytes,2,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,3,opt,name=stderr,proto3" json:"stderr,omitempty"` + ExitCode int32 `protobuf:"varint,4,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` + DurationMs int64 `protobuf:"varint,5,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CommandResult) Reset() { + *x = CommandResult{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CommandResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommandResult) ProtoMessage() {} + +func (x *CommandResult) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommandResult.ProtoReflect.Descriptor instead. 
+func (*CommandResult) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{11} +} + +func (x *CommandResult) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *CommandResult) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *CommandResult) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *CommandResult) GetExitCode() int32 { + if x != nil { + return x.ExitCode + } + return 0 +} + +func (x *CommandResult) GetDurationMs() int64 { + if x != nil { + return x.DurationMs + } + return 0 +} + +// SnapshotCommand instructs the host to snapshot a sandbox. +type SnapshotCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + SnapshotName string `protobuf:"bytes,2,opt,name=snapshot_name,json=snapshotName,proto3" json:"snapshot_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SnapshotCommand) Reset() { + *x = SnapshotCommand{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SnapshotCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SnapshotCommand) ProtoMessage() {} + +func (x *SnapshotCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SnapshotCommand.ProtoReflect.Descriptor instead. 
+func (*SnapshotCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{12} +} + +func (x *SnapshotCommand) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *SnapshotCommand) GetSnapshotName() string { + if x != nil { + return x.SnapshotName + } + return "" +} + +// SnapshotCreated confirms a snapshot was taken. +type SnapshotCreated struct { + state protoimpl.MessageState `protogen:"open.v1"` + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + SnapshotId string `protobuf:"bytes,2,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + SnapshotName string `protobuf:"bytes,3,opt,name=snapshot_name,json=snapshotName,proto3" json:"snapshot_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SnapshotCreated) Reset() { + *x = SnapshotCreated{} + mi := &file_fluid_v1_sandbox_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SnapshotCreated) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SnapshotCreated) ProtoMessage() {} + +func (x *SnapshotCreated) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_sandbox_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SnapshotCreated.ProtoReflect.Descriptor instead. 
+func (*SnapshotCreated) Descriptor() ([]byte, []int) { + return file_fluid_v1_sandbox_proto_rawDescGZIP(), []int{13} +} + +func (x *SnapshotCreated) GetSandboxId() string { + if x != nil { + return x.SandboxId + } + return "" +} + +func (x *SnapshotCreated) GetSnapshotId() string { + if x != nil { + return x.SnapshotId + } + return "" +} + +func (x *SnapshotCreated) GetSnapshotName() string { + if x != nil { + return x.SnapshotName + } + return "" +} + +var File_fluid_v1_sandbox_proto protoreflect.FileDescriptor + +const file_fluid_v1_sandbox_proto_rawDesc = "" + + "\n" + + "\x16fluid/v1/sandbox.proto\x12\bfluid.v1\"\xec\x02\n" + + "\x14SourceHostConnection\x12\x12\n" + + "\x04type\x18\x01 \x01(\tR\x04type\x12\x19\n" + + "\bssh_host\x18\x02 \x01(\tR\asshHost\x12\x19\n" + + "\bssh_port\x18\x03 \x01(\x05R\asshPort\x12\x19\n" + + "\bssh_user\x18\x04 \x01(\tR\asshUser\x12*\n" + + "\x11ssh_identity_file\x18\x05 \x01(\tR\x0fsshIdentityFile\x12!\n" + + "\fproxmox_host\x18\x06 \x01(\tR\vproxmoxHost\x12(\n" + + "\x10proxmox_token_id\x18\a \x01(\tR\x0eproxmoxTokenId\x12%\n" + + "\x0eproxmox_secret\x18\b \x01(\tR\rproxmoxSecret\x12!\n" + + "\fproxmox_node\x18\t \x01(\tR\vproxmoxNode\x12,\n" + + "\x12proxmox_verify_ssl\x18\n" + + " \x01(\bR\x10proxmoxVerifySsl\"\xdb\x03\n" + + "\x14CreateSandboxCommand\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x1d\n" + + "\n" + + "base_image\x18\x02 \x01(\tR\tbaseImage\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\x12\x14\n" + + "\x05vcpus\x18\x04 \x01(\x05R\x05vcpus\x12\x1b\n" + + "\tmemory_mb\x18\x05 \x01(\x05R\bmemoryMb\x12\x1f\n" + + "\vttl_seconds\x18\x06 \x01(\x05R\n" + + "ttlSeconds\x12\x19\n" + + "\bagent_id\x18\a \x01(\tR\aagentId\x12\x18\n" + + "\anetwork\x18\b \x01(\tR\anetwork\x12\x1b\n" + + "\tsource_vm\x18\t \x01(\tR\bsourceVm\x12$\n" + + "\x0essh_public_key\x18\n" + + " \x01(\tR\fsshPublicKey\x12;\n" + + "\rsnapshot_mode\x18\v \x01(\x0e2\x16.fluid.v1.SnapshotModeR\fsnapshotMode\x12T\n" + + 
"\x16source_host_connection\x18\f \x01(\v2\x1e.fluid.v1.SourceHostConnectionR\x14sourceHostConnection\x12\x12\n" + + "\x04live\x18\r \x01(\bR\x04live\"\xc3\x01\n" + + "\x0eSandboxCreated\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12\x14\n" + + "\x05state\x18\x03 \x01(\tR\x05state\x12\x1d\n" + + "\n" + + "ip_address\x18\x04 \x01(\tR\tipAddress\x12\x1f\n" + + "\vmac_address\x18\x05 \x01(\tR\n" + + "macAddress\x12\x16\n" + + "\x06bridge\x18\x06 \x01(\tR\x06bridge\x12\x10\n" + + "\x03pid\x18\a \x01(\x05R\x03pid\"6\n" + + "\x15DestroySandboxCommand\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\"1\n" + + "\x10SandboxDestroyed\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\"4\n" + + "\x13StartSandboxCommand\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\"d\n" + + "\x0eSandboxStarted\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x14\n" + + "\x05state\x18\x02 \x01(\tR\x05state\x12\x1d\n" + + "\n" + + "ip_address\x18\x03 \x01(\tR\tipAddress\"I\n" + + "\x12StopSandboxCommand\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x14\n" + + "\x05force\x18\x02 \x01(\bR\x05force\"E\n" + + "\x0eSandboxStopped\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x14\n" + + "\x05state\x18\x02 \x01(\tR\x05state\"\x90\x01\n" + + "\x13SandboxStateChanged\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12%\n" + + "\x0eprevious_state\x18\x02 \x01(\tR\rpreviousState\x12\x1b\n" + + "\tnew_state\x18\x03 \x01(\tR\bnewState\x12\x16\n" + + "\x06reason\x18\x04 \x01(\tR\x06reason\"\xe5\x01\n" + + "\x11RunCommandCommand\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x18\n" + + "\acommand\x18\x02 \x01(\tR\acommand\x12'\n" + + "\x0ftimeout_seconds\x18\x03 \x01(\x05R\x0etimeoutSeconds\x126\n" + + "\x03env\x18\x04 \x03(\v2$.fluid.v1.RunCommandCommand.EnvEntryR\x03env\x1a6\n" + + 
"\bEnvEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x9c\x01\n" + + "\rCommandResult\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x16\n" + + "\x06stdout\x18\x02 \x01(\tR\x06stdout\x12\x16\n" + + "\x06stderr\x18\x03 \x01(\tR\x06stderr\x12\x1b\n" + + "\texit_code\x18\x04 \x01(\x05R\bexitCode\x12\x1f\n" + + "\vduration_ms\x18\x05 \x01(\x03R\n" + + "durationMs\"U\n" + + "\x0fSnapshotCommand\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12#\n" + + "\rsnapshot_name\x18\x02 \x01(\tR\fsnapshotName\"v\n" + + "\x0fSnapshotCreated\x12\x1d\n" + + "\n" + + "sandbox_id\x18\x01 \x01(\tR\tsandboxId\x12\x1f\n" + + "\vsnapshot_id\x18\x02 \x01(\tR\n" + + "snapshotId\x12#\n" + + "\rsnapshot_name\x18\x03 \x01(\tR\fsnapshotName*A\n" + + "\fSnapshotMode\x12\x18\n" + + "\x14SNAPSHOT_MODE_CACHED\x10\x00\x12\x17\n" + + "\x13SNAPSHOT_MODE_FRESH\x10\x01B fluid.v1.SnapshotMode + 1, // 1: fluid.v1.CreateSandboxCommand.source_host_connection:type_name -> fluid.v1.SourceHostConnection + 15, // 2: fluid.v1.RunCommandCommand.env:type_name -> fluid.v1.RunCommandCommand.EnvEntry + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_fluid_v1_sandbox_proto_init() } +func file_fluid_v1_sandbox_proto_init() { + if File_fluid_v1_sandbox_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_fluid_v1_sandbox_proto_rawDesc), len(file_fluid_v1_sandbox_proto_rawDesc)), + NumEnums: 1, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_fluid_v1_sandbox_proto_goTypes, + DependencyIndexes: 
file_fluid_v1_sandbox_proto_depIdxs, + EnumInfos: file_fluid_v1_sandbox_proto_enumTypes, + MessageInfos: file_fluid_v1_sandbox_proto_msgTypes, + }.Build() + File_fluid_v1_sandbox_proto = out.File + file_fluid_v1_sandbox_proto_goTypes = nil + file_fluid_v1_sandbox_proto_depIdxs = nil +} diff --git a/proto/gen/go/fluid/v1/source.pb.go b/proto/gen/go/fluid/v1/source.pb.go new file mode 100644 index 00000000..23d62b1c --- /dev/null +++ b/proto/gen/go/fluid/v1/source.pb.go @@ -0,0 +1,855 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: fluid/v1/source.proto + +package fluidv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// PrepareSourceVMCommand instructs the host to prepare a source VM for +// read-only access (install restricted shell, create fluid-readonly user, etc.). 
+type PrepareSourceVMCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceVm string `protobuf:"bytes,1,opt,name=source_vm,json=sourceVm,proto3" json:"source_vm,omitempty"` + SshUser string `protobuf:"bytes,2,opt,name=ssh_user,json=sshUser,proto3" json:"ssh_user,omitempty"` + SshKeyPath string `protobuf:"bytes,3,opt,name=ssh_key_path,json=sshKeyPath,proto3" json:"ssh_key_path,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PrepareSourceVMCommand) Reset() { + *x = PrepareSourceVMCommand{} + mi := &file_fluid_v1_source_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PrepareSourceVMCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrepareSourceVMCommand) ProtoMessage() {} + +func (x *PrepareSourceVMCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_source_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareSourceVMCommand.ProtoReflect.Descriptor instead. +func (*PrepareSourceVMCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_source_proto_rawDescGZIP(), []int{0} +} + +func (x *PrepareSourceVMCommand) GetSourceVm() string { + if x != nil { + return x.SourceVm + } + return "" +} + +func (x *PrepareSourceVMCommand) GetSshUser() string { + if x != nil { + return x.SshUser + } + return "" +} + +func (x *PrepareSourceVMCommand) GetSshKeyPath() string { + if x != nil { + return x.SshKeyPath + } + return "" +} + +// SourceVMPrepared reports the result of preparing a source VM. 
+type SourceVMPrepared struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceVm string `protobuf:"bytes,1,opt,name=source_vm,json=sourceVm,proto3" json:"source_vm,omitempty"` + IpAddress string `protobuf:"bytes,2,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + Prepared bool `protobuf:"varint,3,opt,name=prepared,proto3" json:"prepared,omitempty"` + UserCreated bool `protobuf:"varint,4,opt,name=user_created,json=userCreated,proto3" json:"user_created,omitempty"` + ShellInstalled bool `protobuf:"varint,5,opt,name=shell_installed,json=shellInstalled,proto3" json:"shell_installed,omitempty"` + CaKeyInstalled bool `protobuf:"varint,6,opt,name=ca_key_installed,json=caKeyInstalled,proto3" json:"ca_key_installed,omitempty"` + SshdConfigured bool `protobuf:"varint,7,opt,name=sshd_configured,json=sshdConfigured,proto3" json:"sshd_configured,omitempty"` + PrincipalsCreated bool `protobuf:"varint,8,opt,name=principals_created,json=principalsCreated,proto3" json:"principals_created,omitempty"` + SshdRestarted bool `protobuf:"varint,9,opt,name=sshd_restarted,json=sshdRestarted,proto3" json:"sshd_restarted,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceVMPrepared) Reset() { + *x = SourceVMPrepared{} + mi := &file_fluid_v1_source_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceVMPrepared) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceVMPrepared) ProtoMessage() {} + +func (x *SourceVMPrepared) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_source_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceVMPrepared.ProtoReflect.Descriptor instead. 
+func (*SourceVMPrepared) Descriptor() ([]byte, []int) { + return file_fluid_v1_source_proto_rawDescGZIP(), []int{1} +} + +func (x *SourceVMPrepared) GetSourceVm() string { + if x != nil { + return x.SourceVm + } + return "" +} + +func (x *SourceVMPrepared) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *SourceVMPrepared) GetPrepared() bool { + if x != nil { + return x.Prepared + } + return false +} + +func (x *SourceVMPrepared) GetUserCreated() bool { + if x != nil { + return x.UserCreated + } + return false +} + +func (x *SourceVMPrepared) GetShellInstalled() bool { + if x != nil { + return x.ShellInstalled + } + return false +} + +func (x *SourceVMPrepared) GetCaKeyInstalled() bool { + if x != nil { + return x.CaKeyInstalled + } + return false +} + +func (x *SourceVMPrepared) GetSshdConfigured() bool { + if x != nil { + return x.SshdConfigured + } + return false +} + +func (x *SourceVMPrepared) GetPrincipalsCreated() bool { + if x != nil { + return x.PrincipalsCreated + } + return false +} + +func (x *SourceVMPrepared) GetSshdRestarted() bool { + if x != nil { + return x.SshdRestarted + } + return false +} + +// RunSourceCommandCommand instructs the host to run a read-only command +// on a source VM via the fluid-readonly user. 
+type RunSourceCommandCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceVm string `protobuf:"bytes,1,opt,name=source_vm,json=sourceVm,proto3" json:"source_vm,omitempty"` + Command string `protobuf:"bytes,2,opt,name=command,proto3" json:"command,omitempty"` + TimeoutSeconds int32 `protobuf:"varint,3,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RunSourceCommandCommand) Reset() { + *x = RunSourceCommandCommand{} + mi := &file_fluid_v1_source_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RunSourceCommandCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunSourceCommandCommand) ProtoMessage() {} + +func (x *RunSourceCommandCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_source_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunSourceCommandCommand.ProtoReflect.Descriptor instead. +func (*RunSourceCommandCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_source_proto_rawDescGZIP(), []int{2} +} + +func (x *RunSourceCommandCommand) GetSourceVm() string { + if x != nil { + return x.SourceVm + } + return "" +} + +func (x *RunSourceCommandCommand) GetCommand() string { + if x != nil { + return x.Command + } + return "" +} + +func (x *RunSourceCommandCommand) GetTimeoutSeconds() int32 { + if x != nil { + return x.TimeoutSeconds + } + return 0 +} + +// SourceCommandResult returns the output of a source VM command. 
+type SourceCommandResult struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceVm string `protobuf:"bytes,1,opt,name=source_vm,json=sourceVm,proto3" json:"source_vm,omitempty"` + ExitCode int32 `protobuf:"varint,2,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` + Stdout string `protobuf:"bytes,3,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,4,opt,name=stderr,proto3" json:"stderr,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceCommandResult) Reset() { + *x = SourceCommandResult{} + mi := &file_fluid_v1_source_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceCommandResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCommandResult) ProtoMessage() {} + +func (x *SourceCommandResult) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_source_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCommandResult.ProtoReflect.Descriptor instead. +func (*SourceCommandResult) Descriptor() ([]byte, []int) { + return file_fluid_v1_source_proto_rawDescGZIP(), []int{3} +} + +func (x *SourceCommandResult) GetSourceVm() string { + if x != nil { + return x.SourceVm + } + return "" +} + +func (x *SourceCommandResult) GetExitCode() int32 { + if x != nil { + return x.ExitCode + } + return 0 +} + +func (x *SourceCommandResult) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *SourceCommandResult) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +// ReadSourceFileCommand instructs the host to read a file from a source VM. 
+type ReadSourceFileCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceVm string `protobuf:"bytes,1,opt,name=source_vm,json=sourceVm,proto3" json:"source_vm,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReadSourceFileCommand) Reset() { + *x = ReadSourceFileCommand{} + mi := &file_fluid_v1_source_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReadSourceFileCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadSourceFileCommand) ProtoMessage() {} + +func (x *ReadSourceFileCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_source_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadSourceFileCommand.ProtoReflect.Descriptor instead. +func (*ReadSourceFileCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_source_proto_rawDescGZIP(), []int{4} +} + +func (x *ReadSourceFileCommand) GetSourceVm() string { + if x != nil { + return x.SourceVm + } + return "" +} + +func (x *ReadSourceFileCommand) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +// SourceFileResult returns the content of a file from a source VM. 
+type SourceFileResult struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceVm string `protobuf:"bytes,1,opt,name=source_vm,json=sourceVm,proto3" json:"source_vm,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Content string `protobuf:"bytes,3,opt,name=content,proto3" json:"content,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceFileResult) Reset() { + *x = SourceFileResult{} + mi := &file_fluid_v1_source_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceFileResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceFileResult) ProtoMessage() {} + +func (x *SourceFileResult) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_source_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceFileResult.ProtoReflect.Descriptor instead. +func (*SourceFileResult) Descriptor() ([]byte, []int) { + return file_fluid_v1_source_proto_rawDescGZIP(), []int{5} +} + +func (x *SourceFileResult) GetSourceVm() string { + if x != nil { + return x.SourceVm + } + return "" +} + +func (x *SourceFileResult) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *SourceFileResult) GetContent() string { + if x != nil { + return x.Content + } + return "" +} + +// ListSourceVMsCommand instructs the host to list available source VMs. 
+type ListSourceVMsCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListSourceVMsCommand) Reset() { + *x = ListSourceVMsCommand{} + mi := &file_fluid_v1_source_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSourceVMsCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSourceVMsCommand) ProtoMessage() {} + +func (x *ListSourceVMsCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_source_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSourceVMsCommand.ProtoReflect.Descriptor instead. +func (*ListSourceVMsCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_source_proto_rawDescGZIP(), []int{6} +} + +// SourceVMsList returns the list of source VMs on a host. 
+type SourceVMsList struct { + state protoimpl.MessageState `protogen:"open.v1"` + Vms []*SourceVMListEntry `protobuf:"bytes,1,rep,name=vms,proto3" json:"vms,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceVMsList) Reset() { + *x = SourceVMsList{} + mi := &file_fluid_v1_source_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceVMsList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceVMsList) ProtoMessage() {} + +func (x *SourceVMsList) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_source_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceVMsList.ProtoReflect.Descriptor instead. +func (*SourceVMsList) Descriptor() ([]byte, []int) { + return file_fluid_v1_source_proto_rawDescGZIP(), []int{7} +} + +func (x *SourceVMsList) GetVms() []*SourceVMListEntry { + if x != nil { + return x.Vms + } + return nil +} + +type SourceVMListEntry struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + Prepared bool `protobuf:"varint,4,opt,name=prepared,proto3" json:"prepared,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceVMListEntry) Reset() { + *x = SourceVMListEntry{} + mi := &file_fluid_v1_source_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceVMListEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*SourceVMListEntry) ProtoMessage() {} + +func (x *SourceVMListEntry) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_source_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceVMListEntry.ProtoReflect.Descriptor instead. +func (*SourceVMListEntry) Descriptor() ([]byte, []int) { + return file_fluid_v1_source_proto_rawDescGZIP(), []int{8} +} + +func (x *SourceVMListEntry) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SourceVMListEntry) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *SourceVMListEntry) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *SourceVMListEntry) GetPrepared() bool { + if x != nil { + return x.Prepared + } + return false +} + +// ValidateSourceVMCommand instructs the host to validate a source VM's +// readiness for read-only access. 
+type ValidateSourceVMCommand struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceVm string `protobuf:"bytes,1,opt,name=source_vm,json=sourceVm,proto3" json:"source_vm,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateSourceVMCommand) Reset() { + *x = ValidateSourceVMCommand{} + mi := &file_fluid_v1_source_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateSourceVMCommand) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateSourceVMCommand) ProtoMessage() {} + +func (x *ValidateSourceVMCommand) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_source_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateSourceVMCommand.ProtoReflect.Descriptor instead. +func (*ValidateSourceVMCommand) Descriptor() ([]byte, []int) { + return file_fluid_v1_source_proto_rawDescGZIP(), []int{9} +} + +func (x *ValidateSourceVMCommand) GetSourceVm() string { + if x != nil { + return x.SourceVm + } + return "" +} + +// SourceVMValidation returns the validation result for a source VM. 
+type SourceVMValidation struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceVm string `protobuf:"bytes,1,opt,name=source_vm,json=sourceVm,proto3" json:"source_vm,omitempty"` + Valid bool `protobuf:"varint,2,opt,name=valid,proto3" json:"valid,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` + MacAddress string `protobuf:"bytes,4,opt,name=mac_address,json=macAddress,proto3" json:"mac_address,omitempty"` + IpAddress string `protobuf:"bytes,5,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + HasNetwork bool `protobuf:"varint,6,opt,name=has_network,json=hasNetwork,proto3" json:"has_network,omitempty"` + Warnings []string `protobuf:"bytes,7,rep,name=warnings,proto3" json:"warnings,omitempty"` + Errors []string `protobuf:"bytes,8,rep,name=errors,proto3" json:"errors,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceVMValidation) Reset() { + *x = SourceVMValidation{} + mi := &file_fluid_v1_source_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceVMValidation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceVMValidation) ProtoMessage() {} + +func (x *SourceVMValidation) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_source_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceVMValidation.ProtoReflect.Descriptor instead. 
+func (*SourceVMValidation) Descriptor() ([]byte, []int) { + return file_fluid_v1_source_proto_rawDescGZIP(), []int{10} +} + +func (x *SourceVMValidation) GetSourceVm() string { + if x != nil { + return x.SourceVm + } + return "" +} + +func (x *SourceVMValidation) GetValid() bool { + if x != nil { + return x.Valid + } + return false +} + +func (x *SourceVMValidation) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *SourceVMValidation) GetMacAddress() string { + if x != nil { + return x.MacAddress + } + return "" +} + +func (x *SourceVMValidation) GetIpAddress() string { + if x != nil { + return x.IpAddress + } + return "" +} + +func (x *SourceVMValidation) GetHasNetwork() bool { + if x != nil { + return x.HasNetwork + } + return false +} + +func (x *SourceVMValidation) GetWarnings() []string { + if x != nil { + return x.Warnings + } + return nil +} + +func (x *SourceVMValidation) GetErrors() []string { + if x != nil { + return x.Errors + } + return nil +} + +var File_fluid_v1_source_proto protoreflect.FileDescriptor + +const file_fluid_v1_source_proto_rawDesc = "" + + "\n" + + "\x15fluid/v1/source.proto\x12\bfluid.v1\"r\n" + + "\x16PrepareSourceVMCommand\x12\x1b\n" + + "\tsource_vm\x18\x01 \x01(\tR\bsourceVm\x12\x19\n" + + "\bssh_user\x18\x02 \x01(\tR\asshUser\x12 \n" + + "\fssh_key_path\x18\x03 \x01(\tR\n" + + "sshKeyPath\"\xdf\x02\n" + + "\x10SourceVMPrepared\x12\x1b\n" + + "\tsource_vm\x18\x01 \x01(\tR\bsourceVm\x12\x1d\n" + + "\n" + + "ip_address\x18\x02 \x01(\tR\tipAddress\x12\x1a\n" + + "\bprepared\x18\x03 \x01(\bR\bprepared\x12!\n" + + "\fuser_created\x18\x04 \x01(\bR\vuserCreated\x12'\n" + + "\x0fshell_installed\x18\x05 \x01(\bR\x0eshellInstalled\x12(\n" + + "\x10ca_key_installed\x18\x06 \x01(\bR\x0ecaKeyInstalled\x12'\n" + + "\x0fsshd_configured\x18\a \x01(\bR\x0esshdConfigured\x12-\n" + + "\x12principals_created\x18\b \x01(\bR\x11principalsCreated\x12%\n" + + "\x0esshd_restarted\x18\t \x01(\bR\rsshdRestarted\"y\n" + + 
"\x17RunSourceCommandCommand\x12\x1b\n" + + "\tsource_vm\x18\x01 \x01(\tR\bsourceVm\x12\x18\n" + + "\acommand\x18\x02 \x01(\tR\acommand\x12'\n" + + "\x0ftimeout_seconds\x18\x03 \x01(\x05R\x0etimeoutSeconds\"\x7f\n" + + "\x13SourceCommandResult\x12\x1b\n" + + "\tsource_vm\x18\x01 \x01(\tR\bsourceVm\x12\x1b\n" + + "\texit_code\x18\x02 \x01(\x05R\bexitCode\x12\x16\n" + + "\x06stdout\x18\x03 \x01(\tR\x06stdout\x12\x16\n" + + "\x06stderr\x18\x04 \x01(\tR\x06stderr\"H\n" + + "\x15ReadSourceFileCommand\x12\x1b\n" + + "\tsource_vm\x18\x01 \x01(\tR\bsourceVm\x12\x12\n" + + "\x04path\x18\x02 \x01(\tR\x04path\"]\n" + + "\x10SourceFileResult\x12\x1b\n" + + "\tsource_vm\x18\x01 \x01(\tR\bsourceVm\x12\x12\n" + + "\x04path\x18\x02 \x01(\tR\x04path\x12\x18\n" + + "\acontent\x18\x03 \x01(\tR\acontent\"\x16\n" + + "\x14ListSourceVMsCommand\">\n" + + "\rSourceVMsList\x12-\n" + + "\x03vms\x18\x01 \x03(\v2\x1b.fluid.v1.SourceVMListEntryR\x03vms\"x\n" + + "\x11SourceVMListEntry\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x14\n" + + "\x05state\x18\x02 \x01(\tR\x05state\x12\x1d\n" + + "\n" + + "ip_address\x18\x03 \x01(\tR\tipAddress\x12\x1a\n" + + "\bprepared\x18\x04 \x01(\bR\bprepared\"6\n" + + "\x17ValidateSourceVMCommand\x12\x1b\n" + + "\tsource_vm\x18\x01 \x01(\tR\bsourceVm\"\xf2\x01\n" + + "\x12SourceVMValidation\x12\x1b\n" + + "\tsource_vm\x18\x01 \x01(\tR\bsourceVm\x12\x14\n" + + "\x05valid\x18\x02 \x01(\bR\x05valid\x12\x14\n" + + "\x05state\x18\x03 \x01(\tR\x05state\x12\x1f\n" + + "\vmac_address\x18\x04 \x01(\tR\n" + + "macAddress\x12\x1d\n" + + "\n" + + "ip_address\x18\x05 \x01(\tR\tipAddress\x12\x1f\n" + + "\vhas_network\x18\x06 \x01(\bR\n" + + "hasNetwork\x12\x1a\n" + + "\bwarnings\x18\a \x03(\tR\bwarnings\x12\x16\n" + + "\x06errors\x18\b \x03(\tR\x06errorsB fluid.v1.SourceVMListEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the 
sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_fluid_v1_source_proto_init() } +func file_fluid_v1_source_proto_init() { + if File_fluid_v1_source_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_fluid_v1_source_proto_rawDesc), len(file_fluid_v1_source_proto_rawDesc)), + NumEnums: 0, + NumMessages: 11, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_fluid_v1_source_proto_goTypes, + DependencyIndexes: file_fluid_v1_source_proto_depIdxs, + MessageInfos: file_fluid_v1_source_proto_msgTypes, + }.Build() + File_fluid_v1_source_proto = out.File + file_fluid_v1_source_proto_goTypes = nil + file_fluid_v1_source_proto_depIdxs = nil +} diff --git a/proto/gen/go/fluid/v1/stream.pb.go b/proto/gen/go/fluid/v1/stream.pb.go new file mode 100644 index 00000000..c7191081 --- /dev/null +++ b/proto/gen/go/fluid/v1/stream.pb.go @@ -0,0 +1,828 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: fluid/v1/stream.proto + +package fluidv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// HostMessage is the envelope for all messages sent from sandbox host to control plane. +type HostMessage struct { + state protoimpl.MessageState `protogen:"open.v1"` + // request_id correlates responses to requests. 
+ RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Types that are valid to be assigned to Payload: + // + // *HostMessage_Registration + // *HostMessage_Heartbeat + // *HostMessage_ResourceReport + // *HostMessage_ErrorReport + // *HostMessage_SandboxCreated + // *HostMessage_SandboxDestroyed + // *HostMessage_StateChanged + // *HostMessage_SandboxStarted + // *HostMessage_SandboxStopped + // *HostMessage_CommandResult + // *HostMessage_SnapshotCreated + // *HostMessage_SourceVmPrepared + // *HostMessage_SourceCommandResult + // *HostMessage_SourceFileResult + // *HostMessage_SourceVmsList + // *HostMessage_SourceVmValidation + // *HostMessage_DiscoverHostsResult + Payload isHostMessage_Payload `protobuf_oneof:"payload"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HostMessage) Reset() { + *x = HostMessage{} + mi := &file_fluid_v1_stream_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HostMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HostMessage) ProtoMessage() {} + +func (x *HostMessage) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_stream_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HostMessage.ProtoReflect.Descriptor instead. 
+func (*HostMessage) Descriptor() ([]byte, []int) { + return file_fluid_v1_stream_proto_rawDescGZIP(), []int{0} +} + +func (x *HostMessage) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *HostMessage) GetPayload() isHostMessage_Payload { + if x != nil { + return x.Payload + } + return nil +} + +func (x *HostMessage) GetRegistration() *HostRegistration { + if x != nil { + if x, ok := x.Payload.(*HostMessage_Registration); ok { + return x.Registration + } + } + return nil +} + +func (x *HostMessage) GetHeartbeat() *Heartbeat { + if x != nil { + if x, ok := x.Payload.(*HostMessage_Heartbeat); ok { + return x.Heartbeat + } + } + return nil +} + +func (x *HostMessage) GetResourceReport() *ResourceReport { + if x != nil { + if x, ok := x.Payload.(*HostMessage_ResourceReport); ok { + return x.ResourceReport + } + } + return nil +} + +func (x *HostMessage) GetErrorReport() *ErrorReport { + if x != nil { + if x, ok := x.Payload.(*HostMessage_ErrorReport); ok { + return x.ErrorReport + } + } + return nil +} + +func (x *HostMessage) GetSandboxCreated() *SandboxCreated { + if x != nil { + if x, ok := x.Payload.(*HostMessage_SandboxCreated); ok { + return x.SandboxCreated + } + } + return nil +} + +func (x *HostMessage) GetSandboxDestroyed() *SandboxDestroyed { + if x != nil { + if x, ok := x.Payload.(*HostMessage_SandboxDestroyed); ok { + return x.SandboxDestroyed + } + } + return nil +} + +func (x *HostMessage) GetStateChanged() *SandboxStateChanged { + if x != nil { + if x, ok := x.Payload.(*HostMessage_StateChanged); ok { + return x.StateChanged + } + } + return nil +} + +func (x *HostMessage) GetSandboxStarted() *SandboxStarted { + if x != nil { + if x, ok := x.Payload.(*HostMessage_SandboxStarted); ok { + return x.SandboxStarted + } + } + return nil +} + +func (x *HostMessage) GetSandboxStopped() *SandboxStopped { + if x != nil { + if x, ok := x.Payload.(*HostMessage_SandboxStopped); ok { + return x.SandboxStopped + } + } + 
return nil +} + +func (x *HostMessage) GetCommandResult() *CommandResult { + if x != nil { + if x, ok := x.Payload.(*HostMessage_CommandResult); ok { + return x.CommandResult + } + } + return nil +} + +func (x *HostMessage) GetSnapshotCreated() *SnapshotCreated { + if x != nil { + if x, ok := x.Payload.(*HostMessage_SnapshotCreated); ok { + return x.SnapshotCreated + } + } + return nil +} + +func (x *HostMessage) GetSourceVmPrepared() *SourceVMPrepared { + if x != nil { + if x, ok := x.Payload.(*HostMessage_SourceVmPrepared); ok { + return x.SourceVmPrepared + } + } + return nil +} + +func (x *HostMessage) GetSourceCommandResult() *SourceCommandResult { + if x != nil { + if x, ok := x.Payload.(*HostMessage_SourceCommandResult); ok { + return x.SourceCommandResult + } + } + return nil +} + +func (x *HostMessage) GetSourceFileResult() *SourceFileResult { + if x != nil { + if x, ok := x.Payload.(*HostMessage_SourceFileResult); ok { + return x.SourceFileResult + } + } + return nil +} + +func (x *HostMessage) GetSourceVmsList() *SourceVMsList { + if x != nil { + if x, ok := x.Payload.(*HostMessage_SourceVmsList); ok { + return x.SourceVmsList + } + } + return nil +} + +func (x *HostMessage) GetSourceVmValidation() *SourceVMValidation { + if x != nil { + if x, ok := x.Payload.(*HostMessage_SourceVmValidation); ok { + return x.SourceVmValidation + } + } + return nil +} + +func (x *HostMessage) GetDiscoverHostsResult() *DiscoverHostsResult { + if x != nil { + if x, ok := x.Payload.(*HostMessage_DiscoverHostsResult); ok { + return x.DiscoverHostsResult + } + } + return nil +} + +type isHostMessage_Payload interface { + isHostMessage_Payload() +} + +type HostMessage_Registration struct { + // Registration and health + Registration *HostRegistration `protobuf:"bytes,10,opt,name=registration,proto3,oneof"` +} + +type HostMessage_Heartbeat struct { + Heartbeat *Heartbeat `protobuf:"bytes,11,opt,name=heartbeat,proto3,oneof"` +} + +type HostMessage_ResourceReport struct { + 
ResourceReport *ResourceReport `protobuf:"bytes,12,opt,name=resource_report,json=resourceReport,proto3,oneof"` +} + +type HostMessage_ErrorReport struct { + ErrorReport *ErrorReport `protobuf:"bytes,13,opt,name=error_report,json=errorReport,proto3,oneof"` +} + +type HostMessage_SandboxCreated struct { + // Sandbox lifecycle responses + SandboxCreated *SandboxCreated `protobuf:"bytes,20,opt,name=sandbox_created,json=sandboxCreated,proto3,oneof"` +} + +type HostMessage_SandboxDestroyed struct { + SandboxDestroyed *SandboxDestroyed `protobuf:"bytes,21,opt,name=sandbox_destroyed,json=sandboxDestroyed,proto3,oneof"` +} + +type HostMessage_StateChanged struct { + StateChanged *SandboxStateChanged `protobuf:"bytes,22,opt,name=state_changed,json=stateChanged,proto3,oneof"` +} + +type HostMessage_SandboxStarted struct { + SandboxStarted *SandboxStarted `protobuf:"bytes,23,opt,name=sandbox_started,json=sandboxStarted,proto3,oneof"` +} + +type HostMessage_SandboxStopped struct { + SandboxStopped *SandboxStopped `protobuf:"bytes,24,opt,name=sandbox_stopped,json=sandboxStopped,proto3,oneof"` +} + +type HostMessage_CommandResult struct { + CommandResult *CommandResult `protobuf:"bytes,25,opt,name=command_result,json=commandResult,proto3,oneof"` +} + +type HostMessage_SnapshotCreated struct { + SnapshotCreated *SnapshotCreated `protobuf:"bytes,26,opt,name=snapshot_created,json=snapshotCreated,proto3,oneof"` +} + +type HostMessage_SourceVmPrepared struct { + // Source VM responses + SourceVmPrepared *SourceVMPrepared `protobuf:"bytes,30,opt,name=source_vm_prepared,json=sourceVmPrepared,proto3,oneof"` +} + +type HostMessage_SourceCommandResult struct { + SourceCommandResult *SourceCommandResult `protobuf:"bytes,31,opt,name=source_command_result,json=sourceCommandResult,proto3,oneof"` +} + +type HostMessage_SourceFileResult struct { + SourceFileResult *SourceFileResult `protobuf:"bytes,32,opt,name=source_file_result,json=sourceFileResult,proto3,oneof"` +} + +type 
HostMessage_SourceVmsList struct { + SourceVmsList *SourceVMsList `protobuf:"bytes,33,opt,name=source_vms_list,json=sourceVmsList,proto3,oneof"` +} + +type HostMessage_SourceVmValidation struct { + SourceVmValidation *SourceVMValidation `protobuf:"bytes,34,opt,name=source_vm_validation,json=sourceVmValidation,proto3,oneof"` +} + +type HostMessage_DiscoverHostsResult struct { + // Host discovery responses + DiscoverHostsResult *DiscoverHostsResult `protobuf:"bytes,40,opt,name=discover_hosts_result,json=discoverHostsResult,proto3,oneof"` +} + +func (*HostMessage_Registration) isHostMessage_Payload() {} + +func (*HostMessage_Heartbeat) isHostMessage_Payload() {} + +func (*HostMessage_ResourceReport) isHostMessage_Payload() {} + +func (*HostMessage_ErrorReport) isHostMessage_Payload() {} + +func (*HostMessage_SandboxCreated) isHostMessage_Payload() {} + +func (*HostMessage_SandboxDestroyed) isHostMessage_Payload() {} + +func (*HostMessage_StateChanged) isHostMessage_Payload() {} + +func (*HostMessage_SandboxStarted) isHostMessage_Payload() {} + +func (*HostMessage_SandboxStopped) isHostMessage_Payload() {} + +func (*HostMessage_CommandResult) isHostMessage_Payload() {} + +func (*HostMessage_SnapshotCreated) isHostMessage_Payload() {} + +func (*HostMessage_SourceVmPrepared) isHostMessage_Payload() {} + +func (*HostMessage_SourceCommandResult) isHostMessage_Payload() {} + +func (*HostMessage_SourceFileResult) isHostMessage_Payload() {} + +func (*HostMessage_SourceVmsList) isHostMessage_Payload() {} + +func (*HostMessage_SourceVmValidation) isHostMessage_Payload() {} + +func (*HostMessage_DiscoverHostsResult) isHostMessage_Payload() {} + +// ControlMessage is the envelope for all messages sent from control plane to sandbox host. +type ControlMessage struct { + state protoimpl.MessageState `protogen:"open.v1"` + // request_id correlates requests to responses. 
+ RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Types that are valid to be assigned to Payload: + // + // *ControlMessage_RegistrationAck + // *ControlMessage_CreateSandbox + // *ControlMessage_DestroySandbox + // *ControlMessage_StartSandbox + // *ControlMessage_StopSandbox + // *ControlMessage_RunCommand + // *ControlMessage_CreateSnapshot + // *ControlMessage_PrepareSourceVm + // *ControlMessage_RunSourceCommand + // *ControlMessage_ReadSourceFile + // *ControlMessage_ListSourceVms + // *ControlMessage_ValidateSourceVm + // *ControlMessage_DiscoverHosts + Payload isControlMessage_Payload `protobuf_oneof:"payload"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ControlMessage) Reset() { + *x = ControlMessage{} + mi := &file_fluid_v1_stream_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ControlMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControlMessage) ProtoMessage() {} + +func (x *ControlMessage) ProtoReflect() protoreflect.Message { + mi := &file_fluid_v1_stream_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControlMessage.ProtoReflect.Descriptor instead. 
+func (*ControlMessage) Descriptor() ([]byte, []int) { + return file_fluid_v1_stream_proto_rawDescGZIP(), []int{1} +} + +func (x *ControlMessage) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *ControlMessage) GetPayload() isControlMessage_Payload { + if x != nil { + return x.Payload + } + return nil +} + +func (x *ControlMessage) GetRegistrationAck() *RegistrationAck { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_RegistrationAck); ok { + return x.RegistrationAck + } + } + return nil +} + +func (x *ControlMessage) GetCreateSandbox() *CreateSandboxCommand { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_CreateSandbox); ok { + return x.CreateSandbox + } + } + return nil +} + +func (x *ControlMessage) GetDestroySandbox() *DestroySandboxCommand { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_DestroySandbox); ok { + return x.DestroySandbox + } + } + return nil +} + +func (x *ControlMessage) GetStartSandbox() *StartSandboxCommand { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_StartSandbox); ok { + return x.StartSandbox + } + } + return nil +} + +func (x *ControlMessage) GetStopSandbox() *StopSandboxCommand { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_StopSandbox); ok { + return x.StopSandbox + } + } + return nil +} + +func (x *ControlMessage) GetRunCommand() *RunCommandCommand { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_RunCommand); ok { + return x.RunCommand + } + } + return nil +} + +func (x *ControlMessage) GetCreateSnapshot() *SnapshotCommand { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_CreateSnapshot); ok { + return x.CreateSnapshot + } + } + return nil +} + +func (x *ControlMessage) GetPrepareSourceVm() *PrepareSourceVMCommand { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_PrepareSourceVm); ok { + return x.PrepareSourceVm + } + } + return nil +} + +func (x *ControlMessage) GetRunSourceCommand() *RunSourceCommandCommand { + 
if x != nil { + if x, ok := x.Payload.(*ControlMessage_RunSourceCommand); ok { + return x.RunSourceCommand + } + } + return nil +} + +func (x *ControlMessage) GetReadSourceFile() *ReadSourceFileCommand { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_ReadSourceFile); ok { + return x.ReadSourceFile + } + } + return nil +} + +func (x *ControlMessage) GetListSourceVms() *ListSourceVMsCommand { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_ListSourceVms); ok { + return x.ListSourceVms + } + } + return nil +} + +func (x *ControlMessage) GetValidateSourceVm() *ValidateSourceVMCommand { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_ValidateSourceVm); ok { + return x.ValidateSourceVm + } + } + return nil +} + +func (x *ControlMessage) GetDiscoverHosts() *DiscoverHostsCommand { + if x != nil { + if x, ok := x.Payload.(*ControlMessage_DiscoverHosts); ok { + return x.DiscoverHosts + } + } + return nil +} + +type isControlMessage_Payload interface { + isControlMessage_Payload() +} + +type ControlMessage_RegistrationAck struct { + // Registration response + RegistrationAck *RegistrationAck `protobuf:"bytes,10,opt,name=registration_ack,json=registrationAck,proto3,oneof"` +} + +type ControlMessage_CreateSandbox struct { + // Sandbox lifecycle commands + CreateSandbox *CreateSandboxCommand `protobuf:"bytes,20,opt,name=create_sandbox,json=createSandbox,proto3,oneof"` +} + +type ControlMessage_DestroySandbox struct { + DestroySandbox *DestroySandboxCommand `protobuf:"bytes,21,opt,name=destroy_sandbox,json=destroySandbox,proto3,oneof"` +} + +type ControlMessage_StartSandbox struct { + StartSandbox *StartSandboxCommand `protobuf:"bytes,22,opt,name=start_sandbox,json=startSandbox,proto3,oneof"` +} + +type ControlMessage_StopSandbox struct { + StopSandbox *StopSandboxCommand `protobuf:"bytes,23,opt,name=stop_sandbox,json=stopSandbox,proto3,oneof"` +} + +type ControlMessage_RunCommand struct { + RunCommand *RunCommandCommand 
`protobuf:"bytes,24,opt,name=run_command,json=runCommand,proto3,oneof"` +} + +type ControlMessage_CreateSnapshot struct { + CreateSnapshot *SnapshotCommand `protobuf:"bytes,25,opt,name=create_snapshot,json=createSnapshot,proto3,oneof"` +} + +type ControlMessage_PrepareSourceVm struct { + // Source VM commands + PrepareSourceVm *PrepareSourceVMCommand `protobuf:"bytes,30,opt,name=prepare_source_vm,json=prepareSourceVm,proto3,oneof"` +} + +type ControlMessage_RunSourceCommand struct { + RunSourceCommand *RunSourceCommandCommand `protobuf:"bytes,31,opt,name=run_source_command,json=runSourceCommand,proto3,oneof"` +} + +type ControlMessage_ReadSourceFile struct { + ReadSourceFile *ReadSourceFileCommand `protobuf:"bytes,32,opt,name=read_source_file,json=readSourceFile,proto3,oneof"` +} + +type ControlMessage_ListSourceVms struct { + ListSourceVms *ListSourceVMsCommand `protobuf:"bytes,33,opt,name=list_source_vms,json=listSourceVms,proto3,oneof"` +} + +type ControlMessage_ValidateSourceVm struct { + ValidateSourceVm *ValidateSourceVMCommand `protobuf:"bytes,34,opt,name=validate_source_vm,json=validateSourceVm,proto3,oneof"` +} + +type ControlMessage_DiscoverHosts struct { + // Host discovery commands + DiscoverHosts *DiscoverHostsCommand `protobuf:"bytes,40,opt,name=discover_hosts,json=discoverHosts,proto3,oneof"` +} + +func (*ControlMessage_RegistrationAck) isControlMessage_Payload() {} + +func (*ControlMessage_CreateSandbox) isControlMessage_Payload() {} + +func (*ControlMessage_DestroySandbox) isControlMessage_Payload() {} + +func (*ControlMessage_StartSandbox) isControlMessage_Payload() {} + +func (*ControlMessage_StopSandbox) isControlMessage_Payload() {} + +func (*ControlMessage_RunCommand) isControlMessage_Payload() {} + +func (*ControlMessage_CreateSnapshot) isControlMessage_Payload() {} + +func (*ControlMessage_PrepareSourceVm) isControlMessage_Payload() {} + +func (*ControlMessage_RunSourceCommand) isControlMessage_Payload() {} + +func 
(*ControlMessage_ReadSourceFile) isControlMessage_Payload() {} + +func (*ControlMessage_ListSourceVms) isControlMessage_Payload() {} + +func (*ControlMessage_ValidateSourceVm) isControlMessage_Payload() {} + +func (*ControlMessage_DiscoverHosts) isControlMessage_Payload() {} + +var File_fluid_v1_stream_proto protoreflect.FileDescriptor + +const file_fluid_v1_stream_proto_rawDesc = "" + + "\n" + + "\x15fluid/v1/stream.proto\x12\bfluid.v1\x1a\x13fluid/v1/host.proto\x1a\x16fluid/v1/sandbox.proto\x1a\x15fluid/v1/source.proto\x1a\x15fluid/v1/daemon.proto\"\xf0\t\n" + + "\vHostMessage\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestId\x12@\n" + + "\fregistration\x18\n" + + " \x01(\v2\x1a.fluid.v1.HostRegistrationH\x00R\fregistration\x123\n" + + "\theartbeat\x18\v \x01(\v2\x13.fluid.v1.HeartbeatH\x00R\theartbeat\x12C\n" + + "\x0fresource_report\x18\f \x01(\v2\x18.fluid.v1.ResourceReportH\x00R\x0eresourceReport\x12:\n" + + "\ferror_report\x18\r \x01(\v2\x15.fluid.v1.ErrorReportH\x00R\verrorReport\x12C\n" + + "\x0fsandbox_created\x18\x14 \x01(\v2\x18.fluid.v1.SandboxCreatedH\x00R\x0esandboxCreated\x12I\n" + + "\x11sandbox_destroyed\x18\x15 \x01(\v2\x1a.fluid.v1.SandboxDestroyedH\x00R\x10sandboxDestroyed\x12D\n" + + "\rstate_changed\x18\x16 \x01(\v2\x1d.fluid.v1.SandboxStateChangedH\x00R\fstateChanged\x12C\n" + + "\x0fsandbox_started\x18\x17 \x01(\v2\x18.fluid.v1.SandboxStartedH\x00R\x0esandboxStarted\x12C\n" + + "\x0fsandbox_stopped\x18\x18 \x01(\v2\x18.fluid.v1.SandboxStoppedH\x00R\x0esandboxStopped\x12@\n" + + "\x0ecommand_result\x18\x19 \x01(\v2\x17.fluid.v1.CommandResultH\x00R\rcommandResult\x12F\n" + + "\x10snapshot_created\x18\x1a \x01(\v2\x19.fluid.v1.SnapshotCreatedH\x00R\x0fsnapshotCreated\x12J\n" + + "\x12source_vm_prepared\x18\x1e \x01(\v2\x1a.fluid.v1.SourceVMPreparedH\x00R\x10sourceVmPrepared\x12S\n" + + "\x15source_command_result\x18\x1f \x01(\v2\x1d.fluid.v1.SourceCommandResultH\x00R\x13sourceCommandResult\x12J\n" + + "\x12source_file_result\x18 
\x01(\v2\x1a.fluid.v1.SourceFileResultH\x00R\x10sourceFileResult\x12A\n" + + "\x0fsource_vms_list\x18! \x01(\v2\x17.fluid.v1.SourceVMsListH\x00R\rsourceVmsList\x12P\n" + + "\x14source_vm_validation\x18\" \x01(\v2\x1c.fluid.v1.SourceVMValidationH\x00R\x12sourceVmValidation\x12S\n" + + "\x15discover_hosts_result\x18( \x01(\v2\x1d.fluid.v1.DiscoverHostsResultH\x00R\x13discoverHostsResultB\t\n" + + "\apayload\"\xfc\a\n" + + "\x0eControlMessage\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestId\x12F\n" + + "\x10registration_ack\x18\n" + + " \x01(\v2\x19.fluid.v1.RegistrationAckH\x00R\x0fregistrationAck\x12G\n" + + "\x0ecreate_sandbox\x18\x14 \x01(\v2\x1e.fluid.v1.CreateSandboxCommandH\x00R\rcreateSandbox\x12J\n" + + "\x0fdestroy_sandbox\x18\x15 \x01(\v2\x1f.fluid.v1.DestroySandboxCommandH\x00R\x0edestroySandbox\x12D\n" + + "\rstart_sandbox\x18\x16 \x01(\v2\x1d.fluid.v1.StartSandboxCommandH\x00R\fstartSandbox\x12A\n" + + "\fstop_sandbox\x18\x17 \x01(\v2\x1c.fluid.v1.StopSandboxCommandH\x00R\vstopSandbox\x12>\n" + + "\vrun_command\x18\x18 \x01(\v2\x1b.fluid.v1.RunCommandCommandH\x00R\n" + + "runCommand\x12D\n" + + "\x0fcreate_snapshot\x18\x19 \x01(\v2\x19.fluid.v1.SnapshotCommandH\x00R\x0ecreateSnapshot\x12N\n" + + "\x11prepare_source_vm\x18\x1e \x01(\v2 .fluid.v1.PrepareSourceVMCommandH\x00R\x0fprepareSourceVm\x12Q\n" + + "\x12run_source_command\x18\x1f \x01(\v2!.fluid.v1.RunSourceCommandCommandH\x00R\x10runSourceCommand\x12K\n" + + "\x10read_source_file\x18 \x01(\v2\x1f.fluid.v1.ReadSourceFileCommandH\x00R\x0ereadSourceFile\x12H\n" + + "\x0flist_source_vms\x18! 
\x01(\v2\x1e.fluid.v1.ListSourceVMsCommandH\x00R\rlistSourceVms\x12Q\n" + + "\x12validate_source_vm\x18\" \x01(\v2!.fluid.v1.ValidateSourceVMCommandH\x00R\x10validateSourceVm\x12G\n" + + "\x0ediscover_hosts\x18( \x01(\v2\x1e.fluid.v1.DiscoverHostsCommandH\x00R\rdiscoverHostsB\t\n" + + "\apayload2M\n" + + "\vHostService\x12>\n" + + "\aConnect\x12\x15.fluid.v1.HostMessage\x1a\x18.fluid.v1.ControlMessage(\x010\x01B fluid.v1.HostRegistration + 3, // 1: fluid.v1.HostMessage.heartbeat:type_name -> fluid.v1.Heartbeat + 4, // 2: fluid.v1.HostMessage.resource_report:type_name -> fluid.v1.ResourceReport + 5, // 3: fluid.v1.HostMessage.error_report:type_name -> fluid.v1.ErrorReport + 6, // 4: fluid.v1.HostMessage.sandbox_created:type_name -> fluid.v1.SandboxCreated + 7, // 5: fluid.v1.HostMessage.sandbox_destroyed:type_name -> fluid.v1.SandboxDestroyed + 8, // 6: fluid.v1.HostMessage.state_changed:type_name -> fluid.v1.SandboxStateChanged + 9, // 7: fluid.v1.HostMessage.sandbox_started:type_name -> fluid.v1.SandboxStarted + 10, // 8: fluid.v1.HostMessage.sandbox_stopped:type_name -> fluid.v1.SandboxStopped + 11, // 9: fluid.v1.HostMessage.command_result:type_name -> fluid.v1.CommandResult + 12, // 10: fluid.v1.HostMessage.snapshot_created:type_name -> fluid.v1.SnapshotCreated + 13, // 11: fluid.v1.HostMessage.source_vm_prepared:type_name -> fluid.v1.SourceVMPrepared + 14, // 12: fluid.v1.HostMessage.source_command_result:type_name -> fluid.v1.SourceCommandResult + 15, // 13: fluid.v1.HostMessage.source_file_result:type_name -> fluid.v1.SourceFileResult + 16, // 14: fluid.v1.HostMessage.source_vms_list:type_name -> fluid.v1.SourceVMsList + 17, // 15: fluid.v1.HostMessage.source_vm_validation:type_name -> fluid.v1.SourceVMValidation + 18, // 16: fluid.v1.HostMessage.discover_hosts_result:type_name -> fluid.v1.DiscoverHostsResult + 19, // 17: fluid.v1.ControlMessage.registration_ack:type_name -> fluid.v1.RegistrationAck + 20, // 18: 
fluid.v1.ControlMessage.create_sandbox:type_name -> fluid.v1.CreateSandboxCommand + 21, // 19: fluid.v1.ControlMessage.destroy_sandbox:type_name -> fluid.v1.DestroySandboxCommand + 22, // 20: fluid.v1.ControlMessage.start_sandbox:type_name -> fluid.v1.StartSandboxCommand + 23, // 21: fluid.v1.ControlMessage.stop_sandbox:type_name -> fluid.v1.StopSandboxCommand + 24, // 22: fluid.v1.ControlMessage.run_command:type_name -> fluid.v1.RunCommandCommand + 25, // 23: fluid.v1.ControlMessage.create_snapshot:type_name -> fluid.v1.SnapshotCommand + 26, // 24: fluid.v1.ControlMessage.prepare_source_vm:type_name -> fluid.v1.PrepareSourceVMCommand + 27, // 25: fluid.v1.ControlMessage.run_source_command:type_name -> fluid.v1.RunSourceCommandCommand + 28, // 26: fluid.v1.ControlMessage.read_source_file:type_name -> fluid.v1.ReadSourceFileCommand + 29, // 27: fluid.v1.ControlMessage.list_source_vms:type_name -> fluid.v1.ListSourceVMsCommand + 30, // 28: fluid.v1.ControlMessage.validate_source_vm:type_name -> fluid.v1.ValidateSourceVMCommand + 31, // 29: fluid.v1.ControlMessage.discover_hosts:type_name -> fluid.v1.DiscoverHostsCommand + 0, // 30: fluid.v1.HostService.Connect:input_type -> fluid.v1.HostMessage + 1, // 31: fluid.v1.HostService.Connect:output_type -> fluid.v1.ControlMessage + 31, // [31:32] is the sub-list for method output_type + 30, // [30:31] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_fluid_v1_stream_proto_init() } +func file_fluid_v1_stream_proto_init() { + if File_fluid_v1_stream_proto != nil { + return + } + file_fluid_v1_host_proto_init() + file_fluid_v1_sandbox_proto_init() + file_fluid_v1_source_proto_init() + file_fluid_v1_daemon_proto_init() + file_fluid_v1_stream_proto_msgTypes[0].OneofWrappers = []any{ + (*HostMessage_Registration)(nil), + (*HostMessage_Heartbeat)(nil), + 
(*HostMessage_ResourceReport)(nil), + (*HostMessage_ErrorReport)(nil), + (*HostMessage_SandboxCreated)(nil), + (*HostMessage_SandboxDestroyed)(nil), + (*HostMessage_StateChanged)(nil), + (*HostMessage_SandboxStarted)(nil), + (*HostMessage_SandboxStopped)(nil), + (*HostMessage_CommandResult)(nil), + (*HostMessage_SnapshotCreated)(nil), + (*HostMessage_SourceVmPrepared)(nil), + (*HostMessage_SourceCommandResult)(nil), + (*HostMessage_SourceFileResult)(nil), + (*HostMessage_SourceVmsList)(nil), + (*HostMessage_SourceVmValidation)(nil), + (*HostMessage_DiscoverHostsResult)(nil), + } + file_fluid_v1_stream_proto_msgTypes[1].OneofWrappers = []any{ + (*ControlMessage_RegistrationAck)(nil), + (*ControlMessage_CreateSandbox)(nil), + (*ControlMessage_DestroySandbox)(nil), + (*ControlMessage_StartSandbox)(nil), + (*ControlMessage_StopSandbox)(nil), + (*ControlMessage_RunCommand)(nil), + (*ControlMessage_CreateSnapshot)(nil), + (*ControlMessage_PrepareSourceVm)(nil), + (*ControlMessage_RunSourceCommand)(nil), + (*ControlMessage_ReadSourceFile)(nil), + (*ControlMessage_ListSourceVms)(nil), + (*ControlMessage_ValidateSourceVm)(nil), + (*ControlMessage_DiscoverHosts)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_fluid_v1_stream_proto_rawDesc), len(file_fluid_v1_stream_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_fluid_v1_stream_proto_goTypes, + DependencyIndexes: file_fluid_v1_stream_proto_depIdxs, + MessageInfos: file_fluid_v1_stream_proto_msgTypes, + }.Build() + File_fluid_v1_stream_proto = out.File + file_fluid_v1_stream_proto_goTypes = nil + file_fluid_v1_stream_proto_depIdxs = nil +} diff --git a/proto/gen/go/fluid/v1/stream_grpc.pb.go b/proto/gen/go/fluid/v1/stream_grpc.pb.go new file mode 100644 index 00000000..21b1241e --- /dev/null +++ 
b/proto/gen/go/fluid/v1/stream_grpc.pb.go @@ -0,0 +1,131 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.6.1 +// - protoc (unknown) +// source: fluid/v1/stream.proto + +package fluidv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + HostService_Connect_FullMethodName = "/fluid.v1.HostService/Connect" +) + +// HostServiceClient is the client API for HostService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// HostService is the bidirectional streaming service between sandbox hosts +// and the control plane. The sandbox host connects OUT to the control plane +// (NAT-friendly). Both sides exchange typed envelope messages with request_id +// for correlation. +type HostServiceClient interface { + // Connect opens a bidirectional stream. The host sends a HostRegistration + // as its first message and receives a RegistrationAck in response. + // After registration, both sides exchange messages asynchronously. + Connect(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[HostMessage, ControlMessage], error) +} + +type hostServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewHostServiceClient(cc grpc.ClientConnInterface) HostServiceClient { + return &hostServiceClient{cc} +} + +func (c *hostServiceClient) Connect(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[HostMessage, ControlMessage], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ stream, err := c.cc.NewStream(ctx, &HostService_ServiceDesc.Streams[0], HostService_Connect_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[HostMessage, ControlMessage]{ClientStream: stream} + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type HostService_ConnectClient = grpc.BidiStreamingClient[HostMessage, ControlMessage] + +// HostServiceServer is the server API for HostService service. +// All implementations must embed UnimplementedHostServiceServer +// for forward compatibility. +// +// HostService is the bidirectional streaming service between sandbox hosts +// and the control plane. The sandbox host connects OUT to the control plane +// (NAT-friendly). Both sides exchange typed envelope messages with request_id +// for correlation. +type HostServiceServer interface { + // Connect opens a bidirectional stream. The host sends a HostRegistration + // as its first message and receives a RegistrationAck in response. + // After registration, both sides exchange messages asynchronously. + Connect(grpc.BidiStreamingServer[HostMessage, ControlMessage]) error + mustEmbedUnimplementedHostServiceServer() +} + +// UnimplementedHostServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedHostServiceServer struct{} + +func (UnimplementedHostServiceServer) Connect(grpc.BidiStreamingServer[HostMessage, ControlMessage]) error { + return status.Error(codes.Unimplemented, "method Connect not implemented") +} +func (UnimplementedHostServiceServer) mustEmbedUnimplementedHostServiceServer() {} +func (UnimplementedHostServiceServer) testEmbeddedByValue() {} + +// UnsafeHostServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to HostServiceServer will +// result in compilation errors. +type UnsafeHostServiceServer interface { + mustEmbedUnimplementedHostServiceServer() +} + +func RegisterHostServiceServer(s grpc.ServiceRegistrar, srv HostServiceServer) { + // If the following call panics, it indicates UnimplementedHostServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&HostService_ServiceDesc, srv) +} + +func _HostService_Connect_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(HostServiceServer).Connect(&grpc.GenericServerStream[HostMessage, ControlMessage]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type HostService_ConnectServer = grpc.BidiStreamingServer[HostMessage, ControlMessage] + +// HostService_ServiceDesc is the grpc.ServiceDesc for HostService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var HostService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "fluid.v1.HostService", + HandlerType: (*HostServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Connect", + Handler: _HostService_Connect_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "fluid/v1/stream.proto", +} diff --git a/proto/gen/go/go.mod b/proto/gen/go/go.mod new file mode 100644 index 00000000..d3c9ea81 --- /dev/null +++ b/proto/gen/go/go.mod @@ -0,0 +1,17 @@ +module github.com/aspectrr/fluid.sh/proto/gen/go + +go 1.24.0 + +toolchain go1.24.4 + +require ( + google.golang.org/grpc v1.72.2 + google.golang.org/protobuf v1.36.6 +) + +require ( + golang.org/x/net v0.35.0 // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/text v0.22.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect +) diff --git a/proto/gen/go/go.sum b/proto/gen/go/go.sum new file mode 100644 index 00000000..e655b1b0 --- /dev/null +++ b/proto/gen/go/go.sum @@ -0,0 +1,34 @@ +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= +google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= +google.golang.org/grpc v1.72.2/go.mod 
h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= diff --git a/scripts/reset-proxmox.sh b/scripts/reset-proxmox.sh new file mode 100755 index 00000000..e8509cfa --- /dev/null +++ b/scripts/reset-proxmox.sh @@ -0,0 +1,460 @@ +#!/bin/bash +# reset-proxmox.sh +# +# Resets a Proxmox host by destroying all source VMs/CTs and sandboxes, +# then recreating the source LXC container and QEMU VM. +# WARN: This will destroy containers in VMID 100-199 (sources), 200-299 (QEMU sources), +# and 9000-9999 (sandboxes). +# +# Usage: sudo ./reset-proxmox.sh [VM_INDEX] [--ssh-users-file ] +# +# Options: +# VM_INDEX VM index number (default: 1) +# --ssh-users-file Path to file with SSH users (one per line: ) + +VM_INDEX="" +SSH_USERS_FILE="" + +# Parse arguments: first positional arg is VM_INDEX, rest are named flags +while [[ $# -gt 0 ]]; do + case "$1" in + --ssh-users-file) + SSH_USERS_FILE="$2" + shift 2 + ;; + --help|-h) + echo "Usage: sudo ./reset-proxmox.sh [VM_INDEX] [--ssh-users-file ]" + echo "" + echo "Options:" + echo " VM_INDEX VM index number (default: 1)" + echo " --ssh-users-file Path to file with SSH users (one per line: )" + exit 0 + ;; + *) + if [[ -z "$VM_INDEX" ]]; then + VM_INDEX="$1" + else + echo "Unknown argument: $1" >&2 + exit 1 + fi + shift + ;; + esac +done + +VM_INDEX="${VM_INDEX:-1}" + +set -euo pipefail + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" >&2 +} + +# Check if running as root +if [[ $EUID -ne 0 ]]; then + log_error "This script must be run as root" + exit 1 +fi + +# Check for 
required commands +if ! command -v pvesh &>/dev/null || ! systemctl is-active --quiet pve-cluster 2>/dev/null; then + log_error "Proxmox VE is not installed or pve-cluster is not running." + log_error "Please run setup-proxmox.sh first." + exit 1 +fi + +# VMID scheme +LXC_VMID=$((100 + VM_INDEX)) +QEMU_VMID=$((200 + VM_INDEX)) +LXC_NAME="test-vm-${VM_INDEX}" +QEMU_NAME="test-vm-qemu-${VM_INDEX}" +PVE_NODE=$(hostname) + +log_warn "This script will DESTROY source CTs (100-199), source VMs (200-299), and sandboxes (9000-9999)." +log_warn "Then recreate ${LXC_NAME} and ${QEMU_NAME}." + +# ============================================================================ +# STEP 1: Destroy LXC containers in source range (100-199) +# ============================================================================ +log_info "Destroying LXC source containers (VMID 100-199)..." + +for vmid in $(pct list 2>/dev/null | awk 'NR>1{print $1}'); do + if [[ "$vmid" -ge 100 ]] && [[ "$vmid" -le 199 ]]; then + local_name=$(pct list 2>/dev/null | awk -v id="$vmid" '$1==id{print $3}') + log_info "Destroying LXC container ${vmid} (${local_name:-unknown})..." + pct stop "$vmid" --force 2>/dev/null || true + sleep 1 + pct destroy "$vmid" --force --purge 2>/dev/null || true + log_success "Destroyed container ${vmid}." + fi +done + +# ============================================================================ +# STEP 2: Destroy sandbox LXC containers (9000-9999) +# ============================================================================ +log_info "Destroying sandbox containers (VMID 9000-9999)..." + +for vmid in $(pct list 2>/dev/null | awk 'NR>1{print $1}'); do + if [[ "$vmid" -ge 9000 ]] && [[ "$vmid" -le 9999 ]]; then + log_info "Destroying sandbox container ${vmid}..." + pct stop "$vmid" --force 2>/dev/null || true + sleep 1 + pct destroy "$vmid" --force --purge 2>/dev/null || true + log_success "Destroyed sandbox ${vmid}." 
+ fi +done + +# ============================================================================ +# STEP 3: Destroy QEMU VMs in source range (200-299) +# ============================================================================ +log_info "Destroying QEMU source VMs (VMID 200-299)..." + +for vmid in $(qm list 2>/dev/null | awk 'NR>1{print $1}'); do + if [[ "$vmid" -ge 200 ]] && [[ "$vmid" -le 299 ]]; then + local_name=$(qm list 2>/dev/null | awk -v id="$vmid" '$1==id{print $2}') + log_info "Destroying QEMU VM ${vmid} (${local_name:-unknown})..." + qm stop "$vmid" --force 2>/dev/null || true + sleep 1 + qm destroy "$vmid" --force --purge 2>/dev/null || true + log_success "Destroyed VM ${vmid}." + fi +done + +log_success "Cleanup complete." + +# ============================================================================ +# STEP 4: Ensure LXC template is available +# ============================================================================ +log_info "Verifying LXC template availability..." + +LXC_TEMPLATE="ubuntu-22.04-standard_22.04-1_amd64.tar.zst" + +pveam update 2>/dev/null || true + +if pveam list local 2>/dev/null | grep -q "$LXC_TEMPLATE"; then + log_success "LXC template available." +else + log_info "Downloading $LXC_TEMPLATE..." + if pveam download local "$LXC_TEMPLATE"; then + log_success "LXC template downloaded." + else + LXC_TEMPLATE="ubuntu-22.04-standard_22.04-1_amd64.tar.gz" + log_info "Trying alternate template: $LXC_TEMPLATE..." + if pveam download local "$LXC_TEMPLATE"; then + log_success "LXC template downloaded." + else + log_error "Failed to download LXC template." + exit 1 + fi + fi +fi + +# Add SSH public keys to host +if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then + log_info "Adding SSH public keys to host authorized_keys..." 
+ HOST_SSH_DIR="/root/.ssh" + mkdir -p "$HOST_SSH_DIR" + chmod 700 "$HOST_SSH_DIR" + touch "$HOST_SSH_DIR/authorized_keys" + chmod 600 "$HOST_SSH_DIR/authorized_keys" + while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + pubkey="${line#* }" + if ! grep -qF "$pubkey" "$HOST_SSH_DIR/authorized_keys"; then + echo "$pubkey" >> "$HOST_SSH_DIR/authorized_keys" + username="${line%% *}" + log_success "Added key for ${username} to host authorized_keys" + fi + done < "$SSH_USERS_FILE" +fi + +# Arrays to track created resources for summary +CREATED_NAMES=() +CREATED_IPS=() +CREATED_TYPES=() + +# ============================================================================ +# create_lxc: Create an LXC source container +# +# Arguments: +# $1 - ct_name +# $2 - vmid +# ============================================================================ +create_lxc() { + local ct_name="$1" + local vmid="$2" + + log_info "Creating LXC container '${ct_name}' (VMID: ${vmid})..." + + # Destroy existing if present (safety) + if pct status "$vmid" &>/dev/null; then + pct stop "$vmid" --force 2>/dev/null || true + sleep 2 + pct destroy "$vmid" --force --purge 2>/dev/null || true + fi + + # Create container + pct create "$vmid" "local:vztmpl/${LXC_TEMPLATE}" \ + --hostname "$ct_name" \ + --cores 2 \ + --memory 1024 \ + --swap 512 \ + --storage local-lvm \ + --rootfs local-lvm:8 \ + --net0 "name=eth0,bridge=vmbr0,ip=dhcp" \ + --unprivileged 1 \ + --features nesting=1 \ + --start 0 + + log_success "Container '${ct_name}' created." + + # Start container + log_info "Starting container '${ct_name}'..." + pct start "$vmid" + sleep 5 + + # Wait for IP + log_info "Waiting for '${ct_name}' to obtain IP address..." 
+ local max_wait=120 + local wait_interval=5 + local elapsed=0 + local ct_ip="" + + while [[ $elapsed -lt $max_wait ]]; do + ct_ip=$(pct exec "$vmid" -- ip -4 addr show eth0 2>/dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1 || true) + if [[ -n "$ct_ip" ]] && [[ "$ct_ip" != "127."* ]]; then + log_success "Container '${ct_name}' IP: ${ct_ip}" + break + fi + log_info "Waiting for '${ct_name}' IP... (${elapsed}s / ${max_wait}s)" + sleep $wait_interval + elapsed=$((elapsed + wait_interval)) + done + + if [[ -z "$ct_ip" ]] || [[ "$ct_ip" == "127."* ]]; then + ct_ip="" + log_warn "Container '${ct_name}' did not obtain IP within ${max_wait}s." + log_warn "Check: pct exec ${vmid} -- ip addr" + fi + + # Install openssh-server + basic tools + log_info "Installing packages in container '${ct_name}'..." + pct exec "$vmid" -- bash -c "apt-get update -qq && apt-get install -y -qq openssh-server curl wget sudo" 2>/dev/null || true + + # Ensure SSH is running + pct exec "$vmid" -- bash -c "systemctl enable ssh && systemctl start ssh" 2>/dev/null || true + + # Add SSH users from file + if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then + log_info "Adding SSH users to container '${ct_name}'..." 
+ while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + local username="${line%% *}" + local pubkey="${line#* }" + pct exec "$vmid" -- bash -c " + id '$username' &>/dev/null || useradd -m -s /bin/bash '$username' + echo '$username ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/$username + mkdir -p /home/$username/.ssh + echo '$pubkey' >> /home/$username/.ssh/authorized_keys + chmod 700 /home/$username/.ssh + chmod 600 /home/$username/.ssh/authorized_keys + chown -R $username:$username /home/$username/.ssh + " 2>/dev/null || true + log_success "Added user ${username} to container '${ct_name}'" + done < "$SSH_USERS_FILE" + fi + + # Set root password for fallback access + pct exec "$vmid" -- bash -c "echo 'root:ubuntu' | chpasswd" 2>/dev/null || true + + # Track for summary + CREATED_NAMES+=("$ct_name") + CREATED_IPS+=("${ct_ip:-pending}") + CREATED_TYPES+=("LXC (VMID: $vmid)") +} + +# ============================================================================ +# create_qemu_vm: Create a QEMU VM with cloud-init +# +# Arguments: +# $1 - vm_name +# $2 - vmid +# ============================================================================ +create_qemu_vm() { + local vm_name="$1" + local vmid="$2" + + log_info "Creating QEMU VM '${vm_name}' (VMID: ${vmid})..." + + # Destroy existing if present (safety) + if qm status "$vmid" &>/dev/null; then + qm stop "$vmid" --force 2>/dev/null || true + sleep 2 + qm destroy "$vmid" --force --purge 2>/dev/null || true + fi + + # Download Ubuntu cloud image if missing + local IMAGE_DIR="/var/lib/vz/template/qemu" + local CLOUD_IMAGE="ubuntu-22.04-minimal-cloudimg-amd64.img" + local CLOUD_IMAGE_URL="https://cloud-images.ubuntu.com/minimal/releases/jammy/release/${CLOUD_IMAGE}" + local CLOUD_IMAGE_PATH="${IMAGE_DIR}/${CLOUD_IMAGE}" + + mkdir -p "$IMAGE_DIR" + + if [[ ! -f "$CLOUD_IMAGE_PATH" ]]; then + log_info "Downloading Ubuntu cloud image..." 
+ if wget -q --show-progress -O "$CLOUD_IMAGE_PATH" "$CLOUD_IMAGE_URL"; then + log_success "Cloud image downloaded." + else + log_error "Failed to download cloud image." + exit 1 + fi + else + log_info "Cloud image already exists at $CLOUD_IMAGE_PATH" + fi + + # Create VM + qm create "$vmid" \ + --name "$vm_name" \ + --cores 2 \ + --memory 2048 \ + --net0 "virtio,bridge=vmbr0" \ + --agent enabled=1 \ + --ostype l26 \ + --scsihw virtio-scsi-single + + # Import disk to local storage + log_info "Importing disk from cloud image..." + qm importdisk "$vmid" "$CLOUD_IMAGE_PATH" local 2>/dev/null + + # Attach disk as scsi0 + qm set "$vmid" --scsi0 "local:${vmid}/vm-${vmid}-disk-0.raw" + qm set "$vmid" --boot order=scsi0 + + # Resize disk to 10G + qm resize "$vmid" scsi0 10G + + # Add cloud-init drive + qm set "$vmid" --ide2 local-lvm:cloudinit + + # Configure cloud-init + qm set "$vmid" --ciuser ubuntu --cipassword ubuntu + qm set "$vmid" --ipconfig0 ip=dhcp + + # Add SSH keys via cloud-init if users file exists + if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then + local SSHKEYS_TMP + SSHKEYS_TMP=$(mktemp) + while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + local pubkey="${line#* }" + echo "$pubkey" >> "$SSHKEYS_TMP" + done < "$SSH_USERS_FILE" + qm set "$vmid" --sshkeys "$SSHKEYS_TMP" + rm -f "$SSHKEYS_TMP" + fi + + # Start VM + log_info "Starting VM '${vm_name}'..." + qm start "$vmid" + + # Wait for IP via guest agent + log_info "Waiting for '${vm_name}' to obtain IP address (requires guest agent)..." + local max_wait=180 + local wait_interval=10 + local elapsed=0 + local vm_ip="" + + while [[ $elapsed -lt $max_wait ]]; do + vm_ip=$(qm guest cmd "$vmid" network-get-interfaces 2>/dev/null | \ + grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | grep -v '^127\.' 
| head -1 || true) + + if [[ -n "$vm_ip" ]]; then + log_success "VM '${vm_name}' IP: ${vm_ip}" + break + fi + + log_info "Waiting for '${vm_name}' IP... (${elapsed}s / ${max_wait}s)" + sleep $wait_interval + elapsed=$((elapsed + wait_interval)) + done + + if [[ -z "$vm_ip" ]]; then + log_warn "VM '${vm_name}' did not obtain IP within ${max_wait}s." + log_warn "Check: qm guest cmd ${vmid} network-get-interfaces" + fi + + # Track for summary + CREATED_NAMES+=("$vm_name") + CREATED_IPS+=("${vm_ip:-pending}") + CREATED_TYPES+=("QEMU (VMID: $vmid)") +} + +# ============================================================================ +# STEP 5: Recreate source CT and QEMU VM +# ============================================================================ +create_lxc "$LXC_NAME" "$LXC_VMID" +create_qemu_vm "$QEMU_NAME" "$QEMU_VMID" + +# ============================================================================ +# STEP 6: Final Summary +# ============================================================================ +echo "" +echo "============================================================================" +log_success "Proxmox host reset complete!" 
+echo "============================================================================" +echo "" +echo "Reset Summary:" +echo " - Source containers (100-199) destroyed and recreated" +echo " - Source VMs (200-299) destroyed and recreated" +echo " - Sandbox containers (9000-9999) destroyed" +echo " - API token was NOT changed (existing token still valid)" +echo "" +for i in "${!CREATED_NAMES[@]}"; do + echo " ${CREATED_TYPES[$i]}: '${CREATED_NAMES[$i]}'" + if [[ "${CREATED_IPS[$i]}" != "pending" ]]; then + echo " - IP Address: ${CREATED_IPS[$i]}" + else + echo " - IP Address: (pending)" + fi +done +echo "" +echo " - Login: ubuntu / ubuntu (password)" +if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then + echo " - SSH Users:" + while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + username="${line%% *}" + echo " ${username} (key-based auth)" + done < "$SSH_USERS_FILE" +fi +echo "" +echo "Useful commands:" +echo " pct list # List LXC containers" +echo " qm list # List QEMU VMs" +echo " pct exec ${LXC_VMID} -- bash # Shell into LXC container" +echo " qm terminal ${QEMU_VMID} # Console to QEMU VM" +echo "" diff --git a/scripts/reset-ubuntu.sh b/scripts/reset-ubuntu.sh index 94b10af8..80dc5df8 100755 --- a/scripts/reset-ubuntu.sh +++ b/scripts/reset-ubuntu.sh @@ -1,7 +1,7 @@ #!/bin/bash # reset-ubuntu.sh # -# Resets the Ubuntu host to contain ONLY the specified test-vm-{INDEX}. +# Resets the Ubuntu host to contain ONLY test-vm-{INDEX} and sandbox-host-{INDEX}. # WARN: This will delete ALL other VMs on the system to ensure a clean state. # # Usage: sudo ./reset-ubuntu.sh [VM_INDEX] [--ssh-users-file ] @@ -41,7 +41,6 @@ while [[ $# -gt 0 ]]; do done VM_INDEX="${VM_INDEX:-1}" -VM_NAME="test-vm-${VM_INDEX}" set -euo pipefail @@ -81,7 +80,7 @@ if ! command -v virsh &> /dev/null || ! 
command -v virt-install &> /dev/null; th exit 1 fi -log_warn "This script will DESTROY ALL VMs on this host and recreate '${VM_NAME}'." +log_warn "This script will DESTROY ALL VMs on this host and recreate test-vm-${VM_INDEX} + sandbox-host-${VM_INDEX}." # ============================================================================ # STEP 1: Ensure default network is active @@ -132,6 +131,7 @@ rm -rf /var/lib/libvirt/images/cloud-init/* 2>/dev/null || true # Clean up old VM disks (except base images) log_info "Cleaning up old VM disks..." rm -f /var/lib/libvirt/images/test-vm-*.qcow2 2>/dev/null || true +rm -f /var/lib/libvirt/images/sandbox-host-*.qcow2 2>/dev/null || true rm -f /var/lib/libvirt/images/sbx-*.qcow2 2>/dev/null || true # Clean up sandbox work directories @@ -150,9 +150,9 @@ virsh net-start default > /dev/null 2>&1 || true log_success "DHCP leases flushed." # ============================================================================ -# STEP 3: Create Test VM (Ubuntu 22.04 Cloud Image) +# STEP 3: Create Test VMs (Ubuntu 22.04 Cloud Image) # ============================================================================ -log_info "Creating fresh Ubuntu test VM '${VM_NAME}'..." +log_info "Creating fresh test VMs..." IMAGE_DIR="/var/lib/libvirt/images" CLOUD_INIT_DIR="${IMAGE_DIR}/cloud-init" @@ -177,59 +177,95 @@ else log_info "Base image already exists at $BASE_IMAGE_PATH" fi -# 2. Create Disk for this VM (Copy-on-Write) -VM_DISK="${IMAGE_DIR}/${VM_NAME}.qcow2" -log_info "Creating VM disk: $VM_DISK" -if [[ -f "$VM_DISK" ]]; then - rm -f "$VM_DISK" +# Add SSH public keys to KVM host for proxy jump access (once, not per VM) +if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then + log_info "Adding SSH public keys to KVM host authorized_keys..." 
+ HOST_SSH_DIR="/root/.ssh" + mkdir -p "$HOST_SSH_DIR" + chmod 700 "$HOST_SSH_DIR" + touch "$HOST_SSH_DIR/authorized_keys" + chmod 600 "$HOST_SSH_DIR/authorized_keys" + while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + pubkey="${line#* }" + if ! grep -qF "$pubkey" "$HOST_SSH_DIR/authorized_keys"; then + echo "$pubkey" >> "$HOST_SSH_DIR/authorized_keys" + username="${line%% *}" + log_success "Added key for ${username} to host authorized_keys" + fi + done < "$SSH_USERS_FILE" fi -qemu-img create -f qcow2 -F qcow2 -b "$BASE_IMAGE_PATH" "$VM_DISK" 10G -# 3. Create Cloud-Init Config with proper network configuration -# Store in persistent location so VM can access it on reboot -SEED_DIR="${CLOUD_INIT_DIR}/${VM_NAME}" -mkdir -p "$SEED_DIR" +# Arrays to track created VMs for summary +CREATED_VM_NAMES=() +CREATED_VM_IPS=() +CREATED_VM_MACS=() +CREATED_VM_DISKS=() + +# ============================================================================ +# create_vm: Create a single VM with cloud-init, wait for IP, verify network +# +# Arguments: +# $1 - vm_name (e.g. "test-vm-1") +# $2 - vm_index (numeric, for MAC generation) +# $3 - mac_prefix (e.g. "52:54:00" or "52:54:01") +# ============================================================================ +create_vm() { + local vm_name="$1" + local vm_index="$2" + local mac_prefix="$3" + + log_info "Creating VM '${vm_name}'..." 
+ + # Create Disk (Copy-on-Write) + local vm_disk="${IMAGE_DIR}/${vm_name}.qcow2" + log_info "Creating VM disk: $vm_disk" + if [[ -f "$vm_disk" ]]; then + rm -f "$vm_disk" + fi + qemu-img create -f qcow2 -F qcow2 -b "$BASE_IMAGE_PATH" "$vm_disk" 10G -USER_DATA="${SEED_DIR}/user-data" -META_DATA="${SEED_DIR}/meta-data" -NETWORK_CONFIG="${SEED_DIR}/network-config" + # Create Cloud-Init Config + local seed_dir="${CLOUD_INIT_DIR}/${vm_name}" + mkdir -p "$seed_dir" -# Generate a unique instance-id for this VM -INSTANCE_ID="${VM_NAME}-$(date +%s)" + local user_data="${seed_dir}/user-data" + local meta_data="${seed_dir}/meta-data" + local network_config="${seed_dir}/network-config" + local instance_id="${vm_name}-$(date +%s)" -log_info "Creating cloud-init configuration with network settings..." + log_info "Creating cloud-init configuration for '${vm_name}'..." -# User-data: password, SSH, guest agent -# NOTE: Network config is in separate network-config file, not here -# Having it in both places can cause conflicts -cat > "$USER_DATA" < "$user_data" <> "$USER_DATA" - echo "users:" >> "$USER_DATA" - echo " - default" >> "$USER_DATA" - while IFS= read -r line || [[ -n "$line" ]]; do - [[ -z "$line" ]] && continue - [[ "$line" =~ ^#.*$ ]] && continue - username="${line%% *}" - pubkey="${line#* }" - cat >> "$USER_DATA" <> "$user_data" + echo "users:" >> "$user_data" + echo " - default" >> "$user_data" + while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + local username="${line%% *}" + local pubkey="${line#* }" + cat >> "$user_data" <> "$USER_DATA" <> "$user_data" <> "$HOST_SSH_DIR/authorized_keys" - username="${line%% *}" - log_success "Added key for ${username} to host authorized_keys" - fi - done < "$SSH_USERS_FILE" -fi - -# Meta-data: unique instance-id is CRITICAL for cloud-init to run on clones -cat > "$META_DATA" < "$meta_data" < "$NETWORK_CONFIG" < "$network_config" </dev/null | grep -oE 
'([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1 || true) + while [[ $elapsed -lt $max_wait ]]; do + vm_ip=$(virsh domifaddr "${vm_name}" --source lease 2>/dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1 || true) - if [[ -n "$VM_IP" ]]; then - log_success "VM '${VM_NAME}' obtained IP address: ${VM_IP}" - break + if [[ -n "$vm_ip" ]]; then + log_success "VM '${vm_name}' obtained IP address: ${vm_ip}" + break + fi + + log_info "Waiting for '${vm_name}' IP... (${elapsed}s / ${max_wait}s)" + sleep $wait_interval + elapsed=$((elapsed + wait_interval)) + done + + if [[ -z "$vm_ip" ]]; then + log_warn "VM '${vm_name}' did not obtain IP address within ${max_wait} seconds." + log_warn "Troubleshooting steps:" + log_warn " 1. Check VM is running: virsh list --all" + log_warn " 2. Check network interface: virsh domiflist ${vm_name}" + log_warn " 3. Check DHCP leases: virsh net-dhcp-leases default" + log_warn " 4. Access VM console: virsh console ${vm_name} (login: ubuntu/ubuntu)" + log_warn " 5. Inside VM, check: ip addr show; cloud-init status" fi - log_info "Waiting for IP... (${ELAPSED}s / ${MAX_WAIT}s)" - sleep $WAIT_INTERVAL - ELAPSED=$((ELAPSED + WAIT_INTERVAL)) -done + # Verify VM network interface + log_info "Verifying '${vm_name}' network configuration..." -if [[ -z "$VM_IP" ]]; then - log_warn "VM did not obtain IP address within ${MAX_WAIT} seconds." - log_warn "This may indicate a network configuration issue." - log_warn "Troubleshooting steps:" - log_warn " 1. Check VM is running: virsh list --all" - log_warn " 2. Check network interface: virsh domiflist ${VM_NAME}" - log_warn " 3. Check DHCP leases: virsh net-dhcp-leases default" - log_warn " 4. Access VM console: virsh console ${VM_NAME} (login: ubuntu/ubuntu)" - log_warn " 5. 
Inside VM, check: ip addr show; cloud-init status" -fi + local vm_mac + vm_mac=$(virsh domiflist "${vm_name}" 2>/dev/null | grep -oE '([0-9a-f]{2}:){5}[0-9a-f]{2}' | head -1 || true) + if [[ -n "$vm_mac" ]]; then + log_success "VM '${vm_name}' MAC address: ${vm_mac}" + else + log_warn "Could not determine MAC address for '${vm_name}'" + fi -# ============================================================================ -# STEP 5: Verify VM network interface -# ============================================================================ -log_info "Verifying VM network configuration..." + local iface + iface=$(virsh domiflist "${vm_name}" 2>/dev/null | awk 'NR>2 && $1 != "" {print $1}' | head -1 || true) + if [[ -n "$iface" ]]; then + log_info "Network interface: ${iface}" + virsh domifstat "${vm_name}" "${iface}" 2>/dev/null || true + fi -# Check MAC address -VM_MAC=$(virsh domiflist "${VM_NAME}" 2>/dev/null | grep -oE '([0-9a-f]{2}:){5}[0-9a-f]{2}' | head -1 || true) -if [[ -n "$VM_MAC" ]]; then - log_success "VM MAC address: ${VM_MAC}" -else - log_warn "Could not determine VM MAC address" -fi + # Track results for summary + CREATED_VM_NAMES+=("$vm_name") + CREATED_VM_IPS+=("$vm_ip") + CREATED_VM_MACS+=("$vm_mac") + CREATED_VM_DISKS+=("$vm_disk") +} -# Check interface stats -IFACE=$(virsh domiflist "${VM_NAME}" 2>/dev/null | awk 'NR>2 && $1 != "" {print $1}' | head -1 || true) -if [[ -n "$IFACE" ]]; then - log_info "Network interface: ${IFACE}" - virsh domifstat "${VM_NAME}" "${IFACE}" 2>/dev/null || true -fi +# Create both VMs +create_vm "test-vm-${VM_INDEX}" "$VM_INDEX" "52:54:00" +create_vm "sandbox-host-${VM_INDEX}" "$VM_INDEX" "52:54:01" # ============================================================================ -# STEP 6: Final Summary +# STEP 4: Final Summary # ============================================================================ echo "" echo "============================================================================" @@ -372,17 +391,21 @@ echo "" 
echo "Reset Summary:" echo " - All previous VMs destroyed and undefined" echo " - Cloud-init data cleaned up" -echo " - Test VM: '${VM_NAME}' has been created and started" -echo " - VM Disk: ${VM_DISK}" +echo "" +for i in "${!CREATED_VM_NAMES[@]}"; do + echo " VM: '${CREATED_VM_NAMES[$i]}'" + echo " - Disk: ${CREATED_VM_DISKS[$i]}" + if [[ -n "${CREATED_VM_MACS[$i]}" ]]; then + echo " - MAC Address: ${CREATED_VM_MACS[$i]}" + fi + if [[ -n "${CREATED_VM_IPS[$i]}" ]]; then + echo " - IP Address: ${CREATED_VM_IPS[$i]}" + else + echo " - IP Address: (pending - check with 'virsh domifaddr ${CREATED_VM_NAMES[$i]} --source lease')" + fi +done +echo "" echo " - Cloud-Init: virt-install --cloud-init (native injection)" -if [[ -n "$VM_MAC" ]]; then - echo " - MAC Address: ${VM_MAC}" -fi -if [[ -n "$VM_IP" ]]; then - echo " - IP Address: ${VM_IP}" -else - echo " - IP Address: (pending - check with 'virsh domifaddr ${VM_NAME} --source lease')" -fi echo " - Login: ubuntu / ubuntu (password)" if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then echo " - SSH Users:" @@ -396,20 +419,26 @@ fi echo "" echo "Useful commands:" echo " virsh list --all # List all VMs" -echo " virsh domifaddr ${VM_NAME} --source lease # Get VM IP" -echo " virsh console ${VM_NAME} # Access VM console" -echo " ssh ubuntu@${VM_IP:-} # SSH to VM (password: ubuntu)" +for i in "${!CREATED_VM_NAMES[@]}"; do + echo " virsh domifaddr ${CREATED_VM_NAMES[$i]} --source lease # Get ${CREATED_VM_NAMES[$i]} IP" +done echo "" -# Verify the VM is in a good state for cloning -log_info "Validating VM is ready for use as sandbox source..." +# Verify the VMs are in a good state for cloning +log_info "Validating VMs are ready for use as sandbox sources..." -if [[ -n "$VM_IP" ]] && [[ -n "$VM_MAC" ]]; then - log_success "VM '${VM_NAME}' is ready for use as a sandbox source!" - log_success "You can now create sandboxes with: fluid create --source-vm=${VM_NAME}" -else - log_warn "VM may not be fully ready. 
Please verify:" - log_warn " - VM has IP: virsh domifaddr ${VM_NAME} --source lease" - log_warn " - VM has MAC: virsh domiflist ${VM_NAME}" - log_warn " - Run validation: fluid validate ${VM_NAME}" +ALL_READY=true +for i in "${!CREATED_VM_NAMES[@]}"; do + if [[ -n "${CREATED_VM_IPS[$i]}" ]] && [[ -n "${CREATED_VM_MACS[$i]}" ]]; then + log_success "VM '${CREATED_VM_NAMES[$i]}' is ready for use as a sandbox source!" + else + log_warn "VM '${CREATED_VM_NAMES[$i]}' may not be fully ready. Please verify:" + log_warn " - VM has IP: virsh domifaddr ${CREATED_VM_NAMES[$i]} --source lease" + log_warn " - VM has MAC: virsh domiflist ${CREATED_VM_NAMES[$i]}" + ALL_READY=false + fi +done + +if [[ "$ALL_READY" == true ]]; then + log_success "All VMs are ready!" fi diff --git a/scripts/setup-proxmox.sh b/scripts/setup-proxmox.sh new file mode 100755 index 00000000..85971bb9 --- /dev/null +++ b/scripts/setup-proxmox.sh @@ -0,0 +1,617 @@ +#!/bin/bash +# setup-proxmox.sh +# +# Sets up Proxmox VE on a Debian host and creates source LXC containers + QEMU VMs. +# Requires a reboot after initial Proxmox install (PVE kernel). Re-run after reboot +# to complete VM/CT creation. 
+# +# Usage: sudo ./setup-proxmox.sh [VM_INDEX] [--ssh-users-file ] +# +# Options: +# VM_INDEX VM index number (default: 1) +# --ssh-users-file Path to file with SSH users (one per line: ) + +VM_INDEX="" +SSH_USERS_FILE="" + +# Parse arguments: first positional arg is VM_INDEX, rest are named flags +while [[ $# -gt 0 ]]; do + case "$1" in + --ssh-users-file) + SSH_USERS_FILE="$2" + shift 2 + ;; + --help|-h) + echo "Usage: sudo ./setup-proxmox.sh [VM_INDEX] [--ssh-users-file ]" + echo "" + echo "Options:" + echo " VM_INDEX VM index number (default: 1)" + echo " --ssh-users-file Path to file with SSH users (one per line: )" + exit 0 + ;; + *) + if [[ -z "$VM_INDEX" ]]; then + VM_INDEX="$1" + else + echo "Unknown argument: $1" >&2 + exit 1 + fi + shift + ;; + esac +done + +VM_INDEX="${VM_INDEX:-1}" + +set -euo pipefail + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" >&2 +} + +# Check if running as root +if [[ $EUID -ne 0 ]]; then + log_error "This script must be run as root" + exit 1 +fi + +# VMID scheme +LXC_VMID=$((100 + VM_INDEX)) +QEMU_VMID=$((200 + VM_INDEX)) +LXC_NAME="test-vm-${VM_INDEX}" +QEMU_NAME="test-vm-qemu-${VM_INDEX}" + +log_info "Starting Proxmox setup (VM_INDEX=${VM_INDEX})..." + +# ============================================================================ +# STEP 1: Check if Proxmox is already installed +# ============================================================================ +log_info "Checking if Proxmox VE is already installed..." + +PROXMOX_INSTALLED=false +if command -v pvesh &>/dev/null && systemctl is-active --quiet pve-cluster 2>/dev/null; then + PROXMOX_INSTALLED=true + log_success "Proxmox VE is already installed and running." 
+fi + +# ============================================================================ +# STEP 2: Install Proxmox VE (if not installed) +# ============================================================================ +if [[ "$PROXMOX_INSTALLED" == false ]]; then + log_info "Proxmox VE not detected. Installing..." + + # Verify this is a Debian host + if [[ ! -f /etc/os-release ]]; then + log_error "Cannot detect OS. /etc/os-release missing." + exit 1 + fi + source /etc/os-release + if [[ "$ID" != "debian" ]]; then + log_error "This script requires Debian. Detected: $ID" + exit 1 + fi + log_info "Detected Debian $VERSION_CODENAME" + + # Ensure hostname resolves in /etc/hosts (Proxmox requirement) + HOSTNAME_FQDN=$(hostname -f 2>/dev/null || hostname) + HOSTNAME_SHORT=$(hostname -s 2>/dev/null || hostname) + HOST_IP=$(hostname -I | awk '{print $1}') + if ! grep -q "$HOSTNAME_FQDN" /etc/hosts 2>/dev/null; then + log_info "Adding hostname to /etc/hosts..." + echo "${HOST_IP} ${HOSTNAME_FQDN} ${HOSTNAME_SHORT}" >> /etc/hosts + log_success "Added ${HOST_IP} ${HOSTNAME_FQDN} ${HOSTNAME_SHORT} to /etc/hosts" + fi + + # Add Proxmox GPG key + no-subscription repo + export DEBIAN_FRONTEND=noninteractive + log_info "Adding Proxmox VE repository..." + apt-get update -qq + apt-get install -y -qq wget gnupg2 + + wget -qO /etc/apt/trusted.gpg.d/proxmox-release-$VERSION_CODENAME.gpg \ + "http://download.proxmox.com/debian/proxmox-release-$VERSION_CODENAME.gpg" + + echo "deb http://download.proxmox.com/debian/pve $VERSION_CODENAME pve-no-subscription" \ + > /etc/apt/sources.list.d/pve-install-repo.list + + apt-get update -qq + log_success "Proxmox repository added." + + # Preseed postfix debconf (local only) + log_info "Preseeding postfix configuration..." 
+ debconf-set-selections <<< "postfix postfix/mailname string $(hostname -f)" + debconf-set-selections <<< "postfix postfix/main_mailer_type string 'Local only'" + + # Install Proxmox VE + log_info "Installing Proxmox VE packages (this may take several minutes)..." + apt-get install -y proxmox-ve postfix open-iscsi chrony + + log_success "Proxmox VE packages installed." + + # Remove Debian default kernel and os-prober + log_info "Removing Debian default kernel and os-prober..." + apt-get remove -y os-prober 2>/dev/null || true + # Remove non-PVE kernels + DEBIAN_KERNELS=$(dpkg -l | awk '/linux-image-[0-9]/{print $2}' | grep -v pve || true) + if [[ -n "$DEBIAN_KERNELS" ]]; then + log_info "Removing Debian kernels: $DEBIAN_KERNELS" + apt-get remove -y $DEBIAN_KERNELS 2>/dev/null || true + fi + apt-get autoremove -y 2>/dev/null || true + + log_success "Cleanup complete." +fi + +# ============================================================================ +# REBOOT GATE: Check if running PVE kernel +# ============================================================================ +log_info "Checking running kernel..." + +RUNNING_KERNEL=$(uname -r) +if [[ "$RUNNING_KERNEL" != *-pve ]]; then + echo "" + echo "============================================================================" + log_warn "Proxmox VE is installed but NOT running the PVE kernel." + log_warn "Current kernel: $RUNNING_KERNEL" + echo "" + log_warn "A REBOOT is required before Proxmox services can start." + log_warn "After reboot, re-run this script to complete setup (VM/CT creation)." 
+ echo "" + log_info "To reboot: sudo reboot" + echo "============================================================================" + exit 0 +fi + +log_success "Running PVE kernel: $RUNNING_KERNEL" + +# ============================================================================ +# STEP 3: Verify Proxmox services +# ============================================================================ +log_info "Verifying Proxmox services..." + +SERVICES=("pve-cluster" "pvedaemon" "pveproxy" "pvestatd") +ALL_OK=true +for svc in "${SERVICES[@]}"; do + if systemctl is-active --quiet "$svc"; then + log_success "$svc is running." + else + log_warn "$svc is not running. Attempting to start..." + systemctl start "$svc" 2>/dev/null || true + sleep 2 + if systemctl is-active --quiet "$svc"; then + log_success "$svc started." + else + log_error "$svc failed to start." + ALL_OK=false + fi + fi +done + +if [[ "$ALL_OK" == false ]]; then + log_error "Some Proxmox services failed. Check 'systemctl status pve-cluster'." + exit 1 +fi + +# ============================================================================ +# STEP 4: Disable enterprise repo nag +# ============================================================================ +log_info "Disabling enterprise repository sources..." + +rm -f /etc/apt/sources.list.d/pve-enterprise.list 2>/dev/null || true +rm -f /etc/apt/sources.list.d/ceph.list 2>/dev/null || true + +log_success "Enterprise repo sources removed." + +# ============================================================================ +# STEP 5: Create API token +# ============================================================================ +log_info "Creating API token for daemon access..." + +API_TOKEN_ID="root@pam!fluid" +API_SECRET="" + +# Check if token already exists +if pveum user token list root@pam 2>/dev/null | grep -q "fluid"; then + log_warn "API token 'fluid' already exists. Removing and recreating..." 
+ pveum user token remove root@pam fluid 2>/dev/null || true +fi + +TOKEN_OUTPUT=$(pveum user token add root@pam fluid --privsep 0 2>&1) +API_SECRET=$(echo "$TOKEN_OUTPUT" | grep -oE '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}') + +if [[ -z "$API_SECRET" ]]; then + log_error "Failed to extract API token secret from output:" + echo "$TOKEN_OUTPUT" + exit 1 +fi + +log_success "API token created: $API_TOKEN_ID" + +# ============================================================================ +# STEP 6: Detect node name + verify storage +# ============================================================================ +log_info "Detecting node name and verifying storage..." + +PVE_NODE=$(hostname) +log_info "Node name: $PVE_NODE" + +# Verify local-lvm exists (for LXC rootfs) +if pvesm status | grep -q "local-lvm"; then + log_success "Storage 'local-lvm' available." +else + log_warn "Storage 'local-lvm' not found. LXC containers may need manual storage config." +fi + +# Verify local exists (for QEMU disk images, ISOs, vzdump) +if pvesm status | grep -q "local "; then + log_success "Storage 'local' available." +else + log_error "Storage 'local' not found. Cannot continue." + exit 1 +fi + +# ============================================================================ +# STEP 7: Add SSH keys to host +# ============================================================================ +if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then + log_info "Adding SSH public keys to host authorized_keys..." + HOST_SSH_DIR="/root/.ssh" + mkdir -p "$HOST_SSH_DIR" + chmod 700 "$HOST_SSH_DIR" + touch "$HOST_SSH_DIR/authorized_keys" + chmod 600 "$HOST_SSH_DIR/authorized_keys" + while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + pubkey="${line#* }" + if ! 
grep -qF "$pubkey" "$HOST_SSH_DIR/authorized_keys"; then + echo "$pubkey" >> "$HOST_SSH_DIR/authorized_keys" + username="${line%% *}" + log_success "Added key for ${username} to host authorized_keys" + fi + done < "$SSH_USERS_FILE" +fi + +# ============================================================================ +# STEP 8: Download Ubuntu LXC template +# ============================================================================ +log_info "Downloading Ubuntu LXC template..." + +LXC_TEMPLATE="ubuntu-22.04-standard_22.04-1_amd64.tar.zst" + +pveam update 2>/dev/null || true + +# Check if template already exists +if pveam list local 2>/dev/null | grep -q "$LXC_TEMPLATE"; then + log_success "LXC template already available." +else + log_info "Downloading $LXC_TEMPLATE..." + if pveam download local "$LXC_TEMPLATE"; then + log_success "LXC template downloaded." + else + # Try alternate template name + LXC_TEMPLATE="ubuntu-22.04-standard_22.04-1_amd64.tar.gz" + log_info "Trying alternate template: $LXC_TEMPLATE..." + if pveam download local "$LXC_TEMPLATE"; then + log_success "LXC template downloaded." + else + log_error "Failed to download LXC template." + exit 1 + fi + fi +fi + +# Arrays to track created resources for summary +CREATED_NAMES=() +CREATED_IPS=() +CREATED_TYPES=() + +# ============================================================================ +# create_lxc: Create an LXC source container +# +# Arguments: +# $1 - ct_name +# $2 - vmid +# ============================================================================ +create_lxc() { + local ct_name="$1" + local vmid="$2" + + log_info "Creating LXC container '${ct_name}' (VMID: ${vmid})..." + + # Destroy existing if present + if pct status "$vmid" &>/dev/null; then + log_warn "Container VMID ${vmid} already exists. Destroying..." 
+ pct stop "$vmid" --force 2>/dev/null || true + sleep 2 + pct destroy "$vmid" --force --purge 2>/dev/null || true + fi + + # Create container + pct create "$vmid" "local:vztmpl/${LXC_TEMPLATE}" \ + --hostname "$ct_name" \ + --cores 2 \ + --memory 1024 \ + --swap 512 \ + --storage local-lvm \ + --rootfs local-lvm:8 \ + --net0 "name=eth0,bridge=vmbr0,ip=dhcp" \ + --unprivileged 1 \ + --features nesting=1 \ + --start 0 + + log_success "Container '${ct_name}' created." + + # Start container + log_info "Starting container '${ct_name}'..." + pct start "$vmid" + sleep 5 + + # Wait for IP + log_info "Waiting for '${ct_name}' to obtain IP address..." + local max_wait=120 + local wait_interval=5 + local elapsed=0 + local ct_ip="" + + while [[ $elapsed -lt $max_wait ]]; do + ct_ip=$(pct exec "$vmid" -- ip -4 addr show eth0 2>/dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1 || true) + # Skip localhost + if [[ -n "$ct_ip" ]] && [[ "$ct_ip" != "127."* ]]; then + log_success "Container '${ct_name}' IP: ${ct_ip}" + break + fi + log_info "Waiting for '${ct_name}' IP... (${elapsed}s / ${max_wait}s)" + sleep $wait_interval + elapsed=$((elapsed + wait_interval)) + done + + if [[ -z "$ct_ip" ]] || [[ "$ct_ip" == "127."* ]]; then + ct_ip="" + log_warn "Container '${ct_name}' did not obtain IP within ${max_wait}s." + log_warn "Check: pct exec ${vmid} -- ip addr" + fi + + # Install openssh-server + basic tools + log_info "Installing packages in container '${ct_name}'..." + pct exec "$vmid" -- bash -c "apt-get update -qq && apt-get install -y -qq openssh-server curl wget sudo" 2>/dev/null || true + + # Ensure SSH is running + pct exec "$vmid" -- bash -c "systemctl enable ssh && systemctl start ssh" 2>/dev/null || true + + # Add SSH users from file + if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then + log_info "Adding SSH users to container '${ct_name}'..." 
+ while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + local username="${line%% *}" + local pubkey="${line#* }" + pct exec "$vmid" -- bash -c " + id '$username' &>/dev/null || useradd -m -s /bin/bash '$username' + echo '$username ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/$username + mkdir -p /home/$username/.ssh + echo '$pubkey' >> /home/$username/.ssh/authorized_keys + chmod 700 /home/$username/.ssh + chmod 600 /home/$username/.ssh/authorized_keys + chown -R $username:$username /home/$username/.ssh + " 2>/dev/null || true + log_success "Added user ${username} to container '${ct_name}'" + done < "$SSH_USERS_FILE" + fi + + # Also set root password for fallback access + pct exec "$vmid" -- bash -c "echo 'root:ubuntu' | chpasswd" 2>/dev/null || true + + # Track for summary + CREATED_NAMES+=("$ct_name") + CREATED_IPS+=("${ct_ip:-pending}") + CREATED_TYPES+=("LXC (VMID: $vmid)") +} + +# ============================================================================ +# create_qemu_vm: Create a QEMU VM with cloud-init +# +# Arguments: +# $1 - vm_name +# $2 - vmid +# ============================================================================ +create_qemu_vm() { + local vm_name="$1" + local vmid="$2" + + log_info "Creating QEMU VM '${vm_name}' (VMID: ${vmid})..." + + # Destroy existing if present + if qm status "$vmid" &>/dev/null; then + log_warn "VM VMID ${vmid} already exists. Destroying..." + qm stop "$vmid" --force 2>/dev/null || true + sleep 2 + qm destroy "$vmid" --force --purge 2>/dev/null || true + fi + + # Download Ubuntu cloud image if missing + local IMAGE_DIR="/var/lib/vz/template/qemu" + local CLOUD_IMAGE="ubuntu-22.04-minimal-cloudimg-amd64.img" + local CLOUD_IMAGE_URL="https://cloud-images.ubuntu.com/minimal/releases/jammy/release/${CLOUD_IMAGE}" + local CLOUD_IMAGE_PATH="${IMAGE_DIR}/${CLOUD_IMAGE}" + + mkdir -p "$IMAGE_DIR" + + if [[ ! 
-f "$CLOUD_IMAGE_PATH" ]]; then + log_info "Downloading Ubuntu cloud image..." + if wget -q --show-progress -O "$CLOUD_IMAGE_PATH" "$CLOUD_IMAGE_URL"; then + log_success "Cloud image downloaded." + else + log_error "Failed to download cloud image." + exit 1 + fi + else + log_info "Cloud image already exists at $CLOUD_IMAGE_PATH" + fi + + # Create VM + qm create "$vmid" \ + --name "$vm_name" \ + --cores 2 \ + --memory 2048 \ + --net0 "virtio,bridge=vmbr0" \ + --agent enabled=1 \ + --ostype l26 \ + --scsihw virtio-scsi-single + + # Import disk to local storage + log_info "Importing disk from cloud image..." + qm importdisk "$vmid" "$CLOUD_IMAGE_PATH" local 2>/dev/null + + # Attach disk as scsi0 + qm set "$vmid" --scsi0 "local:${vmid}/vm-${vmid}-disk-0.raw" + qm set "$vmid" --boot order=scsi0 + + # Resize disk to 10G + qm resize "$vmid" scsi0 10G + + # Add cloud-init drive + qm set "$vmid" --ide2 local-lvm:cloudinit + + # Configure cloud-init + qm set "$vmid" --ciuser ubuntu --cipassword ubuntu + qm set "$vmid" --ipconfig0 ip=dhcp + + # Add SSH keys via cloud-init if users file exists + if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then + local SSHKEYS_TMP + SSHKEYS_TMP=$(mktemp) + while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + local pubkey="${line#* }" + echo "$pubkey" >> "$SSHKEYS_TMP" + done < "$SSH_USERS_FILE" + qm set "$vmid" --sshkeys "$SSHKEYS_TMP" + rm -f "$SSHKEYS_TMP" + fi + + # Start VM + log_info "Starting VM '${vm_name}'..." + qm start "$vmid" + + # Wait for IP via guest agent + log_info "Waiting for '${vm_name}' to obtain IP address (requires guest agent)..." + local max_wait=180 + local wait_interval=10 + local elapsed=0 + local vm_ip="" + + while [[ $elapsed -lt $max_wait ]]; do + # Try guest agent first + vm_ip=$(qm guest cmd "$vmid" network-get-interfaces 2>/dev/null | \ + grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | grep -v '^127\.' 
| head -1 || true) + + if [[ -n "$vm_ip" ]]; then + log_success "VM '${vm_name}' IP: ${vm_ip}" + break + fi + + log_info "Waiting for '${vm_name}' IP... (${elapsed}s / ${max_wait}s)" + sleep $wait_interval + elapsed=$((elapsed + wait_interval)) + done + + if [[ -z "$vm_ip" ]]; then + log_warn "VM '${vm_name}' did not obtain IP within ${max_wait}s." + log_warn "The guest agent may not be ready yet. Check manually:" + log_warn " qm guest cmd ${vmid} network-get-interfaces" + log_warn " qm terminal ${vmid}" + fi + + # Track for summary + CREATED_NAMES+=("$vm_name") + CREATED_IPS+=("${vm_ip:-pending}") + CREATED_TYPES+=("QEMU (VMID: $vmid)") +} + +# ============================================================================ +# STEP 9: Create source LXC container +# ============================================================================ +create_lxc "$LXC_NAME" "$LXC_VMID" + +# ============================================================================ +# STEP 10: Create source QEMU VM +# ============================================================================ +create_qemu_vm "$QEMU_NAME" "$QEMU_VMID" + +# ============================================================================ +# STEP 11: Final Summary +# ============================================================================ +echo "" +echo "============================================================================" +log_success "Proxmox setup complete!" 
+echo "============================================================================" +echo "" +echo "Setup Summary:" +echo " - Proxmox VE running on kernel: $(uname -r)" +echo " - Node: ${PVE_NODE}" +echo "" +for i in "${!CREATED_NAMES[@]}"; do + echo " ${CREATED_TYPES[$i]}: '${CREATED_NAMES[$i]}'" + if [[ "${CREATED_IPS[$i]}" != "pending" ]]; then + echo " - IP Address: ${CREATED_IPS[$i]}" + else + echo " - IP Address: (pending)" + fi +done +echo "" +echo " API Token:" +echo " - Token ID: ${API_TOKEN_ID}" +echo " - Secret: ${API_SECRET}" +echo "" +if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then + echo " SSH Users:" + while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + username="${line%% *}" + echo " ${username} (key-based auth)" + done < "$SSH_USERS_FILE" + echo "" +fi +echo " Daemon config snippet (daemon.yaml):" +echo " provider: lxc" +echo " lxc:" +echo " host: \"https://$(hostname -I | awk '{print $1}'):8006\"" +echo " token_id: \"${API_TOKEN_ID}\"" +echo " secret: \"${API_SECRET}\"" +echo " node: \"${PVE_NODE}\"" +echo " storage: \"local-lvm\"" +echo " bridge: \"vmbr0\"" +echo " vmid_start: 9000" +echo " vmid_end: 9999" +echo " verify_ssl: false" +echo "" +echo "Useful commands:" +echo " pct list # List LXC containers" +echo " qm list # List QEMU VMs" +echo " pvesh get /access/users/root@pam/token # List API tokens" +echo " pct exec ${LXC_VMID} -- bash # Shell into LXC container" +echo " qm terminal ${QEMU_VMID} # Console to QEMU VM" +echo "" diff --git a/scripts/setup-ubuntu.sh b/scripts/setup-ubuntu.sh index f3f97467..1984ae5c 100755 --- a/scripts/setup-ubuntu.sh +++ b/scripts/setup-ubuntu.sh @@ -41,7 +41,6 @@ while [[ $# -gt 0 ]]; do done VM_INDEX="${VM_INDEX:-1}" -VM_NAME="test-vm-${VM_INDEX}" set -euo pipefail @@ -196,9 +195,9 @@ else fi # ============================================================================ -# STEP 7: Create Test VM (Ubuntu 22.04 Cloud Image) +# 
STEP 7: Create Test VMs (Ubuntu 22.04 Cloud Image) # ============================================================================ -log_info "Creating real Ubuntu test VM '${VM_NAME}'..." +log_info "Creating test VMs..." IMAGE_DIR="/var/lib/libvirt/images" CLOUD_INIT_DIR="${IMAGE_DIR}/cloud-init" @@ -223,60 +222,96 @@ else log_info "Base image already exists at $BASE_IMAGE_PATH" fi -# 2. Create Disk for this VM (Copy-on-Write) -VM_DISK="${IMAGE_DIR}/${VM_NAME}.qcow2" -log_info "Creating VM disk: $VM_DISK" -if [[ -f "$VM_DISK" ]]; then - log_warn "Disk $VM_DISK already exists, overwriting..." - rm -f "$VM_DISK" +# Add SSH public keys to KVM host for proxy jump access (once, not per VM) +if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then + log_info "Adding SSH public keys to KVM host authorized_keys..." + HOST_SSH_DIR="/root/.ssh" + mkdir -p "$HOST_SSH_DIR" + chmod 700 "$HOST_SSH_DIR" + touch "$HOST_SSH_DIR/authorized_keys" + chmod 600 "$HOST_SSH_DIR/authorized_keys" + while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + pubkey="${line#* }" + if ! grep -qF "$pubkey" "$HOST_SSH_DIR/authorized_keys"; then + echo "$pubkey" >> "$HOST_SSH_DIR/authorized_keys" + username="${line%% *}" + log_success "Added key for ${username} to host authorized_keys" + fi + done < "$SSH_USERS_FILE" fi -qemu-img create -f qcow2 -F qcow2 -b "$BASE_IMAGE_PATH" "$VM_DISK" 10G -# 3. Create Cloud-Init Config with proper network configuration -# Store in persistent location so VM can access it on reboot -SEED_DIR="${CLOUD_INIT_DIR}/${VM_NAME}" -mkdir -p "$SEED_DIR" +# Arrays to track created VMs for summary +CREATED_VM_NAMES=() +CREATED_VM_IPS=() +CREATED_VM_MACS=() +CREATED_VM_DISKS=() + +# ============================================================================ +# create_vm: Create a single VM with cloud-init, wait for IP, verify network +# +# Arguments: +# $1 - vm_name (e.g. 
"test-vm-1") +# $2 - vm_index (numeric, for MAC generation) +# $3 - mac_prefix (e.g. "52:54:00" or "52:54:01") +# ============================================================================ +create_vm() { + local vm_name="$1" + local vm_index="$2" + local mac_prefix="$3" + + log_info "Creating VM '${vm_name}'..." + + # Create Disk (Copy-on-Write) + local vm_disk="${IMAGE_DIR}/${vm_name}.qcow2" + log_info "Creating VM disk: $vm_disk" + if [[ -f "$vm_disk" ]]; then + log_warn "Disk $vm_disk already exists, overwriting..." + rm -f "$vm_disk" + fi + qemu-img create -f qcow2 -F qcow2 -b "$BASE_IMAGE_PATH" "$vm_disk" 10G -USER_DATA="${SEED_DIR}/user-data" -META_DATA="${SEED_DIR}/meta-data" -NETWORK_CONFIG="${SEED_DIR}/network-config" + # Create Cloud-Init Config + local seed_dir="${CLOUD_INIT_DIR}/${vm_name}" + mkdir -p "$seed_dir" -# Generate a unique instance-id for this VM -INSTANCE_ID="${VM_NAME}-$(date +%s)" + local user_data="${seed_dir}/user-data" + local meta_data="${seed_dir}/meta-data" + local network_config="${seed_dir}/network-config" + local instance_id="${vm_name}-$(date +%s)" -log_info "Creating cloud-init configuration with network settings..." + log_info "Creating cloud-init configuration for '${vm_name}'..." 
-# User-data: password, SSH, guest agent -# NOTE: Network config is in separate network-config file, not here -# Having it in both places can cause conflicts -cat > "$USER_DATA" < "$user_data" <> "$USER_DATA" - echo "users:" >> "$USER_DATA" - echo " - default" >> "$USER_DATA" - while IFS= read -r line || [[ -n "$line" ]]; do - [[ -z "$line" ]] && continue - [[ "$line" =~ ^#.*$ ]] && continue - username="${line%% *}" - pubkey="${line#* }" - cat >> "$USER_DATA" <> "$user_data" + echo "users:" >> "$user_data" + echo " - default" >> "$user_data" + while IFS= read -r line || [[ -n "$line" ]]; do + [[ -z "$line" ]] && continue + [[ "$line" =~ ^#.*$ ]] && continue + local username="${line%% *}" + local pubkey="${line#* }" + cat >> "$user_data" <> "$USER_DATA" <> "$user_data" <> "$HOST_SSH_DIR/authorized_keys" - username="${line%% *}" - log_success "Added key for ${username} to host authorized_keys" - fi - done < "$SSH_USERS_FILE" -fi - -# Meta-data: unique instance-id is CRITICAL for cloud-init to run on clones -cat > "$META_DATA" < "$meta_data" < "$NETWORK_CONFIG" < "$network_config" </dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1 || true) + while [[ $elapsed -lt $max_wait ]]; do + vm_ip=$(virsh domifaddr "${vm_name}" --source lease 2>/dev/null | grep -oE '([0-9]{1,3}\.){3}[0-9]{1,3}' | head -1 || true) - if [[ -n "$VM_IP" ]]; then - log_success "VM '${VM_NAME}' obtained IP address: ${VM_IP}" - break + if [[ -n "$vm_ip" ]]; then + log_success "VM '${vm_name}' obtained IP address: ${vm_ip}" + break + fi + + log_info "Waiting for '${vm_name}' IP... (${elapsed}s / ${max_wait}s)" + sleep $wait_interval + elapsed=$((elapsed + wait_interval)) + done + + if [[ -z "$vm_ip" ]]; then + log_warn "VM '${vm_name}' did not obtain IP address within ${max_wait} seconds." + log_warn "Check: virsh domifaddr ${vm_name} --source lease" + log_warn "Check: virsh console ${vm_name} (login: ubuntu/ubuntu)" fi - log_info "Waiting for IP... 
(${ELAPSED}s / ${MAX_WAIT}s)" - sleep $WAIT_INTERVAL - ELAPSED=$((ELAPSED + WAIT_INTERVAL)) -done + # Verify VM network interface + log_info "Verifying '${vm_name}' network configuration..." -if [[ -z "$VM_IP" ]]; then - log_warn "VM did not obtain IP address within ${MAX_WAIT} seconds." - log_warn "This may indicate a network configuration issue." - log_warn "Check: virsh domifaddr ${VM_NAME} --source lease" - log_warn "Check: virsh console ${VM_NAME} (login: ubuntu/ubuntu)" -fi + local vm_mac + vm_mac=$(virsh domiflist "${vm_name}" 2>/dev/null | grep -oE '([0-9a-f]{2}:){5}[0-9a-f]{2}' | head -1 || true) + if [[ -n "$vm_mac" ]]; then + log_success "VM '${vm_name}' MAC address: ${vm_mac}" + else + log_warn "Could not determine MAC address for '${vm_name}'" + fi -# ============================================================================ -# STEP 9: Verify VM network interface -# ============================================================================ -log_info "Verifying VM network configuration..." 
+ local iface + iface=$(virsh domiflist "${vm_name}" 2>/dev/null | awk 'NR>2 && $1 != "" {print $1}' | head -1 || true) + if [[ -n "$iface" ]]; then + log_info "Network interface: ${iface}" + virsh domifstat "${vm_name}" "${iface}" 2>/dev/null || true + fi -# Check MAC address -VM_MAC=$(virsh domiflist "${VM_NAME}" 2>/dev/null | grep -oE '([0-9a-f]{2}:){5}[0-9a-f]{2}' | head -1 || true) -if [[ -n "$VM_MAC" ]]; then - log_success "VM MAC address: ${VM_MAC}" -else - log_warn "Could not determine VM MAC address" -fi + # Track results for summary + CREATED_VM_NAMES+=("$vm_name") + CREATED_VM_IPS+=("$vm_ip") + CREATED_VM_MACS+=("$vm_mac") + CREATED_VM_DISKS+=("$vm_disk") +} -# Check interface stats -IFACE=$(virsh domiflist "${VM_NAME}" 2>/dev/null | awk 'NR>2 && $1 != "" {print $1}' | head -1 || true) -if [[ -n "$IFACE" ]]; then - log_info "Network interface: ${IFACE}" - virsh domifstat "${VM_NAME}" "${IFACE}" 2>/dev/null || true -fi +# Create both VMs +create_vm "test-vm-${VM_INDEX}" "$VM_INDEX" "52:54:00" +create_vm "sandbox-host-${VM_INDEX}" "$VM_INDEX" "52:54:01" # ============================================================================ -# STEP 10: Final Summary +# STEP 8: Final Summary # ============================================================================ echo "" echo "============================================================================" @@ -416,17 +434,21 @@ echo "Setup Summary:" echo " - Installed: qemu-kvm, libvirt-daemon-system, libvirt-clients, bridge-utils, virtinst" echo " - Service: libvirtd enabled and started" echo " - Network: default network active with DHCP" -echo " - Test VM: '${VM_NAME}' has been created and started" -echo " - VM Disk: ${VM_DISK}" +echo "" +for i in "${!CREATED_VM_NAMES[@]}"; do + echo " VM: '${CREATED_VM_NAMES[$i]}'" + echo " - Disk: ${CREATED_VM_DISKS[$i]}" + if [[ -n "${CREATED_VM_MACS[$i]}" ]]; then + echo " - MAC Address: ${CREATED_VM_MACS[$i]}" + fi + if [[ -n "${CREATED_VM_IPS[$i]}" ]]; then + echo " - IP 
Address: ${CREATED_VM_IPS[$i]}" + else + echo " - IP Address: (pending - check with 'virsh domifaddr ${CREATED_VM_NAMES[$i]} --source lease')" + fi +done +echo "" echo " - Cloud-Init: virt-install --cloud-init (native injection)" -if [[ -n "$VM_MAC" ]]; then - echo " - MAC Address: ${VM_MAC}" -fi -if [[ -n "$VM_IP" ]]; then - echo " - IP Address: ${VM_IP}" -else - echo " - IP Address: (pending - check with 'virsh domifaddr ${VM_NAME} --source lease')" -fi echo " - Login: ubuntu / ubuntu (password)" if [[ -n "$SSH_USERS_FILE" ]] && [[ -f "$SSH_USERS_FILE" ]]; then echo " - SSH Users:" @@ -444,7 +466,7 @@ fi echo "" echo "Useful commands:" echo " virsh list --all # List all VMs" -echo " virsh domifaddr ${VM_NAME} --source lease # Get VM IP" -echo " virsh console ${VM_NAME} # Access VM console" -echo " ssh ubuntu@${VM_IP:-} # SSH to VM (password: ubuntu)" +for i in "${!CREATED_VM_NAMES[@]}"; do + echo " virsh domifaddr ${CREATED_VM_NAMES[$i]} --source lease # Get ${CREATED_VM_NAMES[$i]} IP" +done echo "" diff --git a/sdk/fluid-py/.openapi-generator/FILES b/sdk/fluid-py/.openapi-generator/FILES index 1db6db3d..26a11bd4 100644 --- a/sdk/fluid-py/.openapi-generator/FILES +++ b/sdk/fluid-py/.openapi-generator/FILES @@ -3,267 +3,91 @@ .gitlab-ci.yml .travis.yml README.md -docs/AccessApi.md -docs/AnsibleApi.md -docs/AnsiblePlaybooksApi.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleExportPlaybookResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleGetPlaybookResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleJob.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobRequest.md 
-docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobStatus.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleListPlaybooksResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleReorderTasksRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalErrorErrorResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestAccessErrorResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestCaPublicKeyResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestCertificateResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestDestroySandboxResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestDiffRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestDiffResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestDiscoverIPResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestErrorResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestGenerateResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestGetSandboxResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestHostError.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestInjectSSHKeyRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestListCertificatesResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxCommandsResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxesResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestListSessionsResponse.md 
-docs/GithubComAspectrrFluidShFluidRemoteInternalRestListVMsResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestPublishRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestPublishResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestSandboxInfo.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestSessionResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxRequest.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxResponse.md -docs/GithubComAspectrrFluidShFluidRemoteInternalRestVmInfo.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStoreChangeDiff.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStoreCommand.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStoreCommandExecRecord.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStoreCommandSummary.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStoreDiff.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStorePackageInfo.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStorePlaybook.md 
-docs/GithubComAspectrrFluidShFluidRemoteInternalStorePlaybookTask.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStoreSandbox.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStoreSandboxState.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStoreServiceChange.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshot.md -docs/GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshotKind.md +docs/AuthApi.md +docs/BillingApi.md docs/HealthApi.md -docs/InternalAnsibleAddTaskRequest.md -docs/InternalAnsibleAddTaskResponse.md -docs/InternalAnsibleCreatePlaybookRequest.md -docs/InternalAnsibleCreatePlaybookResponse.md -docs/InternalAnsibleExportPlaybookResponse.md -docs/InternalAnsibleGetPlaybookResponse.md -docs/InternalAnsibleJob.md -docs/InternalAnsibleJobRequest.md -docs/InternalAnsibleJobResponse.md -docs/InternalAnsibleJobStatus.md -docs/InternalAnsibleListPlaybooksResponse.md -docs/InternalAnsibleReorderTasksRequest.md -docs/InternalAnsibleUpdateTaskRequest.md -docs/InternalAnsibleUpdateTaskResponse.md -docs/InternalRestAccessErrorResponse.md -docs/InternalRestCaPublicKeyResponse.md -docs/InternalRestCertificateResponse.md -docs/InternalRestCreateSandboxRequest.md -docs/InternalRestCreateSandboxResponse.md -docs/InternalRestDestroySandboxResponse.md -docs/InternalRestDiffRequest.md -docs/InternalRestDiffResponse.md -docs/InternalRestDiscoverIPResponse.md -docs/InternalRestErrorResponse.md -docs/InternalRestGenerateResponse.md -docs/InternalRestGetSandboxResponse.md -docs/InternalRestHealthResponse.md -docs/InternalRestHostError.md -docs/InternalRestInjectSSHKeyRequest.md -docs/InternalRestListCertificatesResponse.md -docs/InternalRestListSandboxCommandsResponse.md -docs/InternalRestListSandboxesResponse.md -docs/InternalRestListSessionsResponse.md -docs/InternalRestListVMsResponse.md -docs/InternalRestPublishRequest.md -docs/InternalRestPublishResponse.md -docs/InternalRestRequestAccessRequest.md -docs/InternalRestRequestAccessResponse.md 
-docs/InternalRestRevokeCertificateRequest.md -docs/InternalRestRevokeCertificateResponse.md -docs/InternalRestRunCommandRequest.md -docs/InternalRestRunCommandResponse.md -docs/InternalRestSandboxInfo.md -docs/InternalRestSessionEndRequest.md -docs/InternalRestSessionEndResponse.md -docs/InternalRestSessionResponse.md -docs/InternalRestSessionStartRequest.md -docs/InternalRestSessionStartResponse.md -docs/InternalRestSnapshotRequest.md -docs/InternalRestSnapshotResponse.md -docs/InternalRestStartSandboxRequest.md -docs/InternalRestStartSandboxResponse.md -docs/InternalRestVmInfo.md -docs/SandboxApi.md -docs/TimeDuration.md -docs/VMsApi.md +docs/HostTokensApi.md +docs/HostsApi.md +docs/MembersApi.md +docs/OrchestratorCreateSandboxRequest.md +docs/OrchestratorHostInfo.md +docs/OrchestratorPrepareRequest.md +docs/OrchestratorReadSourceRequest.md +docs/OrchestratorRunCommandRequest.md +docs/OrchestratorRunSourceRequest.md +docs/OrchestratorSnapshotRequest.md +docs/OrchestratorSnapshotResponse.md +docs/OrchestratorSourceCommandResult.md +docs/OrchestratorSourceFileResult.md +docs/OrganizationsApi.md +docs/RestAddMemberRequest.md +docs/RestAuthResponse.md +docs/RestBillingResponse.md +docs/RestCalculatorRequest.md +docs/RestCalculatorResponse.md +docs/RestCreateHostTokenRequest.md +docs/RestCreateOrgRequest.md +docs/RestFreeTierInfo.md +docs/RestHostTokenResponse.md +docs/RestLoginRequest.md +docs/RestMemberResponse.md +docs/RestOrgResponse.md +docs/RestRegisterRequest.md +docs/RestSwaggerError.md +docs/RestUpdateOrgRequest.md +docs/RestUsageSummary.md +docs/RestUserResponse.md +docs/SandboxesApi.md +docs/SourceVMsApi.md +docs/StoreCommand.md +docs/StoreSandbox.md +docs/StoreSandboxState.md fluid/__init__.py fluid/api/__init__.py -fluid/api/access_api.py -fluid/api/ansible_api.py -fluid/api/ansible_playbooks_api.py +fluid/api/auth_api.py +fluid/api/billing_api.py fluid/api/health_api.py -fluid/api/sandbox_api.py -fluid/api/vms_api.py +fluid/api/host_tokens_api.py 
+fluid/api/hosts_api.py +fluid/api/members_api.py +fluid/api/organizations_api.py +fluid/api/sandboxes_api.py +fluid/api/source_vms_api.py fluid/api_client.py fluid/api_response.py fluid/configuration.py fluid/exceptions.py fluid/models/__init__.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_add_task_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_add_task_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_export_playbook_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_get_playbook_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_status.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_list_playbooks_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_reorder_tasks_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_update_task_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_update_task_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_error_error_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_access_error_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_ca_public_key_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_certificate_response.py 
-fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_create_sandbox_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_create_sandbox_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_destroy_sandbox_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_diff_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_diff_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_discover_ip_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_error_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_generate_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_get_sandbox_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_health_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_host_error.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_inject_ssh_key_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_certificates_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandbox_commands_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandboxes_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sessions_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_vms_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_publish_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_publish_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_request_access_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_request_access_response.py 
-fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_revoke_certificate_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_revoke_certificate_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_run_command_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_run_command_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_sandbox_info.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_end_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_end_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_start_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_start_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_snapshot_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_snapshot_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_request.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_response.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_vm_info.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_change_diff.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command_exec_record.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command_summary.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_diff.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_package_info.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_playbook.py 
-fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_playbook_task.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_sandbox.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_sandbox_state.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_service_change.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_snapshot.py -fluid/models/github_com_aspectrr_fluid_sh_fluid_remote_internal_store_snapshot_kind.py -fluid/models/internal_ansible_add_task_request.py -fluid/models/internal_ansible_add_task_response.py -fluid/models/internal_ansible_create_playbook_request.py -fluid/models/internal_ansible_create_playbook_response.py -fluid/models/internal_ansible_export_playbook_response.py -fluid/models/internal_ansible_get_playbook_response.py -fluid/models/internal_ansible_job.py -fluid/models/internal_ansible_job_request.py -fluid/models/internal_ansible_job_response.py -fluid/models/internal_ansible_job_status.py -fluid/models/internal_ansible_list_playbooks_response.py -fluid/models/internal_ansible_reorder_tasks_request.py -fluid/models/internal_ansible_update_task_request.py -fluid/models/internal_ansible_update_task_response.py -fluid/models/internal_rest_access_error_response.py -fluid/models/internal_rest_ca_public_key_response.py -fluid/models/internal_rest_certificate_response.py -fluid/models/internal_rest_create_sandbox_request.py -fluid/models/internal_rest_create_sandbox_response.py -fluid/models/internal_rest_destroy_sandbox_response.py -fluid/models/internal_rest_diff_request.py -fluid/models/internal_rest_diff_response.py -fluid/models/internal_rest_discover_ip_response.py -fluid/models/internal_rest_error_response.py -fluid/models/internal_rest_generate_response.py -fluid/models/internal_rest_get_sandbox_response.py -fluid/models/internal_rest_health_response.py -fluid/models/internal_rest_host_error.py 
-fluid/models/internal_rest_inject_ssh_key_request.py -fluid/models/internal_rest_list_certificates_response.py -fluid/models/internal_rest_list_sandbox_commands_response.py -fluid/models/internal_rest_list_sandboxes_response.py -fluid/models/internal_rest_list_sessions_response.py -fluid/models/internal_rest_list_vms_response.py -fluid/models/internal_rest_publish_request.py -fluid/models/internal_rest_publish_response.py -fluid/models/internal_rest_request_access_request.py -fluid/models/internal_rest_request_access_response.py -fluid/models/internal_rest_revoke_certificate_request.py -fluid/models/internal_rest_revoke_certificate_response.py -fluid/models/internal_rest_run_command_request.py -fluid/models/internal_rest_run_command_response.py -fluid/models/internal_rest_sandbox_info.py -fluid/models/internal_rest_session_end_request.py -fluid/models/internal_rest_session_end_response.py -fluid/models/internal_rest_session_response.py -fluid/models/internal_rest_session_start_request.py -fluid/models/internal_rest_session_start_response.py -fluid/models/internal_rest_snapshot_request.py -fluid/models/internal_rest_snapshot_response.py -fluid/models/internal_rest_start_sandbox_request.py -fluid/models/internal_rest_start_sandbox_response.py -fluid/models/internal_rest_vm_info.py -fluid/models/time_duration.py +fluid/models/orchestrator_create_sandbox_request.py +fluid/models/orchestrator_host_info.py +fluid/models/orchestrator_prepare_request.py +fluid/models/orchestrator_read_source_request.py +fluid/models/orchestrator_run_command_request.py +fluid/models/orchestrator_run_source_request.py +fluid/models/orchestrator_snapshot_request.py +fluid/models/orchestrator_snapshot_response.py +fluid/models/orchestrator_source_command_result.py +fluid/models/orchestrator_source_file_result.py +fluid/models/rest_add_member_request.py +fluid/models/rest_auth_response.py +fluid/models/rest_billing_response.py +fluid/models/rest_calculator_request.py 
+fluid/models/rest_calculator_response.py +fluid/models/rest_create_host_token_request.py +fluid/models/rest_create_org_request.py +fluid/models/rest_free_tier_info.py +fluid/models/rest_host_token_response.py +fluid/models/rest_login_request.py +fluid/models/rest_member_response.py +fluid/models/rest_org_response.py +fluid/models/rest_register_request.py +fluid/models/rest_swagger_error.py +fluid/models/rest_update_org_request.py +fluid/models/rest_usage_summary.py +fluid/models/rest_user_response.py +fluid/models/store_command.py +fluid/models/store_sandbox.py +fluid/models/store_sandbox_state.py fluid/py.typed fluid/rest.py git_push.sh @@ -273,72 +97,42 @@ setup.cfg setup.py test-requirements.txt test/__init__.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_add_task_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_add_task_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_export_playbook_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_get_playbook_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_status.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_list_playbooks_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_reorder_tasks_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_update_task_request.py 
-test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_update_task_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_error_error_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_access_error_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_ca_public_key_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_certificate_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_create_sandbox_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_create_sandbox_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_destroy_sandbox_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_diff_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_diff_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_discover_ip_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_error_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_generate_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_get_sandbox_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_health_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_host_error.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_inject_ssh_key_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_certificates_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandbox_commands_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandboxes_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sessions_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_vms_response.py 
-test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_publish_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_publish_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_request_access_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_request_access_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_revoke_certificate_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_revoke_certificate_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_run_command_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_run_command_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_sandbox_info.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_end_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_end_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_start_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_start_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_snapshot_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_snapshot_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_request.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_response.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_vm_info.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_change_diff.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command_exec_record.py 
-test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command_summary.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_diff.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_package_info.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_playbook.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_playbook_task.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_sandbox.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_sandbox_state.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_service_change.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_snapshot.py -test/test_github_com_aspectrr_fluid_sh_fluid_remote_internal_store_snapshot_kind.py -test/test_internal_rest_host_error.py +test/test_auth_api.py +test/test_billing_api.py +test/test_host_tokens_api.py +test/test_hosts_api.py +test/test_members_api.py +test/test_orchestrator_create_sandbox_request.py +test/test_orchestrator_host_info.py +test/test_orchestrator_prepare_request.py +test/test_orchestrator_read_source_request.py +test/test_orchestrator_run_command_request.py +test/test_orchestrator_run_source_request.py +test/test_orchestrator_snapshot_request.py +test/test_orchestrator_snapshot_response.py +test/test_orchestrator_source_command_result.py +test/test_orchestrator_source_file_result.py +test/test_organizations_api.py +test/test_rest_add_member_request.py +test/test_rest_auth_response.py +test/test_rest_billing_response.py +test/test_rest_calculator_request.py +test/test_rest_calculator_response.py +test/test_rest_create_host_token_request.py +test/test_rest_create_org_request.py +test/test_rest_free_tier_info.py +test/test_rest_host_token_response.py +test/test_rest_login_request.py +test/test_rest_member_response.py +test/test_rest_org_response.py +test/test_rest_register_request.py +test/test_rest_swagger_error.py 
+test/test_rest_update_org_request.py +test/test_rest_usage_summary.py +test/test_rest_user_response.py +test/test_sandboxes_api.py +test/test_source_vms_api.py +test/test_store_command.py +test/test_store_sandbox.py +test/test_store_sandbox_state.py tox.ini diff --git a/sdk/fluid-py/.openapi-generator/VERSION b/sdk/fluid-py/.openapi-generator/VERSION index 193a12d6..0610c66b 100644 --- a/sdk/fluid-py/.openapi-generator/VERSION +++ b/sdk/fluid-py/.openapi-generator/VERSION @@ -1 +1 @@ -7.20.0-SNAPSHOT +7.21.0-SNAPSHOT diff --git a/sdk/fluid-py/README.md b/sdk/fluid-py/README.md index d0a6197a..fbf605b1 100644 --- a/sdk/fluid-py/README.md +++ b/sdk/fluid-py/README.md @@ -1,11 +1,11 @@ # Fluid -API for managing AI Agent VM sandboxes using libvirt +API for managing sandboxes, organizations, billing, and hosts This Python package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project: -- API version: 0.1.0 +- API version: 1.0 - Package version: 0.1.0 -- Generator version: 7.20.0-SNAPSHOT +- Generator version: 7.21.0-SNAPSHOT - Build package: org.openapitools.codegen.languages.PythonClientCodegen ## Requirements. @@ -61,7 +61,6 @@ async def main(): # Create a unified client client = VirshSandbox( host="http://localhost:8080", - tmux_host="http://localhost:8081" # Optional: separate host for tmux operations ) # Create a sandbox with simple parameters - no request objects needed! @@ -118,195 +117,109 @@ import fluid from fluid.rest import ApiException from pprint import pprint -# Defining the host is optional and defaults to http://localhost +# Defining the host is optional and defaults to http://localhost:8081/v1 # See configuration.py for a list of all supported configuration parameters. 
configuration = fluid.Configuration( - host = "http://localhost" + host = "http://localhost:8081/v1" ) # Enter a context with an instance of the API client with fluid.ApiClient(configuration) as api_client: # Create an instance of the API class - api_instance = fluid.AccessApi(api_client) + api_instance = fluid.AuthApi(api_client) + code = 'code_example' # str | OAuth authorization code try: - # Get the SSH CA public key - api_response = api_instance.get_ca_public_key() - print("The response of AccessApi->get_ca_public_key:\n") - pprint(api_response) + # GitHub OAuth callback + api_instance.auth_github_callback_get(code) except ApiException as e: - print("Exception when calling AccessApi->get_ca_public_key: %s\n" % e) + print("Exception when calling AuthApi->auth_github_callback_get: %s\n" % e) ``` ## Documentation for API Endpoints -All URIs are relative to *http://localhost* +All URIs are relative to *http://localhost:8081/v1* Class | Method | HTTP request | Description ------------ | ------------- | ------------- | ------------- -*AccessApi* | [**get_ca_public_key**](docs/AccessApi.md#get_ca_public_key) | **GET** /v1/access/ca-pubkey | Get the SSH CA public key -*AccessApi* | [**get_certificate**](docs/AccessApi.md#get_certificate) | **GET** /v1/access/certificate/{certID} | Get certificate details -*AccessApi* | [**list_certificates**](docs/AccessApi.md#list_certificates) | **GET** /v1/access/certificates | List certificates -*AccessApi* | [**list_sessions**](docs/AccessApi.md#list_sessions) | **GET** /v1/access/sessions | List sessions -*AccessApi* | [**record_session_end**](docs/AccessApi.md#record_session_end) | **POST** /v1/access/session/end | Record session end -*AccessApi* | [**record_session_start**](docs/AccessApi.md#record_session_start) | **POST** /v1/access/session/start | Record session start -*AccessApi* | [**request_access**](docs/AccessApi.md#request_access) | **POST** /v1/access/request | Request SSH access to a sandbox -*AccessApi* | 
[**revoke_certificate**](docs/AccessApi.md#revoke_certificate) | **DELETE** /v1/access/certificate/{certID} | Revoke a certificate -*AnsibleApi* | [**create_ansible_job**](docs/AnsibleApi.md#create_ansible_job) | **POST** /v1/ansible/jobs | Create Ansible job -*AnsibleApi* | [**get_ansible_job**](docs/AnsibleApi.md#get_ansible_job) | **GET** /v1/ansible/jobs/{job_id} | Get Ansible job -*AnsibleApi* | [**stream_ansible_job_output**](docs/AnsibleApi.md#stream_ansible_job_output) | **GET** /v1/ansible/jobs/{job_id}/stream | Stream Ansible job output -*AnsiblePlaybooksApi* | [**add_playbook_task**](docs/AnsiblePlaybooksApi.md#add_playbook_task) | **POST** /v1/ansible/playbooks/{playbook_name}/tasks | Add task to playbook -*AnsiblePlaybooksApi* | [**create_playbook**](docs/AnsiblePlaybooksApi.md#create_playbook) | **POST** /v1/ansible/playbooks | Create playbook -*AnsiblePlaybooksApi* | [**delete_playbook**](docs/AnsiblePlaybooksApi.md#delete_playbook) | **DELETE** /v1/ansible/playbooks/{playbook_name} | Delete playbook -*AnsiblePlaybooksApi* | [**delete_playbook_task**](docs/AnsiblePlaybooksApi.md#delete_playbook_task) | **DELETE** /v1/ansible/playbooks/{playbook_name}/tasks/{task_id} | Delete task -*AnsiblePlaybooksApi* | [**export_playbook**](docs/AnsiblePlaybooksApi.md#export_playbook) | **GET** /v1/ansible/playbooks/{playbook_name}/export | Export playbook -*AnsiblePlaybooksApi* | [**get_playbook**](docs/AnsiblePlaybooksApi.md#get_playbook) | **GET** /v1/ansible/playbooks/{playbook_name} | Get playbook -*AnsiblePlaybooksApi* | [**list_playbooks**](docs/AnsiblePlaybooksApi.md#list_playbooks) | **GET** /v1/ansible/playbooks | List playbooks -*AnsiblePlaybooksApi* | [**reorder_playbook_tasks**](docs/AnsiblePlaybooksApi.md#reorder_playbook_tasks) | **PATCH** /v1/ansible/playbooks/{playbook_name}/tasks/reorder | Reorder tasks -*AnsiblePlaybooksApi* | [**update_playbook_task**](docs/AnsiblePlaybooksApi.md#update_playbook_task) | **PUT** 
/v1/ansible/playbooks/{playbook_name}/tasks/{task_id} | Update task -*HealthApi* | [**get_health**](docs/HealthApi.md#get_health) | **GET** /v1/health | Health check -*SandboxApi* | [**create_sandbox**](docs/SandboxApi.md#create_sandbox) | **POST** /v1/sandboxes | Create a new sandbox -*SandboxApi* | [**create_snapshot**](docs/SandboxApi.md#create_snapshot) | **POST** /v1/sandboxes/{id}/snapshot | Create snapshot -*SandboxApi* | [**destroy_sandbox**](docs/SandboxApi.md#destroy_sandbox) | **DELETE** /v1/sandboxes/{id} | Destroy sandbox -*SandboxApi* | [**diff_snapshots**](docs/SandboxApi.md#diff_snapshots) | **POST** /v1/sandboxes/{id}/diff | Diff snapshots -*SandboxApi* | [**discover_sandbox_ip**](docs/SandboxApi.md#discover_sandbox_ip) | **GET** /v1/sandboxes/{id}/ip | Discover sandbox IP -*SandboxApi* | [**generate_configuration**](docs/SandboxApi.md#generate_configuration) | **POST** /v1/sandboxes/{id}/generate/{tool} | Generate configuration -*SandboxApi* | [**get_sandbox**](docs/SandboxApi.md#get_sandbox) | **GET** /v1/sandboxes/{id} | Get sandbox details -*SandboxApi* | [**inject_ssh_key**](docs/SandboxApi.md#inject_ssh_key) | **POST** /v1/sandboxes/{id}/sshkey | Inject SSH key into sandbox -*SandboxApi* | [**list_sandbox_commands**](docs/SandboxApi.md#list_sandbox_commands) | **GET** /v1/sandboxes/{id}/commands | List sandbox commands -*SandboxApi* | [**list_sandboxes**](docs/SandboxApi.md#list_sandboxes) | **GET** /v1/sandboxes | List sandboxes -*SandboxApi* | [**publish_changes**](docs/SandboxApi.md#publish_changes) | **POST** /v1/sandboxes/{id}/publish | Publish changes -*SandboxApi* | [**run_sandbox_command**](docs/SandboxApi.md#run_sandbox_command) | **POST** /v1/sandboxes/{id}/run | Run command in sandbox -*SandboxApi* | [**start_sandbox**](docs/SandboxApi.md#start_sandbox) | **POST** /v1/sandboxes/{id}/start | Start sandbox -*SandboxApi* | [**stream_sandbox_activity**](docs/SandboxApi.md#stream_sandbox_activity) | **GET** /v1/sandboxes/{id}/stream | 
Stream sandbox activity -*VMsApi* | [**list_virtual_machines**](docs/VMsApi.md#list_virtual_machines) | **GET** /v1/vms | List all host VMs +*AuthApi* | [**auth_github_callback_get**](docs/AuthApi.md#auth_github_callback_get) | **GET** /auth/github/callback | GitHub OAuth callback +*AuthApi* | [**auth_github_get**](docs/AuthApi.md#auth_github_get) | **GET** /auth/github | GitHub OAuth login +*AuthApi* | [**auth_google_callback_get**](docs/AuthApi.md#auth_google_callback_get) | **GET** /auth/google/callback | Google OAuth callback +*AuthApi* | [**auth_google_get**](docs/AuthApi.md#auth_google_get) | **GET** /auth/google | Google OAuth login +*AuthApi* | [**auth_login_post**](docs/AuthApi.md#auth_login_post) | **POST** /auth/login | Log in +*AuthApi* | [**auth_logout_post**](docs/AuthApi.md#auth_logout_post) | **POST** /auth/logout | Log out +*AuthApi* | [**auth_me_get**](docs/AuthApi.md#auth_me_get) | **GET** /auth/me | Get current user +*AuthApi* | [**auth_register_post**](docs/AuthApi.md#auth_register_post) | **POST** /auth/register | Register a new user +*BillingApi* | [**billing_calculator_post**](docs/BillingApi.md#billing_calculator_post) | **POST** /billing/calculator | Pricing calculator +*BillingApi* | [**orgs_slug_billing_get**](docs/BillingApi.md#orgs_slug_billing_get) | **GET** /orgs/{slug}/billing | Get billing info +*BillingApi* | [**orgs_slug_billing_portal_post**](docs/BillingApi.md#orgs_slug_billing_portal_post) | **POST** /orgs/{slug}/billing/portal | Billing portal +*BillingApi* | [**orgs_slug_billing_subscribe_post**](docs/BillingApi.md#orgs_slug_billing_subscribe_post) | **POST** /orgs/{slug}/billing/subscribe | Subscribe +*BillingApi* | [**orgs_slug_billing_usage_get**](docs/BillingApi.md#orgs_slug_billing_usage_get) | **GET** /orgs/{slug}/billing/usage | Get usage +*BillingApi* | [**webhooks_stripe_post**](docs/BillingApi.md#webhooks_stripe_post) | **POST** /webhooks/stripe | Stripe webhook +*HealthApi* | 
[**health_get**](docs/HealthApi.md#health_get) | **GET** /health | Health check +*HostTokensApi* | [**orgs_slug_hosts_tokens_get**](docs/HostTokensApi.md#orgs_slug_hosts_tokens_get) | **GET** /orgs/{slug}/hosts/tokens | List host tokens +*HostTokensApi* | [**orgs_slug_hosts_tokens_post**](docs/HostTokensApi.md#orgs_slug_hosts_tokens_post) | **POST** /orgs/{slug}/hosts/tokens | Create host token +*HostTokensApi* | [**orgs_slug_hosts_tokens_token_id_delete**](docs/HostTokensApi.md#orgs_slug_hosts_tokens_token_id_delete) | **DELETE** /orgs/{slug}/hosts/tokens/{tokenID} | Delete host token +*HostsApi* | [**orgs_slug_hosts_get**](docs/HostsApi.md#orgs_slug_hosts_get) | **GET** /orgs/{slug}/hosts | List hosts +*HostsApi* | [**orgs_slug_hosts_host_id_get**](docs/HostsApi.md#orgs_slug_hosts_host_id_get) | **GET** /orgs/{slug}/hosts/{hostID} | Get host +*MembersApi* | [**orgs_slug_members_get**](docs/MembersApi.md#orgs_slug_members_get) | **GET** /orgs/{slug}/members | List members +*MembersApi* | [**orgs_slug_members_member_id_delete**](docs/MembersApi.md#orgs_slug_members_member_id_delete) | **DELETE** /orgs/{slug}/members/{memberID} | Remove member +*MembersApi* | [**orgs_slug_members_post**](docs/MembersApi.md#orgs_slug_members_post) | **POST** /orgs/{slug}/members | Add member +*OrganizationsApi* | [**orgs_get**](docs/OrganizationsApi.md#orgs_get) | **GET** /orgs | List organizations +*OrganizationsApi* | [**orgs_post**](docs/OrganizationsApi.md#orgs_post) | **POST** /orgs | Create organization +*OrganizationsApi* | [**orgs_slug_delete**](docs/OrganizationsApi.md#orgs_slug_delete) | **DELETE** /orgs/{slug} | Delete organization +*OrganizationsApi* | [**orgs_slug_get**](docs/OrganizationsApi.md#orgs_slug_get) | **GET** /orgs/{slug} | Get organization +*OrganizationsApi* | [**orgs_slug_patch**](docs/OrganizationsApi.md#orgs_slug_patch) | **PATCH** /orgs/{slug} | Update organization +*SandboxesApi* | 
[**orgs_slug_sandboxes_get**](docs/SandboxesApi.md#orgs_slug_sandboxes_get) | **GET** /orgs/{slug}/sandboxes | List sandboxes +*SandboxesApi* | [**orgs_slug_sandboxes_post**](docs/SandboxesApi.md#orgs_slug_sandboxes_post) | **POST** /orgs/{slug}/sandboxes | Create sandbox +*SandboxesApi* | [**orgs_slug_sandboxes_sandbox_id_commands_get**](docs/SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_commands_get) | **GET** /orgs/{slug}/sandboxes/{sandboxID}/commands | List commands +*SandboxesApi* | [**orgs_slug_sandboxes_sandbox_id_delete**](docs/SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_delete) | **DELETE** /orgs/{slug}/sandboxes/{sandboxID} | Destroy sandbox +*SandboxesApi* | [**orgs_slug_sandboxes_sandbox_id_get**](docs/SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_get) | **GET** /orgs/{slug}/sandboxes/{sandboxID} | Get sandbox +*SandboxesApi* | [**orgs_slug_sandboxes_sandbox_id_run_post**](docs/SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_run_post) | **POST** /orgs/{slug}/sandboxes/{sandboxID}/run | Run command +*SandboxesApi* | [**orgs_slug_sandboxes_sandbox_id_snapshot_post**](docs/SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_snapshot_post) | **POST** /orgs/{slug}/sandboxes/{sandboxID}/snapshot | Create snapshot +*SandboxesApi* | [**orgs_slug_sandboxes_sandbox_id_start_post**](docs/SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_start_post) | **POST** /orgs/{slug}/sandboxes/{sandboxID}/start | Start sandbox +*SandboxesApi* | [**orgs_slug_sandboxes_sandbox_id_stop_post**](docs/SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_stop_post) | **POST** /orgs/{slug}/sandboxes/{sandboxID}/stop | Stop sandbox +*SandboxesApi* | [**orgs_slug_sandboxes_sandbox_idip_get**](docs/SandboxesApi.md#orgs_slug_sandboxes_sandbox_idip_get) | **GET** /orgs/{slug}/sandboxes/{sandboxID}/ip | Get sandbox IP +*SourceVMsApi* | [**orgs_slug_sources_vm_prepare_post**](docs/SourceVMsApi.md#orgs_slug_sources_vm_prepare_post) | **POST** /orgs/{slug}/sources/{vm}/prepare | Prepare source VM 
+*SourceVMsApi* | [**orgs_slug_sources_vm_read_post**](docs/SourceVMsApi.md#orgs_slug_sources_vm_read_post) | **POST** /orgs/{slug}/sources/{vm}/read | Read source file +*SourceVMsApi* | [**orgs_slug_sources_vm_run_post**](docs/SourceVMsApi.md#orgs_slug_sources_vm_run_post) | **POST** /orgs/{slug}/sources/{vm}/run | Run source command +*SourceVMsApi* | [**orgs_slug_vms_get**](docs/SourceVMsApi.md#orgs_slug_vms_get) | **GET** /orgs/{slug}/vms | List source VMs ## Documentation For Models - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleExportPlaybookResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleExportPlaybookResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleGetPlaybookResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleGetPlaybookResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleJob](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleJob.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobStatus](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobStatus.md) - - 
[GithubComAspectrrFluidShFluidRemoteInternalAnsibleListPlaybooksResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleListPlaybooksResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleReorderTasksRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleReorderTasksRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalErrorErrorResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalErrorErrorResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestAccessErrorResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestAccessErrorResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestCaPublicKeyResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestCaPublicKeyResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestCertificateResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestCertificateResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestDestroySandboxResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestDestroySandboxResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestDiffRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalRestDiffRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestDiffResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestDiffResponse.md) - - 
[GithubComAspectrrFluidShFluidRemoteInternalRestDiscoverIPResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestDiscoverIPResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestErrorResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestErrorResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestGenerateResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestGenerateResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestGetSandboxResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestGetSandboxResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestHostError](docs/GithubComAspectrrFluidShFluidRemoteInternalRestHostError.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestInjectSSHKeyRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalRestInjectSSHKeyRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestListCertificatesResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestListCertificatesResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxCommandsResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxCommandsResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxesResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxesResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestListSessionsResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestListSessionsResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestListVMsResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestListVMsResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestPublishRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalRestPublishRequest.md) - - 
[GithubComAspectrrFluidShFluidRemoteInternalRestPublishResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestPublishResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestSandboxInfo](docs/GithubComAspectrrFluidShFluidRemoteInternalRestSandboxInfo.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestSessionResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestSessionResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartResponse.md) - - 
[GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxRequest](docs/GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxRequest.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxResponse](docs/GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxResponse.md) - - [GithubComAspectrrFluidShFluidRemoteInternalRestVmInfo](docs/GithubComAspectrrFluidShFluidRemoteInternalRestVmInfo.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStoreChangeDiff](docs/GithubComAspectrrFluidShFluidRemoteInternalStoreChangeDiff.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStoreCommand](docs/GithubComAspectrrFluidShFluidRemoteInternalStoreCommand.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStoreCommandExecRecord](docs/GithubComAspectrrFluidShFluidRemoteInternalStoreCommandExecRecord.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStoreCommandSummary](docs/GithubComAspectrrFluidShFluidRemoteInternalStoreCommandSummary.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStoreDiff](docs/GithubComAspectrrFluidShFluidRemoteInternalStoreDiff.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStorePackageInfo](docs/GithubComAspectrrFluidShFluidRemoteInternalStorePackageInfo.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStorePlaybook](docs/GithubComAspectrrFluidShFluidRemoteInternalStorePlaybook.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStorePlaybookTask](docs/GithubComAspectrrFluidShFluidRemoteInternalStorePlaybookTask.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStoreSandbox](docs/GithubComAspectrrFluidShFluidRemoteInternalStoreSandbox.md) - - 
[GithubComAspectrrFluidShFluidRemoteInternalStoreSandboxState](docs/GithubComAspectrrFluidShFluidRemoteInternalStoreSandboxState.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStoreServiceChange](docs/GithubComAspectrrFluidShFluidRemoteInternalStoreServiceChange.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshot](docs/GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshot.md) - - [GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshotKind](docs/GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshotKind.md) - - [InternalAnsibleAddTaskRequest](docs/InternalAnsibleAddTaskRequest.md) - - [InternalAnsibleAddTaskResponse](docs/InternalAnsibleAddTaskResponse.md) - - [InternalAnsibleCreatePlaybookRequest](docs/InternalAnsibleCreatePlaybookRequest.md) - - [InternalAnsibleCreatePlaybookResponse](docs/InternalAnsibleCreatePlaybookResponse.md) - - [InternalAnsibleExportPlaybookResponse](docs/InternalAnsibleExportPlaybookResponse.md) - - [InternalAnsibleGetPlaybookResponse](docs/InternalAnsibleGetPlaybookResponse.md) - - [InternalAnsibleJob](docs/InternalAnsibleJob.md) - - [InternalAnsibleJobRequest](docs/InternalAnsibleJobRequest.md) - - [InternalAnsibleJobResponse](docs/InternalAnsibleJobResponse.md) - - [InternalAnsibleJobStatus](docs/InternalAnsibleJobStatus.md) - - [InternalAnsibleListPlaybooksResponse](docs/InternalAnsibleListPlaybooksResponse.md) - - [InternalAnsibleReorderTasksRequest](docs/InternalAnsibleReorderTasksRequest.md) - - [InternalAnsibleUpdateTaskRequest](docs/InternalAnsibleUpdateTaskRequest.md) - - [InternalAnsibleUpdateTaskResponse](docs/InternalAnsibleUpdateTaskResponse.md) - - [InternalRestAccessErrorResponse](docs/InternalRestAccessErrorResponse.md) - - [InternalRestCaPublicKeyResponse](docs/InternalRestCaPublicKeyResponse.md) - - [InternalRestCertificateResponse](docs/InternalRestCertificateResponse.md) - - [InternalRestCreateSandboxRequest](docs/InternalRestCreateSandboxRequest.md) - - 
[InternalRestCreateSandboxResponse](docs/InternalRestCreateSandboxResponse.md) - - [InternalRestDestroySandboxResponse](docs/InternalRestDestroySandboxResponse.md) - - [InternalRestDiffRequest](docs/InternalRestDiffRequest.md) - - [InternalRestDiffResponse](docs/InternalRestDiffResponse.md) - - [InternalRestDiscoverIPResponse](docs/InternalRestDiscoverIPResponse.md) - - [InternalRestErrorResponse](docs/InternalRestErrorResponse.md) - - [InternalRestGenerateResponse](docs/InternalRestGenerateResponse.md) - - [InternalRestGetSandboxResponse](docs/InternalRestGetSandboxResponse.md) - - [InternalRestHealthResponse](docs/InternalRestHealthResponse.md) - - [InternalRestHostError](docs/InternalRestHostError.md) - - [InternalRestInjectSSHKeyRequest](docs/InternalRestInjectSSHKeyRequest.md) - - [InternalRestListCertificatesResponse](docs/InternalRestListCertificatesResponse.md) - - [InternalRestListSandboxCommandsResponse](docs/InternalRestListSandboxCommandsResponse.md) - - [InternalRestListSandboxesResponse](docs/InternalRestListSandboxesResponse.md) - - [InternalRestListSessionsResponse](docs/InternalRestListSessionsResponse.md) - - [InternalRestListVMsResponse](docs/InternalRestListVMsResponse.md) - - [InternalRestPublishRequest](docs/InternalRestPublishRequest.md) - - [InternalRestPublishResponse](docs/InternalRestPublishResponse.md) - - [InternalRestRequestAccessRequest](docs/InternalRestRequestAccessRequest.md) - - [InternalRestRequestAccessResponse](docs/InternalRestRequestAccessResponse.md) - - [InternalRestRevokeCertificateRequest](docs/InternalRestRevokeCertificateRequest.md) - - [InternalRestRevokeCertificateResponse](docs/InternalRestRevokeCertificateResponse.md) - - [InternalRestRunCommandRequest](docs/InternalRestRunCommandRequest.md) - - [InternalRestRunCommandResponse](docs/InternalRestRunCommandResponse.md) - - [InternalRestSandboxInfo](docs/InternalRestSandboxInfo.md) - - [InternalRestSessionEndRequest](docs/InternalRestSessionEndRequest.md) - - 
[InternalRestSessionEndResponse](docs/InternalRestSessionEndResponse.md) - - [InternalRestSessionResponse](docs/InternalRestSessionResponse.md) - - [InternalRestSessionStartRequest](docs/InternalRestSessionStartRequest.md) - - [InternalRestSessionStartResponse](docs/InternalRestSessionStartResponse.md) - - [InternalRestSnapshotRequest](docs/InternalRestSnapshotRequest.md) - - [InternalRestSnapshotResponse](docs/InternalRestSnapshotResponse.md) - - [InternalRestStartSandboxRequest](docs/InternalRestStartSandboxRequest.md) - - [InternalRestStartSandboxResponse](docs/InternalRestStartSandboxResponse.md) - - [InternalRestVmInfo](docs/InternalRestVmInfo.md) - - [TimeDuration](docs/TimeDuration.md) + - [OrchestratorCreateSandboxRequest](docs/OrchestratorCreateSandboxRequest.md) + - [OrchestratorHostInfo](docs/OrchestratorHostInfo.md) + - [OrchestratorPrepareRequest](docs/OrchestratorPrepareRequest.md) + - [OrchestratorReadSourceRequest](docs/OrchestratorReadSourceRequest.md) + - [OrchestratorRunCommandRequest](docs/OrchestratorRunCommandRequest.md) + - [OrchestratorRunSourceRequest](docs/OrchestratorRunSourceRequest.md) + - [OrchestratorSnapshotRequest](docs/OrchestratorSnapshotRequest.md) + - [OrchestratorSnapshotResponse](docs/OrchestratorSnapshotResponse.md) + - [OrchestratorSourceCommandResult](docs/OrchestratorSourceCommandResult.md) + - [OrchestratorSourceFileResult](docs/OrchestratorSourceFileResult.md) + - [RestAddMemberRequest](docs/RestAddMemberRequest.md) + - [RestAuthResponse](docs/RestAuthResponse.md) + - [RestBillingResponse](docs/RestBillingResponse.md) + - [RestCalculatorRequest](docs/RestCalculatorRequest.md) + - [RestCalculatorResponse](docs/RestCalculatorResponse.md) + - [RestCreateHostTokenRequest](docs/RestCreateHostTokenRequest.md) + - [RestCreateOrgRequest](docs/RestCreateOrgRequest.md) + - [RestFreeTierInfo](docs/RestFreeTierInfo.md) + - [RestHostTokenResponse](docs/RestHostTokenResponse.md) + - [RestLoginRequest](docs/RestLoginRequest.md) + - 
[RestMemberResponse](docs/RestMemberResponse.md) + - [RestOrgResponse](docs/RestOrgResponse.md) + - [RestRegisterRequest](docs/RestRegisterRequest.md) + - [RestSwaggerError](docs/RestSwaggerError.md) + - [RestUpdateOrgRequest](docs/RestUpdateOrgRequest.md) + - [RestUsageSummary](docs/RestUsageSummary.md) + - [RestUserResponse](docs/RestUserResponse.md) + - [StoreCommand](docs/StoreCommand.md) + - [StoreSandbox](docs/StoreSandbox.md) + - [StoreSandboxState](docs/StoreSandboxState.md) @@ -316,7 +229,3 @@ Endpoints do not require authorization. ## Author - - - - diff --git a/sdk/fluid-py/docs/AuthApi.md b/sdk/fluid-py/docs/AuthApi.md new file mode 100644 index 00000000..708cd87a --- /dev/null +++ b/sdk/fluid-py/docs/AuthApi.md @@ -0,0 +1,546 @@ +# fluid.AuthApi + +All URIs are relative to *http://localhost:8081/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**auth_github_callback_get**](AuthApi.md#auth_github_callback_get) | **GET** /auth/github/callback | GitHub OAuth callback +[**auth_github_get**](AuthApi.md#auth_github_get) | **GET** /auth/github | GitHub OAuth login +[**auth_google_callback_get**](AuthApi.md#auth_google_callback_get) | **GET** /auth/google/callback | Google OAuth callback +[**auth_google_get**](AuthApi.md#auth_google_get) | **GET** /auth/google | Google OAuth login +[**auth_login_post**](AuthApi.md#auth_login_post) | **POST** /auth/login | Log in +[**auth_logout_post**](AuthApi.md#auth_logout_post) | **POST** /auth/logout | Log out +[**auth_me_get**](AuthApi.md#auth_me_get) | **GET** /auth/me | Get current user +[**auth_register_post**](AuthApi.md#auth_register_post) | **POST** /auth/register | Register a new user + + +# **auth_github_callback_get** +> auth_github_callback_get(code) + +GitHub OAuth callback + +Handle GitHub OAuth callback, create or link user, set session cookie, and redirect to dashboard + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint 
import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.AuthApi(api_client) + code = 'code_example' # str | OAuth authorization code + + try: + # GitHub OAuth callback + api_instance.auth_github_callback_get(code) + except Exception as e: + print("Exception when calling AuthApi->auth_github_callback_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **code** | **str**| OAuth authorization code | + +### Return type + +void (empty response body) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: */* + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**302** | Redirect to dashboard | - | +**400** | Bad Request | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **auth_github_get** +> auth_github_get() + +GitHub OAuth login + +Redirect to GitHub OAuth authorization page + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.AuthApi(api_client) + + try: + # GitHub OAuth login + api_instance.auth_github_get() + except Exception as e: + print("Exception when calling AuthApi->auth_github_get: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +void (empty response body) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: */* + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**302** | Redirect to GitHub | - | +**501** | Not Implemented | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **auth_google_callback_get** +> auth_google_callback_get(code) + +Google OAuth callback + +Handle Google OAuth callback, create or link user, set session cookie, and redirect to dashboard + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.AuthApi(api_client) + code = 'code_example' # str | OAuth authorization code + + try: + # Google OAuth callback + api_instance.auth_google_callback_get(code) + except Exception as e: + print("Exception when calling AuthApi->auth_google_callback_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **code** | **str**| OAuth authorization code | + +### Return type + +void (empty response body) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: */* + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**302** | Redirect to dashboard | - | +**400** | Bad Request | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **auth_google_get** +> auth_google_get() + +Google OAuth login + +Redirect to Google OAuth authorization page + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.AuthApi(api_client) + + try: + # Google OAuth login + api_instance.auth_google_get() + except Exception as e: + print("Exception when calling AuthApi->auth_google_get: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +void (empty response body) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: */* + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**302** | Redirect to Google | - | +**501** | Not Implemented | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **auth_login_post** +> RestAuthResponse auth_login_post(request) + +Log in + +Authenticate with email and password, returns a session cookie + +### Example + + +```python +import fluid +from fluid.models.rest_auth_response import RestAuthResponse +from fluid.models.rest_login_request import RestLoginRequest +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.AuthApi(api_client) + request = fluid.RestLoginRequest() # RestLoginRequest | Login credentials + + try: + # Log in + api_response = api_instance.auth_login_post(request) + print("The response of AuthApi->auth_login_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling AuthApi->auth_login_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **request** | [**RestLoginRequest**](RestLoginRequest.md)| Login credentials | + +### Return type + +[**RestAuthResponse**](RestAuthResponse.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**400** | Bad Request | - | +**401** | Unauthorized | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **auth_logout_post** +> Dict[str, str] auth_logout_post() + +Log out + +Invalidate the current session and clear the session cookie + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.AuthApi(api_client) + + try: + # Log out + api_response = api_instance.auth_logout_post() + print("The response of AuthApi->auth_logout_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling AuthApi->auth_logout_post: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +**Dict[str, str]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **auth_me_get** +> RestAuthResponse auth_me_get() + +Get current user + +Return the currently authenticated user + +### Example + + +```python +import fluid +from fluid.models.rest_auth_response import RestAuthResponse +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.AuthApi(api_client) + + try: + # Get current user + api_response = api_instance.auth_me_get() + print("The response of AuthApi->auth_me_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling AuthApi->auth_me_get: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +[**RestAuthResponse**](RestAuthResponse.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**401** | Unauthorized | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **auth_register_post** +> RestAuthResponse auth_register_post(request) + +Register a new user + +Create a new user account and return a session cookie + +### Example + + +```python +import fluid +from fluid.models.rest_auth_response import RestAuthResponse +from fluid.models.rest_register_request import RestRegisterRequest +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.AuthApi(api_client) + request = fluid.RestRegisterRequest() # RestRegisterRequest | Registration details + + try: + # Register a new user + api_response = api_instance.auth_register_post(request) + print("The response of AuthApi->auth_register_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling AuthApi->auth_register_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **request** | [**RestRegisterRequest**](RestRegisterRequest.md)| Registration details | + +### Return type + +[**RestAuthResponse**](RestAuthResponse.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**201** | Created | - | +**400** | Bad Request | - | +**409** | Conflict | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdk/fluid-py/docs/BillingApi.md b/sdk/fluid-py/docs/BillingApi.md new file mode 100644 index 00000000..16a61fa6 --- /dev/null +++ b/sdk/fluid-py/docs/BillingApi.md @@ -0,0 +1,428 @@ +# fluid.BillingApi + +All URIs are relative to *http://localhost:8081/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**billing_calculator_post**](BillingApi.md#billing_calculator_post) | **POST** /billing/calculator | Pricing calculator 
+[**orgs_slug_billing_get**](BillingApi.md#orgs_slug_billing_get) | **GET** /orgs/{slug}/billing | Get billing info +[**orgs_slug_billing_portal_post**](BillingApi.md#orgs_slug_billing_portal_post) | **POST** /orgs/{slug}/billing/portal | Billing portal +[**orgs_slug_billing_subscribe_post**](BillingApi.md#orgs_slug_billing_subscribe_post) | **POST** /orgs/{slug}/billing/subscribe | Subscribe +[**orgs_slug_billing_usage_get**](BillingApi.md#orgs_slug_billing_usage_get) | **GET** /orgs/{slug}/billing/usage | Get usage +[**webhooks_stripe_post**](BillingApi.md#webhooks_stripe_post) | **POST** /webhooks/stripe | Stripe webhook + + +# **billing_calculator_post** +> RestCalculatorResponse billing_calculator_post(request) + +Pricing calculator + +Calculate estimated monthly costs based on resource usage + +### Example + + +```python +import fluid +from fluid.models.rest_calculator_request import RestCalculatorRequest +from fluid.models.rest_calculator_response import RestCalculatorResponse +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.BillingApi(api_client) + request = fluid.RestCalculatorRequest() # RestCalculatorRequest | Resource quantities + + try: + # Pricing calculator + api_response = api_instance.billing_calculator_post(request) + print("The response of BillingApi->billing_calculator_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling BillingApi->billing_calculator_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **request** | [**RestCalculatorRequest**](RestCalculatorRequest.md)| Resource quantities | + +### Return type + +[**RestCalculatorResponse**](RestCalculatorResponse.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**400** | Bad Request | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_billing_get** +> RestBillingResponse orgs_slug_billing_get(slug) + +Get billing info + +Get the current billing plan, status, and usage summary for an organization + +### Example + + +```python +import fluid +from fluid.models.rest_billing_response import RestBillingResponse +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.BillingApi(api_client) + slug = 'slug_example' # str | Organization slug + + try: + # Get billing info + api_response = api_instance.orgs_slug_billing_get(slug) + print("The response of BillingApi->orgs_slug_billing_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling BillingApi->orgs_slug_billing_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + +### Return type + +[**RestBillingResponse**](RestBillingResponse.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_billing_portal_post** +> Dict[str, str] orgs_slug_billing_portal_post(slug) + +Billing portal + +Create a Stripe billing portal session (owner only) + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.BillingApi(api_client) + slug = 'slug_example' # str | Organization slug + + try: + # Billing portal + api_response = api_instance.orgs_slug_billing_portal_post(slug) + print("The response of BillingApi->orgs_slug_billing_portal_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling BillingApi->orgs_slug_billing_portal_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + +### Return type + +**Dict[str, str]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_billing_subscribe_post** +> Dict[str, str] orgs_slug_billing_subscribe_post(slug) + +Subscribe + +Create a Stripe checkout session for the organization (owner only) + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.BillingApi(api_client) + slug = 'slug_example' # str | Organization slug + + try: + # Subscribe + api_response = api_instance.orgs_slug_billing_subscribe_post(slug) + print("The response of BillingApi->orgs_slug_billing_subscribe_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling BillingApi->orgs_slug_billing_subscribe_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + +### Return type + +**Dict[str, str]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_billing_usage_get** +> Dict[str, object] orgs_slug_billing_usage_get(slug) + +Get usage + +Get current month usage records for the organization + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.BillingApi(api_client) + slug = 'slug_example' # str | Organization slug + + try: + # Get usage + api_response = api_instance.orgs_slug_billing_usage_get(slug) + print("The response of BillingApi->orgs_slug_billing_usage_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling BillingApi->orgs_slug_billing_usage_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **webhooks_stripe_post** +> Dict[str, str] webhooks_stripe_post() + +Stripe webhook + +Handle incoming Stripe webhook events + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.BillingApi(api_client) + + try: + # Stripe webhook + api_response = api_instance.webhooks_stripe_post() + print("The response of BillingApi->webhooks_stripe_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling BillingApi->webhooks_stripe_post: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +**Dict[str, str]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdk/fluid-py/docs/HealthApi.md b/sdk/fluid-py/docs/HealthApi.md index b59f87be..278c5ec7 100644 --- a/sdk/fluid-py/docs/HealthApi.md +++ b/sdk/fluid-py/docs/HealthApi.md @@ -1,32 +1,31 @@ # fluid.HealthApi -All URIs are relative to *http://localhost* +All URIs are relative to *http://localhost:8081/v1* Method | HTTP request | Description ------------- | ------------- | ------------- -[**get_health**](HealthApi.md#get_health) | **GET** /v1/health | Health check +[**health_get**](HealthApi.md#health_get) | **GET** /health | Health check -# **get_health** -> GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse get_health() +# **health_get** +> Dict[str, str] health_get() Health check -Returns service health status +Returns API health status ### Example ```python import fluid -from 
fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_health_response import GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse from fluid.rest import ApiException from pprint import pprint -# Defining the host is optional and defaults to http://localhost +# Defining the host is optional and defaults to http://localhost:8081/v1 # See configuration.py for a list of all supported configuration parameters. configuration = fluid.Configuration( - host = "http://localhost" + host = "http://localhost:8081/v1" ) @@ -37,11 +36,11 @@ with fluid.ApiClient(configuration) as api_client: try: # Health check - api_response = api_instance.get_health() - print("The response of HealthApi->get_health:\n") + api_response = api_instance.health_get() + print("The response of HealthApi->health_get:\n") pprint(api_response) except Exception as e: - print("Exception when calling HealthApi->get_health: %s\n" % e) + print("Exception when calling HealthApi->health_get: %s\n" % e) ``` @@ -52,7 +51,7 @@ This endpoint does not need any parameter. 
### Return type -[**GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse**](GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse.md) +**Dict[str, str]** ### Authorization diff --git a/sdk/fluid-py/docs/HostTokensApi.md b/sdk/fluid-py/docs/HostTokensApi.md new file mode 100644 index 00000000..d1343b05 --- /dev/null +++ b/sdk/fluid-py/docs/HostTokensApi.md @@ -0,0 +1,227 @@ +# fluid.HostTokensApi + +All URIs are relative to *http://localhost:8081/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**orgs_slug_hosts_tokens_get**](HostTokensApi.md#orgs_slug_hosts_tokens_get) | **GET** /orgs/{slug}/hosts/tokens | List host tokens +[**orgs_slug_hosts_tokens_post**](HostTokensApi.md#orgs_slug_hosts_tokens_post) | **POST** /orgs/{slug}/hosts/tokens | Create host token +[**orgs_slug_hosts_tokens_token_id_delete**](HostTokensApi.md#orgs_slug_hosts_tokens_token_id_delete) | **DELETE** /orgs/{slug}/hosts/tokens/{tokenID} | Delete host token + + +# **orgs_slug_hosts_tokens_get** +> Dict[str, object] orgs_slug_hosts_tokens_get(slug) + +List host tokens + +List all host tokens for the organization + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.HostTokensApi(api_client) + slug = 'slug_example' # str | Organization slug + + try: + # List host tokens + api_response = api_instance.orgs_slug_hosts_tokens_get(slug) + print("The response of HostTokensApi->orgs_slug_hosts_tokens_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling HostTokensApi->orgs_slug_hosts_tokens_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_hosts_tokens_post** +> RestHostTokenResponse orgs_slug_hosts_tokens_post(slug, request) + +Create host token + +Generate a new host authentication token (owner or admin only) + +### Example + + +```python +import fluid +from fluid.models.rest_create_host_token_request import RestCreateHostTokenRequest +from fluid.models.rest_host_token_response import RestHostTokenResponse +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported 
configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.HostTokensApi(api_client) + slug = 'slug_example' # str | Organization slug + request = fluid.RestCreateHostTokenRequest() # RestCreateHostTokenRequest | Token details + + try: + # Create host token + api_response = api_instance.orgs_slug_hosts_tokens_post(slug, request) + print("The response of HostTokensApi->orgs_slug_hosts_tokens_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling HostTokensApi->orgs_slug_hosts_tokens_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **request** | [**RestCreateHostTokenRequest**](RestCreateHostTokenRequest.md)| Token details | + +### Return type + +[**RestHostTokenResponse**](RestHostTokenResponse.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**201** | Created | - | +**400** | Bad Request | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_hosts_tokens_token_id_delete** +> Dict[str, str] orgs_slug_hosts_tokens_token_id_delete(slug, token_id) + +Delete host token + +Delete a host token (owner or admin only) + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint 
import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.HostTokensApi(api_client) + slug = 'slug_example' # str | Organization slug + token_id = 'token_id_example' # str | Token ID + + try: + # Delete host token + api_response = api_instance.orgs_slug_hosts_tokens_token_id_delete(slug, token_id) + print("The response of HostTokensApi->orgs_slug_hosts_tokens_token_id_delete:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling HostTokensApi->orgs_slug_hosts_tokens_token_id_delete: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **token_id** | **str**| Token ID | + +### Return type + +**Dict[str, str]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdk/fluid-py/docs/HostsApi.md b/sdk/fluid-py/docs/HostsApi.md new file mode 100644 index 00000000..1865e85f --- /dev/null +++ b/sdk/fluid-py/docs/HostsApi.md @@ -0,0 +1,152 @@ +# fluid.HostsApi + +All URIs are relative to *http://localhost:8081/v1* + +Method | HTTP request | Description +------------- | 
------------- | ------------- +[**orgs_slug_hosts_get**](HostsApi.md#orgs_slug_hosts_get) | **GET** /orgs/{slug}/hosts | List hosts +[**orgs_slug_hosts_host_id_get**](HostsApi.md#orgs_slug_hosts_host_id_get) | **GET** /orgs/{slug}/hosts/{hostID} | Get host + + +# **orgs_slug_hosts_get** +> Dict[str, object] orgs_slug_hosts_get(slug) + +List hosts + +List all connected sandbox hosts + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.HostsApi(api_client) + slug = 'slug_example' # str | Organization slug + + try: + # List hosts + api_response = api_instance.orgs_slug_hosts_get(slug) + print("The response of HostsApi->orgs_slug_hosts_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling HostsApi->orgs_slug_hosts_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to 
README]](../README.md) + +# **orgs_slug_hosts_host_id_get** +> OrchestratorHostInfo orgs_slug_hosts_host_id_get(slug, host_id) + +Get host + +Get details of a specific connected host + +### Example + + +```python +import fluid +from fluid.models.orchestrator_host_info import OrchestratorHostInfo +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.HostsApi(api_client) + slug = 'slug_example' # str | Organization slug + host_id = 'host_id_example' # str | Host ID + + try: + # Get host + api_response = api_instance.orgs_slug_hosts_host_id_get(slug, host_id) + print("The response of HostsApi->orgs_slug_hosts_host_id_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling HostsApi->orgs_slug_hosts_host_id_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **host_id** | **str**| Host ID | + +### Return type + +[**OrchestratorHostInfo**](OrchestratorHostInfo.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff 
--git a/sdk/fluid-py/docs/MembersApi.md b/sdk/fluid-py/docs/MembersApi.md new file mode 100644 index 00000000..726e2252 --- /dev/null +++ b/sdk/fluid-py/docs/MembersApi.md @@ -0,0 +1,229 @@ +# fluid.MembersApi + +All URIs are relative to *http://localhost:8081/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**orgs_slug_members_get**](MembersApi.md#orgs_slug_members_get) | **GET** /orgs/{slug}/members | List members +[**orgs_slug_members_member_id_delete**](MembersApi.md#orgs_slug_members_member_id_delete) | **DELETE** /orgs/{slug}/members/{memberID} | Remove member +[**orgs_slug_members_post**](MembersApi.md#orgs_slug_members_post) | **POST** /orgs/{slug}/members | Add member + + +# **orgs_slug_members_get** +> Dict[str, object] orgs_slug_members_get(slug) + +List members + +List all members of an organization + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.MembersApi(api_client) + slug = 'slug_example' # str | Organization slug + + try: + # List members + api_response = api_instance.orgs_slug_members_get(slug) + print("The response of MembersApi->orgs_slug_members_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling MembersApi->orgs_slug_members_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_members_member_id_delete** +> Dict[str, str] orgs_slug_members_member_id_delete(slug, member_id) + +Remove member + +Remove a member from an organization (owner or admin only) + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.MembersApi(api_client) + slug = 'slug_example' # str | Organization slug + member_id = 'member_id_example' # str | Member ID + + try: + # Remove member + api_response = api_instance.orgs_slug_members_member_id_delete(slug, member_id) + print("The response of MembersApi->orgs_slug_members_member_id_delete:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling MembersApi->orgs_slug_members_member_id_delete: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **member_id** | **str**| Member ID | + +### Return type + +**Dict[str, str]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_members_post** +> RestMemberResponse orgs_slug_members_post(slug, request) + +Add member + +Add a user to an organization (owner or admin only) + +### Example + + +```python +import fluid +from fluid.models.rest_add_member_request import RestAddMemberRequest +from fluid.models.rest_member_response import RestMemberResponse +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to 
http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.MembersApi(api_client) + slug = 'slug_example' # str | Organization slug + request = fluid.RestAddMemberRequest() # RestAddMemberRequest | Member details + + try: + # Add member + api_response = api_instance.orgs_slug_members_post(slug, request) + print("The response of MembersApi->orgs_slug_members_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling MembersApi->orgs_slug_members_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **request** | [**RestAddMemberRequest**](RestAddMemberRequest.md)| Member details | + +### Return type + +[**RestMemberResponse**](RestMemberResponse.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**201** | Created | - | +**400** | Bad Request | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**409** | Conflict | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdk/fluid-py/docs/OrchestratorCreateSandboxRequest.md b/sdk/fluid-py/docs/OrchestratorCreateSandboxRequest.md new file mode 100644 index 00000000..7ecc3843 --- /dev/null +++ 
b/sdk/fluid-py/docs/OrchestratorCreateSandboxRequest.md @@ -0,0 +1,37 @@ +# OrchestratorCreateSandboxRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**agent_id** | **str** | | [optional] +**base_image** | **str** | | [optional] +**memory_mb** | **int** | | [optional] +**name** | **str** | | [optional] +**network** | **str** | | [optional] +**org_id** | **str** | | [optional] +**source_vm** | **str** | | [optional] +**ttl_seconds** | **int** | | [optional] +**vcpus** | **int** | | [optional] + +## Example + +```python +from fluid.models.orchestrator_create_sandbox_request import OrchestratorCreateSandboxRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of OrchestratorCreateSandboxRequest from a JSON string +orchestrator_create_sandbox_request_instance = OrchestratorCreateSandboxRequest.from_json(json) +# print the JSON string representation of the object +print(OrchestratorCreateSandboxRequest.to_json()) + +# convert the object into a dict +orchestrator_create_sandbox_request_dict = orchestrator_create_sandbox_request_instance.to_dict() +# create an instance of OrchestratorCreateSandboxRequest from a dict +orchestrator_create_sandbox_request_from_dict = OrchestratorCreateSandboxRequest.from_dict(orchestrator_create_sandbox_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/OrchestratorHostInfo.md b/sdk/fluid-py/docs/OrchestratorHostInfo.md new file mode 100644 index 00000000..59421c4c --- /dev/null +++ b/sdk/fluid-py/docs/OrchestratorHostInfo.md @@ -0,0 +1,37 @@ +# OrchestratorHostInfo + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**active_sandboxes** | **int** | | [optional] +**available_cpus** | **int** | | [optional] 
+**available_disk_mb** | **int** | | [optional] +**available_memory_mb** | **int** | | [optional] +**base_images** | **List[str]** | | [optional] +**host_id** | **str** | | [optional] +**hostname** | **str** | | [optional] +**last_heartbeat** | **str** | | [optional] +**status** | **str** | | [optional] + +## Example + +```python +from fluid.models.orchestrator_host_info import OrchestratorHostInfo + +# TODO update the JSON string below +json = "{}" +# create an instance of OrchestratorHostInfo from a JSON string +orchestrator_host_info_instance = OrchestratorHostInfo.from_json(json) +# print the JSON string representation of the object +print(OrchestratorHostInfo.to_json()) + +# convert the object into a dict +orchestrator_host_info_dict = orchestrator_host_info_instance.to_dict() +# create an instance of OrchestratorHostInfo from a dict +orchestrator_host_info_from_dict = OrchestratorHostInfo.from_dict(orchestrator_host_info_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/OrchestratorPrepareRequest.md b/sdk/fluid-py/docs/OrchestratorPrepareRequest.md new file mode 100644 index 00000000..2ae01a8a --- /dev/null +++ b/sdk/fluid-py/docs/OrchestratorPrepareRequest.md @@ -0,0 +1,30 @@ +# OrchestratorPrepareRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**ssh_key_path** | **str** | | [optional] +**ssh_user** | **str** | | [optional] + +## Example + +```python +from fluid.models.orchestrator_prepare_request import OrchestratorPrepareRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of OrchestratorPrepareRequest from a JSON string +orchestrator_prepare_request_instance = OrchestratorPrepareRequest.from_json(json) +# print the JSON string representation of the object 
+print(OrchestratorPrepareRequest.to_json()) + +# convert the object into a dict +orchestrator_prepare_request_dict = orchestrator_prepare_request_instance.to_dict() +# create an instance of OrchestratorPrepareRequest from a dict +orchestrator_prepare_request_from_dict = OrchestratorPrepareRequest.from_dict(orchestrator_prepare_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/OrchestratorReadSourceRequest.md b/sdk/fluid-py/docs/OrchestratorReadSourceRequest.md new file mode 100644 index 00000000..a57318e9 --- /dev/null +++ b/sdk/fluid-py/docs/OrchestratorReadSourceRequest.md @@ -0,0 +1,29 @@ +# OrchestratorReadSourceRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**path** | **str** | | [optional] + +## Example + +```python +from fluid.models.orchestrator_read_source_request import OrchestratorReadSourceRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of OrchestratorReadSourceRequest from a JSON string +orchestrator_read_source_request_instance = OrchestratorReadSourceRequest.from_json(json) +# print the JSON string representation of the object +print(OrchestratorReadSourceRequest.to_json()) + +# convert the object into a dict +orchestrator_read_source_request_dict = orchestrator_read_source_request_instance.to_dict() +# create an instance of OrchestratorReadSourceRequest from a dict +orchestrator_read_source_request_from_dict = OrchestratorReadSourceRequest.from_dict(orchestrator_read_source_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/OrchestratorRunCommandRequest.md b/sdk/fluid-py/docs/OrchestratorRunCommandRequest.md 
new file mode 100644 index 00000000..65a0bd63 --- /dev/null +++ b/sdk/fluid-py/docs/OrchestratorRunCommandRequest.md @@ -0,0 +1,31 @@ +# OrchestratorRunCommandRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**command** | **str** | | [optional] +**env** | **Dict[str, str]** | | [optional] +**timeout_seconds** | **int** | | [optional] + +## Example + +```python +from fluid.models.orchestrator_run_command_request import OrchestratorRunCommandRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of OrchestratorRunCommandRequest from a JSON string +orchestrator_run_command_request_instance = OrchestratorRunCommandRequest.from_json(json) +# print the JSON string representation of the object +print(OrchestratorRunCommandRequest.to_json()) + +# convert the object into a dict +orchestrator_run_command_request_dict = orchestrator_run_command_request_instance.to_dict() +# create an instance of OrchestratorRunCommandRequest from a dict +orchestrator_run_command_request_from_dict = OrchestratorRunCommandRequest.from_dict(orchestrator_run_command_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/OrchestratorRunSourceRequest.md b/sdk/fluid-py/docs/OrchestratorRunSourceRequest.md new file mode 100644 index 00000000..cacf45c4 --- /dev/null +++ b/sdk/fluid-py/docs/OrchestratorRunSourceRequest.md @@ -0,0 +1,30 @@ +# OrchestratorRunSourceRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**command** | **str** | | [optional] +**timeout_seconds** | **int** | | [optional] + +## Example + +```python +from fluid.models.orchestrator_run_source_request import OrchestratorRunSourceRequest + +# TODO update the JSON string below +json = "{}" +# create 
an instance of OrchestratorRunSourceRequest from a JSON string +orchestrator_run_source_request_instance = OrchestratorRunSourceRequest.from_json(json) +# print the JSON string representation of the object +print(OrchestratorRunSourceRequest.to_json()) + +# convert the object into a dict +orchestrator_run_source_request_dict = orchestrator_run_source_request_instance.to_dict() +# create an instance of OrchestratorRunSourceRequest from a dict +orchestrator_run_source_request_from_dict = OrchestratorRunSourceRequest.from_dict(orchestrator_run_source_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/OrchestratorSnapshotRequest.md b/sdk/fluid-py/docs/OrchestratorSnapshotRequest.md new file mode 100644 index 00000000..d2b854ba --- /dev/null +++ b/sdk/fluid-py/docs/OrchestratorSnapshotRequest.md @@ -0,0 +1,29 @@ +# OrchestratorSnapshotRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | | [optional] + +## Example + +```python +from fluid.models.orchestrator_snapshot_request import OrchestratorSnapshotRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of OrchestratorSnapshotRequest from a JSON string +orchestrator_snapshot_request_instance = OrchestratorSnapshotRequest.from_json(json) +# print the JSON string representation of the object +print(OrchestratorSnapshotRequest.to_json()) + +# convert the object into a dict +orchestrator_snapshot_request_dict = orchestrator_snapshot_request_instance.to_dict() +# create an instance of OrchestratorSnapshotRequest from a dict +orchestrator_snapshot_request_from_dict = OrchestratorSnapshotRequest.from_dict(orchestrator_snapshot_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/OrchestratorSnapshotResponse.md b/sdk/fluid-py/docs/OrchestratorSnapshotResponse.md new file mode 100644 index 00000000..2ede931f --- /dev/null +++ b/sdk/fluid-py/docs/OrchestratorSnapshotResponse.md @@ -0,0 +1,32 @@ +# OrchestratorSnapshotResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**created_at** | **str** | | [optional] +**sandbox_id** | **str** | | [optional] +**snapshot_id** | **str** | | [optional] +**snapshot_name** | **str** | | [optional] + +## Example + +```python +from fluid.models.orchestrator_snapshot_response import OrchestratorSnapshotResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of OrchestratorSnapshotResponse from a JSON string +orchestrator_snapshot_response_instance = OrchestratorSnapshotResponse.from_json(json) +# print the JSON string representation of the object +print(OrchestratorSnapshotResponse.to_json()) + +# convert the object into a dict +orchestrator_snapshot_response_dict = orchestrator_snapshot_response_instance.to_dict() +# create an instance of OrchestratorSnapshotResponse from a dict +orchestrator_snapshot_response_from_dict = OrchestratorSnapshotResponse.from_dict(orchestrator_snapshot_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/OrchestratorSourceCommandResult.md b/sdk/fluid-py/docs/OrchestratorSourceCommandResult.md new file mode 100644 index 00000000..80f2fb94 --- /dev/null +++ b/sdk/fluid-py/docs/OrchestratorSourceCommandResult.md @@ -0,0 +1,32 @@ +# OrchestratorSourceCommandResult + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**exit_code** | 
**int** | | [optional] +**source_vm** | **str** | | [optional] +**stderr** | **str** | | [optional] +**stdout** | **str** | | [optional] + +## Example + +```python +from fluid.models.orchestrator_source_command_result import OrchestratorSourceCommandResult + +# TODO update the JSON string below +json = "{}" +# create an instance of OrchestratorSourceCommandResult from a JSON string +orchestrator_source_command_result_instance = OrchestratorSourceCommandResult.from_json(json) +# print the JSON string representation of the object +print(OrchestratorSourceCommandResult.to_json()) + +# convert the object into a dict +orchestrator_source_command_result_dict = orchestrator_source_command_result_instance.to_dict() +# create an instance of OrchestratorSourceCommandResult from a dict +orchestrator_source_command_result_from_dict = OrchestratorSourceCommandResult.from_dict(orchestrator_source_command_result_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/OrchestratorSourceFileResult.md b/sdk/fluid-py/docs/OrchestratorSourceFileResult.md new file mode 100644 index 00000000..8603b871 --- /dev/null +++ b/sdk/fluid-py/docs/OrchestratorSourceFileResult.md @@ -0,0 +1,31 @@ +# OrchestratorSourceFileResult + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**content** | **str** | | [optional] +**path** | **str** | | [optional] +**source_vm** | **str** | | [optional] + +## Example + +```python +from fluid.models.orchestrator_source_file_result import OrchestratorSourceFileResult + +# TODO update the JSON string below +json = "{}" +# create an instance of OrchestratorSourceFileResult from a JSON string +orchestrator_source_file_result_instance = OrchestratorSourceFileResult.from_json(json) +# print the JSON string representation of the object 
+print(OrchestratorSourceFileResult.to_json()) + +# convert the object into a dict +orchestrator_source_file_result_dict = orchestrator_source_file_result_instance.to_dict() +# create an instance of OrchestratorSourceFileResult from a dict +orchestrator_source_file_result_from_dict = OrchestratorSourceFileResult.from_dict(orchestrator_source_file_result_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/OrganizationsApi.md b/sdk/fluid-py/docs/OrganizationsApi.md new file mode 100644 index 00000000..01ba39cd --- /dev/null +++ b/sdk/fluid-py/docs/OrganizationsApi.md @@ -0,0 +1,365 @@ +# fluid.OrganizationsApi + +All URIs are relative to *http://localhost:8081/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**orgs_get**](OrganizationsApi.md#orgs_get) | **GET** /orgs | List organizations +[**orgs_post**](OrganizationsApi.md#orgs_post) | **POST** /orgs | Create organization +[**orgs_slug_delete**](OrganizationsApi.md#orgs_slug_delete) | **DELETE** /orgs/{slug} | Delete organization +[**orgs_slug_get**](OrganizationsApi.md#orgs_slug_get) | **GET** /orgs/{slug} | Get organization +[**orgs_slug_patch**](OrganizationsApi.md#orgs_slug_patch) | **PATCH** /orgs/{slug} | Update organization + + +# **orgs_get** +> Dict[str, object] orgs_get() + +List organizations + +List all organizations the current user belongs to + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.OrganizationsApi(api_client) + + try: + # List organizations + api_response = api_instance.orgs_get() + print("The response of OrganizationsApi->orgs_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling OrganizationsApi->orgs_get: %s\n" % e) +``` + + + +### Parameters + +This endpoint does not need any parameter. + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_post** +> RestOrgResponse orgs_post(request) + +Create organization + +Create a new organization and add the current user as owner + +### Example + + +```python +import fluid +from fluid.models.rest_create_org_request import RestCreateOrgRequest +from fluid.models.rest_org_response import RestOrgResponse +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.OrganizationsApi(api_client) + request = fluid.RestCreateOrgRequest() # RestCreateOrgRequest | Organization details + + try: + # Create organization + api_response = api_instance.orgs_post(request) + print("The response of OrganizationsApi->orgs_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling OrganizationsApi->orgs_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **request** | [**RestCreateOrgRequest**](RestCreateOrgRequest.md)| Organization details | + +### Return type + +[**RestOrgResponse**](RestOrgResponse.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**201** | Created | - | +**400** | Bad Request | - | +**409** | Conflict | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_delete** +> Dict[str, str] orgs_slug_delete(slug) + +Delete organization + +Delete an organization (owner only) + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.OrganizationsApi(api_client) + slug = 'slug_example' # str | Organization slug + + try: + # Delete organization + api_response = api_instance.orgs_slug_delete(slug) + print("The response of OrganizationsApi->orgs_slug_delete:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling OrganizationsApi->orgs_slug_delete: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + +### Return type + +**Dict[str, str]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_get** +> RestOrgResponse orgs_slug_get(slug) + +Get organization + +Get organization details by slug + +### Example + + +```python +import fluid +from fluid.models.rest_org_response import RestOrgResponse +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.OrganizationsApi(api_client) + slug = 'slug_example' # str | Organization slug + + try: + # Get organization + api_response = api_instance.orgs_slug_get(slug) + print("The response of OrganizationsApi->orgs_slug_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling OrganizationsApi->orgs_slug_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + +### Return type + +[**RestOrgResponse**](RestOrgResponse.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_patch** +> RestOrgResponse orgs_slug_patch(slug, request) + +Update organization + +Update organization details (owner or admin only) + +### Example + + +```python +import fluid +from fluid.models.rest_org_response import RestOrgResponse +from fluid.models.rest_update_org_request import RestUpdateOrgRequest +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.OrganizationsApi(api_client) + slug = 'slug_example' # str | Organization slug + request = fluid.RestUpdateOrgRequest() # RestUpdateOrgRequest | Fields to update + + try: + # Update organization + api_response = api_instance.orgs_slug_patch(slug, request) + print("The response of OrganizationsApi->orgs_slug_patch:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling OrganizationsApi->orgs_slug_patch: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **request** | [**RestUpdateOrgRequest**](RestUpdateOrgRequest.md)| Fields to update | + +### Return type + +[**RestOrgResponse**](RestOrgResponse.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**400** | Bad Request | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdk/fluid-py/docs/RestAddMemberRequest.md b/sdk/fluid-py/docs/RestAddMemberRequest.md new file mode 100644 index 00000000..54b677b6 --- /dev/null +++ b/sdk/fluid-py/docs/RestAddMemberRequest.md @@ -0,0 +1,30 @@ +# RestAddMemberRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- 
+**email** | **str** | | [optional] +**role** | **str** | | [optional] + +## Example + +```python +from fluid.models.rest_add_member_request import RestAddMemberRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of RestAddMemberRequest from a JSON string +rest_add_member_request_instance = RestAddMemberRequest.from_json(json) +# print the JSON string representation of the object +print(RestAddMemberRequest.to_json()) + +# convert the object into a dict +rest_add_member_request_dict = rest_add_member_request_instance.to_dict() +# create an instance of RestAddMemberRequest from a dict +rest_add_member_request_from_dict = RestAddMemberRequest.from_dict(rest_add_member_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestAuthResponse.md b/sdk/fluid-py/docs/RestAuthResponse.md new file mode 100644 index 00000000..5df88090 --- /dev/null +++ b/sdk/fluid-py/docs/RestAuthResponse.md @@ -0,0 +1,29 @@ +# RestAuthResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**user** | [**RestUserResponse**](RestUserResponse.md) | | [optional] + +## Example + +```python +from fluid.models.rest_auth_response import RestAuthResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of RestAuthResponse from a JSON string +rest_auth_response_instance = RestAuthResponse.from_json(json) +# print the JSON string representation of the object +print(RestAuthResponse.to_json()) + +# convert the object into a dict +rest_auth_response_dict = rest_auth_response_instance.to_dict() +# create an instance of RestAuthResponse from a dict +rest_auth_response_from_dict = RestAuthResponse.from_dict(rest_auth_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API 
list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestBillingResponse.md b/sdk/fluid-py/docs/RestBillingResponse.md new file mode 100644 index 00000000..b8420f6a --- /dev/null +++ b/sdk/fluid-py/docs/RestBillingResponse.md @@ -0,0 +1,32 @@ +# RestBillingResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**free_tier** | [**RestFreeTierInfo**](RestFreeTierInfo.md) | | [optional] +**plan** | **str** | | [optional] +**status** | **str** | | [optional] +**usage** | [**RestUsageSummary**](RestUsageSummary.md) | | [optional] + +## Example + +```python +from fluid.models.rest_billing_response import RestBillingResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of RestBillingResponse from a JSON string +rest_billing_response_instance = RestBillingResponse.from_json(json) +# print the JSON string representation of the object +print(RestBillingResponse.to_json()) + +# convert the object into a dict +rest_billing_response_dict = rest_billing_response_instance.to_dict() +# create an instance of RestBillingResponse from a dict +rest_billing_response_from_dict = RestBillingResponse.from_dict(rest_billing_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestCalculatorRequest.md b/sdk/fluid-py/docs/RestCalculatorRequest.md new file mode 100644 index 00000000..0aa44a11 --- /dev/null +++ b/sdk/fluid-py/docs/RestCalculatorRequest.md @@ -0,0 +1,32 @@ +# RestCalculatorRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**agent_hosts** | **int** | | [optional] +**concurrent_sandboxes** | **int** | | [optional] +**hours_per_month** | **float** | | [optional] 
+**source_vms** | **int** | | [optional] + +## Example + +```python +from fluid.models.rest_calculator_request import RestCalculatorRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of RestCalculatorRequest from a JSON string +rest_calculator_request_instance = RestCalculatorRequest.from_json(json) +# print the JSON string representation of the object +print(RestCalculatorRequest.to_json()) + +# convert the object into a dict +rest_calculator_request_dict = rest_calculator_request_instance.to_dict() +# create an instance of RestCalculatorRequest from a dict +rest_calculator_request_from_dict = RestCalculatorRequest.from_dict(rest_calculator_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestCalculatorResponse.md b/sdk/fluid-py/docs/RestCalculatorResponse.md new file mode 100644 index 00000000..4dbb0282 --- /dev/null +++ b/sdk/fluid-py/docs/RestCalculatorResponse.md @@ -0,0 +1,33 @@ +# RestCalculatorResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**agent_host_cost** | **float** | | [optional] +**currency** | **str** | | [optional] +**sandbox_cost** | **float** | | [optional] +**source_vm_cost** | **float** | | [optional] +**total_monthly** | **float** | | [optional] + +## Example + +```python +from fluid.models.rest_calculator_response import RestCalculatorResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of RestCalculatorResponse from a JSON string +rest_calculator_response_instance = RestCalculatorResponse.from_json(json) +# print the JSON string representation of the object +print(RestCalculatorResponse.to_json()) + +# convert the object into a dict +rest_calculator_response_dict = rest_calculator_response_instance.to_dict() +# create an instance of 
RestCalculatorResponse from a dict +rest_calculator_response_from_dict = RestCalculatorResponse.from_dict(rest_calculator_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestCreateHostTokenRequest.md b/sdk/fluid-py/docs/RestCreateHostTokenRequest.md new file mode 100644 index 00000000..ede726de --- /dev/null +++ b/sdk/fluid-py/docs/RestCreateHostTokenRequest.md @@ -0,0 +1,29 @@ +# RestCreateHostTokenRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | | [optional] + +## Example + +```python +from fluid.models.rest_create_host_token_request import RestCreateHostTokenRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of RestCreateHostTokenRequest from a JSON string +rest_create_host_token_request_instance = RestCreateHostTokenRequest.from_json(json) +# print the JSON string representation of the object +print(RestCreateHostTokenRequest.to_json()) + +# convert the object into a dict +rest_create_host_token_request_dict = rest_create_host_token_request_instance.to_dict() +# create an instance of RestCreateHostTokenRequest from a dict +rest_create_host_token_request_from_dict = RestCreateHostTokenRequest.from_dict(rest_create_host_token_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestCreateOrgRequest.md b/sdk/fluid-py/docs/RestCreateOrgRequest.md new file mode 100644 index 00000000..81c05327 --- /dev/null +++ b/sdk/fluid-py/docs/RestCreateOrgRequest.md @@ -0,0 +1,30 @@ +# RestCreateOrgRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- 
+**name** | **str** | | [optional] +**slug** | **str** | | [optional] + +## Example + +```python +from fluid.models.rest_create_org_request import RestCreateOrgRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of RestCreateOrgRequest from a JSON string +rest_create_org_request_instance = RestCreateOrgRequest.from_json(json) +# print the JSON string representation of the object +print(RestCreateOrgRequest.to_json()) + +# convert the object into a dict +rest_create_org_request_dict = rest_create_org_request_instance.to_dict() +# create an instance of RestCreateOrgRequest from a dict +rest_create_org_request_from_dict = RestCreateOrgRequest.from_dict(rest_create_org_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestFreeTierInfo.md b/sdk/fluid-py/docs/RestFreeTierInfo.md new file mode 100644 index 00000000..7eab7e1a --- /dev/null +++ b/sdk/fluid-py/docs/RestFreeTierInfo.md @@ -0,0 +1,31 @@ +# RestFreeTierInfo + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**max_agent_hosts** | **int** | | [optional] +**max_concurrent_sandboxes** | **int** | | [optional] +**max_source_vms** | **int** | | [optional] + +## Example + +```python +from fluid.models.rest_free_tier_info import RestFreeTierInfo + +# TODO update the JSON string below +json = "{}" +# create an instance of RestFreeTierInfo from a JSON string +rest_free_tier_info_instance = RestFreeTierInfo.from_json(json) +# print the JSON string representation of the object +print(RestFreeTierInfo.to_json()) + +# convert the object into a dict +rest_free_tier_info_dict = rest_free_tier_info_instance.to_dict() +# create an instance of RestFreeTierInfo from a dict +rest_free_tier_info_from_dict = RestFreeTierInfo.from_dict(rest_free_tier_info_dict) +``` +[[Back 
to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestHostTokenResponse.md b/sdk/fluid-py/docs/RestHostTokenResponse.md new file mode 100644 index 00000000..6314d543 --- /dev/null +++ b/sdk/fluid-py/docs/RestHostTokenResponse.md @@ -0,0 +1,32 @@ +# RestHostTokenResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**created_at** | **str** | | [optional] +**id** | **str** | | [optional] +**name** | **str** | | [optional] +**token** | **str** | Only set on creation. | [optional] + +## Example + +```python +from fluid.models.rest_host_token_response import RestHostTokenResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of RestHostTokenResponse from a JSON string +rest_host_token_response_instance = RestHostTokenResponse.from_json(json) +# print the JSON string representation of the object +print(RestHostTokenResponse.to_json()) + +# convert the object into a dict +rest_host_token_response_dict = rest_host_token_response_instance.to_dict() +# create an instance of RestHostTokenResponse from a dict +rest_host_token_response_from_dict = RestHostTokenResponse.from_dict(rest_host_token_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestLoginRequest.md b/sdk/fluid-py/docs/RestLoginRequest.md new file mode 100644 index 00000000..51d1b9f7 --- /dev/null +++ b/sdk/fluid-py/docs/RestLoginRequest.md @@ -0,0 +1,30 @@ +# RestLoginRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**email** | **str** | | [optional] +**password** | **str** | | [optional] + +## Example + +```python +from 
fluid.models.rest_login_request import RestLoginRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of RestLoginRequest from a JSON string +rest_login_request_instance = RestLoginRequest.from_json(json) +# print the JSON string representation of the object +print(RestLoginRequest.to_json()) + +# convert the object into a dict +rest_login_request_dict = rest_login_request_instance.to_dict() +# create an instance of RestLoginRequest from a dict +rest_login_request_from_dict = RestLoginRequest.from_dict(rest_login_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestMemberResponse.md b/sdk/fluid-py/docs/RestMemberResponse.md new file mode 100644 index 00000000..a5924374 --- /dev/null +++ b/sdk/fluid-py/docs/RestMemberResponse.md @@ -0,0 +1,32 @@ +# RestMemberResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**created_at** | **str** | | [optional] +**id** | **str** | | [optional] +**role** | **str** | | [optional] +**user_id** | **str** | | [optional] + +## Example + +```python +from fluid.models.rest_member_response import RestMemberResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of RestMemberResponse from a JSON string +rest_member_response_instance = RestMemberResponse.from_json(json) +# print the JSON string representation of the object +print(RestMemberResponse.to_json()) + +# convert the object into a dict +rest_member_response_dict = rest_member_response_instance.to_dict() +# create an instance of RestMemberResponse from a dict +rest_member_response_from_dict = RestMemberResponse.from_dict(rest_member_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back 
to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestOrgResponse.md b/sdk/fluid-py/docs/RestOrgResponse.md new file mode 100644 index 00000000..0366b49c --- /dev/null +++ b/sdk/fluid-py/docs/RestOrgResponse.md @@ -0,0 +1,34 @@ +# RestOrgResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**created_at** | **str** | | [optional] +**id** | **str** | | [optional] +**name** | **str** | | [optional] +**owner_id** | **str** | | [optional] +**slug** | **str** | | [optional] +**stripe_customer_id** | **str** | | [optional] + +## Example + +```python +from fluid.models.rest_org_response import RestOrgResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of RestOrgResponse from a JSON string +rest_org_response_instance = RestOrgResponse.from_json(json) +# print the JSON string representation of the object +print(RestOrgResponse.to_json()) + +# convert the object into a dict +rest_org_response_dict = rest_org_response_instance.to_dict() +# create an instance of RestOrgResponse from a dict +rest_org_response_from_dict = RestOrgResponse.from_dict(rest_org_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestRegisterRequest.md b/sdk/fluid-py/docs/RestRegisterRequest.md new file mode 100644 index 00000000..49422156 --- /dev/null +++ b/sdk/fluid-py/docs/RestRegisterRequest.md @@ -0,0 +1,31 @@ +# RestRegisterRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**display_name** | **str** | | [optional] +**email** | **str** | | [optional] +**password** | **str** | | [optional] + +## Example + +```python +from fluid.models.rest_register_request import RestRegisterRequest + +# TODO update the JSON string below +json = "{}" +# create 
an instance of RestRegisterRequest from a JSON string +rest_register_request_instance = RestRegisterRequest.from_json(json) +# print the JSON string representation of the object +print(RestRegisterRequest.to_json()) + +# convert the object into a dict +rest_register_request_dict = rest_register_request_instance.to_dict() +# create an instance of RestRegisterRequest from a dict +rest_register_request_from_dict = RestRegisterRequest.from_dict(rest_register_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestSwaggerError.md b/sdk/fluid-py/docs/RestSwaggerError.md new file mode 100644 index 00000000..1b7f7196 --- /dev/null +++ b/sdk/fluid-py/docs/RestSwaggerError.md @@ -0,0 +1,31 @@ +# RestSwaggerError + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**code** | **int** | | [optional] +**details** | **str** | | [optional] +**error** | **str** | | [optional] + +## Example + +```python +from fluid.models.rest_swagger_error import RestSwaggerError + +# TODO update the JSON string below +json = "{}" +# create an instance of RestSwaggerError from a JSON string +rest_swagger_error_instance = RestSwaggerError.from_json(json) +# print the JSON string representation of the object +print(RestSwaggerError.to_json()) + +# convert the object into a dict +rest_swagger_error_dict = rest_swagger_error_instance.to_dict() +# create an instance of RestSwaggerError from a dict +rest_swagger_error_from_dict = RestSwaggerError.from_dict(rest_swagger_error_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestUpdateOrgRequest.md b/sdk/fluid-py/docs/RestUpdateOrgRequest.md new file mode 100644 index 
00000000..54f69838 --- /dev/null +++ b/sdk/fluid-py/docs/RestUpdateOrgRequest.md @@ -0,0 +1,29 @@ +# RestUpdateOrgRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | | [optional] + +## Example + +```python +from fluid.models.rest_update_org_request import RestUpdateOrgRequest + +# TODO update the JSON string below +json = "{}" +# create an instance of RestUpdateOrgRequest from a JSON string +rest_update_org_request_instance = RestUpdateOrgRequest.from_json(json) +# print the JSON string representation of the object +print(RestUpdateOrgRequest.to_json()) + +# convert the object into a dict +rest_update_org_request_dict = rest_update_org_request_instance.to_dict() +# create an instance of RestUpdateOrgRequest from a dict +rest_update_org_request_from_dict = RestUpdateOrgRequest.from_dict(rest_update_org_request_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestUsageSummary.md b/sdk/fluid-py/docs/RestUsageSummary.md new file mode 100644 index 00000000..7ae32105 --- /dev/null +++ b/sdk/fluid-py/docs/RestUsageSummary.md @@ -0,0 +1,31 @@ +# RestUsageSummary + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**agent_hosts** | **float** | | [optional] +**sandbox_hours** | **float** | | [optional] +**source_vms** | **float** | | [optional] + +## Example + +```python +from fluid.models.rest_usage_summary import RestUsageSummary + +# TODO update the JSON string below +json = "{}" +# create an instance of RestUsageSummary from a JSON string +rest_usage_summary_instance = RestUsageSummary.from_json(json) +# print the JSON string representation of the object +print(RestUsageSummary.to_json()) + +# convert the object into a dict +rest_usage_summary_dict = 
rest_usage_summary_instance.to_dict() +# create an instance of RestUsageSummary from a dict +rest_usage_summary_from_dict = RestUsageSummary.from_dict(rest_usage_summary_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/RestUserResponse.md b/sdk/fluid-py/docs/RestUserResponse.md new file mode 100644 index 00000000..518e5c88 --- /dev/null +++ b/sdk/fluid-py/docs/RestUserResponse.md @@ -0,0 +1,33 @@ +# RestUserResponse + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**avatar_url** | **str** | | [optional] +**display_name** | **str** | | [optional] +**email** | **str** | | [optional] +**email_verified** | **bool** | | [optional] +**id** | **str** | | [optional] + +## Example + +```python +from fluid.models.rest_user_response import RestUserResponse + +# TODO update the JSON string below +json = "{}" +# create an instance of RestUserResponse from a JSON string +rest_user_response_instance = RestUserResponse.from_json(json) +# print the JSON string representation of the object +print(RestUserResponse.to_json()) + +# convert the object into a dict +rest_user_response_dict = rest_user_response_instance.to_dict() +# create an instance of RestUserResponse from a dict +rest_user_response_from_dict = RestUserResponse.from_dict(rest_user_response_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/SandboxesApi.md b/sdk/fluid-py/docs/SandboxesApi.md new file mode 100644 index 00000000..bfe495c4 --- /dev/null +++ b/sdk/fluid-py/docs/SandboxesApi.md @@ -0,0 +1,748 @@ +# fluid.SandboxesApi + +All URIs are relative to *http://localhost:8081/v1* + +Method | HTTP request | Description +------------- | 
------------- | ------------- +[**orgs_slug_sandboxes_get**](SandboxesApi.md#orgs_slug_sandboxes_get) | **GET** /orgs/{slug}/sandboxes | List sandboxes +[**orgs_slug_sandboxes_post**](SandboxesApi.md#orgs_slug_sandboxes_post) | **POST** /orgs/{slug}/sandboxes | Create sandbox +[**orgs_slug_sandboxes_sandbox_id_commands_get**](SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_commands_get) | **GET** /orgs/{slug}/sandboxes/{sandboxID}/commands | List commands +[**orgs_slug_sandboxes_sandbox_id_delete**](SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_delete) | **DELETE** /orgs/{slug}/sandboxes/{sandboxID} | Destroy sandbox +[**orgs_slug_sandboxes_sandbox_id_get**](SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_get) | **GET** /orgs/{slug}/sandboxes/{sandboxID} | Get sandbox +[**orgs_slug_sandboxes_sandbox_id_run_post**](SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_run_post) | **POST** /orgs/{slug}/sandboxes/{sandboxID}/run | Run command +[**orgs_slug_sandboxes_sandbox_id_snapshot_post**](SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_snapshot_post) | **POST** /orgs/{slug}/sandboxes/{sandboxID}/snapshot | Create snapshot +[**orgs_slug_sandboxes_sandbox_id_start_post**](SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_start_post) | **POST** /orgs/{slug}/sandboxes/{sandboxID}/start | Start sandbox +[**orgs_slug_sandboxes_sandbox_id_stop_post**](SandboxesApi.md#orgs_slug_sandboxes_sandbox_id_stop_post) | **POST** /orgs/{slug}/sandboxes/{sandboxID}/stop | Stop sandbox +[**orgs_slug_sandboxes_sandbox_idip_get**](SandboxesApi.md#orgs_slug_sandboxes_sandbox_idip_get) | **GET** /orgs/{slug}/sandboxes/{sandboxID}/ip | Get sandbox IP + + +# **orgs_slug_sandboxes_get** +> Dict[str, object] orgs_slug_sandboxes_get(slug) + +List sandboxes + +List all sandboxes in the organization + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py 
for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SandboxesApi(api_client) + slug = 'slug_example' # str | Organization slug + + try: + # List sandboxes + api_response = api_instance.orgs_slug_sandboxes_get(slug) + print("The response of SandboxesApi->orgs_slug_sandboxes_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SandboxesApi->orgs_slug_sandboxes_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_sandboxes_post** +> StoreSandbox orgs_slug_sandboxes_post(slug, request) + +Create sandbox + +Create a new sandbox in the organization from a source VM or base image + +### Example + + +```python +import fluid +from fluid.models.orchestrator_create_sandbox_request import OrchestratorCreateSandboxRequest +from fluid.models.store_sandbox import StoreSandbox +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a 
list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SandboxesApi(api_client) + slug = 'slug_example' # str | Organization slug + request = fluid.OrchestratorCreateSandboxRequest() # OrchestratorCreateSandboxRequest | Sandbox configuration + + try: + # Create sandbox + api_response = api_instance.orgs_slug_sandboxes_post(slug, request) + print("The response of SandboxesApi->orgs_slug_sandboxes_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SandboxesApi->orgs_slug_sandboxes_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **request** | [**OrchestratorCreateSandboxRequest**](OrchestratorCreateSandboxRequest.md)| Sandbox configuration | + +### Return type + +[**StoreSandbox**](StoreSandbox.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**201** | Created | - | +**400** | Bad Request | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_sandboxes_sandbox_id_commands_get** +> Dict[str, object] orgs_slug_sandboxes_sandbox_id_commands_get(slug, sandbox_id) + +List commands + +List all commands executed in a sandbox + +### Example + + +```python +import fluid +from fluid.rest 
import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SandboxesApi(api_client) + slug = 'slug_example' # str | Organization slug + sandbox_id = 'sandbox_id_example' # str | Sandbox ID + + try: + # List commands + api_response = api_instance.orgs_slug_sandboxes_sandbox_id_commands_get(slug, sandbox_id) + print("The response of SandboxesApi->orgs_slug_sandboxes_sandbox_id_commands_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SandboxesApi->orgs_slug_sandboxes_sandbox_id_commands_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **sandbox_id** | **str**| Sandbox ID | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_sandboxes_sandbox_id_delete** +> Dict[str, object] orgs_slug_sandboxes_sandbox_id_delete(slug, sandbox_id) + +Destroy sandbox + +Destroy a sandbox and release its resources + +### Example + + +```python +import 
fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SandboxesApi(api_client) + slug = 'slug_example' # str | Organization slug + sandbox_id = 'sandbox_id_example' # str | Sandbox ID + + try: + # Destroy sandbox + api_response = api_instance.orgs_slug_sandboxes_sandbox_id_delete(slug, sandbox_id) + print("The response of SandboxesApi->orgs_slug_sandboxes_sandbox_id_delete:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SandboxesApi->orgs_slug_sandboxes_sandbox_id_delete: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **sandbox_id** | **str**| Sandbox ID | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_sandboxes_sandbox_id_get** +> StoreSandbox orgs_slug_sandboxes_sandbox_id_get(slug, sandbox_id) + +Get sandbox + +Get sandbox details by ID + +### Example + + +```python +import fluid +from 
fluid.models.store_sandbox import StoreSandbox +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SandboxesApi(api_client) + slug = 'slug_example' # str | Organization slug + sandbox_id = 'sandbox_id_example' # str | Sandbox ID + + try: + # Get sandbox + api_response = api_instance.orgs_slug_sandboxes_sandbox_id_get(slug, sandbox_id) + print("The response of SandboxesApi->orgs_slug_sandboxes_sandbox_id_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SandboxesApi->orgs_slug_sandboxes_sandbox_id_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **sandbox_id** | **str**| Sandbox ID | + +### Return type + +[**StoreSandbox**](StoreSandbox.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_sandboxes_sandbox_id_run_post** +> StoreCommand orgs_slug_sandboxes_sandbox_id_run_post(slug, sandbox_id, request) + +Run command + +Execute a command in a sandbox + +### Example + + +```python +import 
fluid +from fluid.models.orchestrator_run_command_request import OrchestratorRunCommandRequest +from fluid.models.store_command import StoreCommand +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SandboxesApi(api_client) + slug = 'slug_example' # str | Organization slug + sandbox_id = 'sandbox_id_example' # str | Sandbox ID + request = fluid.OrchestratorRunCommandRequest() # OrchestratorRunCommandRequest | Command to run + + try: + # Run command + api_response = api_instance.orgs_slug_sandboxes_sandbox_id_run_post(slug, sandbox_id, request) + print("The response of SandboxesApi->orgs_slug_sandboxes_sandbox_id_run_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SandboxesApi->orgs_slug_sandboxes_sandbox_id_run_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **sandbox_id** | **str**| Sandbox ID | + **request** | [**OrchestratorRunCommandRequest**](OrchestratorRunCommandRequest.md)| Command to run | + +### Return type + +[**StoreCommand**](StoreCommand.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**400** | Bad Request | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + 
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_sandboxes_sandbox_id_snapshot_post** +> OrchestratorSnapshotResponse orgs_slug_sandboxes_sandbox_id_snapshot_post(slug, sandbox_id, request) + +Create snapshot + +Create a snapshot of a sandbox + +### Example + + +```python +import fluid +from fluid.models.orchestrator_snapshot_request import OrchestratorSnapshotRequest +from fluid.models.orchestrator_snapshot_response import OrchestratorSnapshotResponse +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SandboxesApi(api_client) + slug = 'slug_example' # str | Organization slug + sandbox_id = 'sandbox_id_example' # str | Sandbox ID + request = fluid.OrchestratorSnapshotRequest() # OrchestratorSnapshotRequest | Snapshot details + + try: + # Create snapshot + api_response = api_instance.orgs_slug_sandboxes_sandbox_id_snapshot_post(slug, sandbox_id, request) + print("The response of SandboxesApi->orgs_slug_sandboxes_sandbox_id_snapshot_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SandboxesApi->orgs_slug_sandboxes_sandbox_id_snapshot_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **sandbox_id** | **str**| Sandbox ID | + **request** | [**OrchestratorSnapshotRequest**](OrchestratorSnapshotRequest.md)| Snapshot details | + +### 
Return type + +[**OrchestratorSnapshotResponse**](OrchestratorSnapshotResponse.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**201** | Created | - | +**400** | Bad Request | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_sandboxes_sandbox_id_start_post** +> Dict[str, object] orgs_slug_sandboxes_sandbox_id_start_post(slug, sandbox_id) + +Start sandbox + +Start a stopped sandbox + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SandboxesApi(api_client) + slug = 'slug_example' # str | Organization slug + sandbox_id = 'sandbox_id_example' # str | Sandbox ID + + try: + # Start sandbox + api_response = api_instance.orgs_slug_sandboxes_sandbox_id_start_post(slug, sandbox_id) + print("The response of SandboxesApi->orgs_slug_sandboxes_sandbox_id_start_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SandboxesApi->orgs_slug_sandboxes_sandbox_id_start_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **sandbox_id** | **str**| Sandbox ID | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_sandboxes_sandbox_id_stop_post** +> Dict[str, object] orgs_slug_sandboxes_sandbox_id_stop_post(slug, sandbox_id) + +Stop sandbox + +Stop a running sandbox + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SandboxesApi(api_client) + slug = 'slug_example' # str | Organization slug + sandbox_id = 'sandbox_id_example' # str | Sandbox ID + + try: + # Stop sandbox + api_response = api_instance.orgs_slug_sandboxes_sandbox_id_stop_post(slug, sandbox_id) + print("The response of SandboxesApi->orgs_slug_sandboxes_sandbox_id_stop_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SandboxesApi->orgs_slug_sandboxes_sandbox_id_stop_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **sandbox_id** | **str**| Sandbox ID | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_sandboxes_sandbox_idip_get** +> Dict[str, object] orgs_slug_sandboxes_sandbox_idip_get(slug, sandbox_id) + +Get sandbox IP + +Get the IP address of a sandbox + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SandboxesApi(api_client) + slug = 'slug_example' # str | Organization slug + sandbox_id = 'sandbox_id_example' # str | Sandbox ID + + try: + # Get sandbox IP + api_response = api_instance.orgs_slug_sandboxes_sandbox_idip_get(slug, sandbox_id) + print("The response of SandboxesApi->orgs_slug_sandboxes_sandbox_idip_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SandboxesApi->orgs_slug_sandboxes_sandbox_idip_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **sandbox_id** | **str**| Sandbox ID | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdk/fluid-py/docs/SourceVMsApi.md b/sdk/fluid-py/docs/SourceVMsApi.md new file mode 100644 index 00000000..74fa9920 --- /dev/null +++ b/sdk/fluid-py/docs/SourceVMsApi.md @@ -0,0 +1,312 @@ +# fluid.SourceVMsApi + +All URIs are relative to *http://localhost:8081/v1* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**orgs_slug_sources_vm_prepare_post**](SourceVMsApi.md#orgs_slug_sources_vm_prepare_post) | **POST** 
/orgs/{slug}/sources/{vm}/prepare | Prepare source VM +[**orgs_slug_sources_vm_read_post**](SourceVMsApi.md#orgs_slug_sources_vm_read_post) | **POST** /orgs/{slug}/sources/{vm}/read | Read source file +[**orgs_slug_sources_vm_run_post**](SourceVMsApi.md#orgs_slug_sources_vm_run_post) | **POST** /orgs/{slug}/sources/{vm}/run | Run source command +[**orgs_slug_vms_get**](SourceVMsApi.md#orgs_slug_vms_get) | **GET** /orgs/{slug}/vms | List source VMs + + +# **orgs_slug_sources_vm_prepare_post** +> Dict[str, object] orgs_slug_sources_vm_prepare_post(slug, vm, request) + +Prepare source VM + +Prepare a source VM for sandbox cloning + +### Example + + +```python +import fluid +from fluid.models.orchestrator_prepare_request import OrchestratorPrepareRequest +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SourceVMsApi(api_client) + slug = 'slug_example' # str | Organization slug + vm = 'vm_example' # str | Source VM name + request = fluid.OrchestratorPrepareRequest() # OrchestratorPrepareRequest | SSH credentials + + try: + # Prepare source VM + api_response = api_instance.orgs_slug_sources_vm_prepare_post(slug, vm, request) + print("The response of SourceVMsApi->orgs_slug_sources_vm_prepare_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SourceVMsApi->orgs_slug_sources_vm_prepare_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **vm** | **str**| Source VM name | + **request** | 
[**OrchestratorPrepareRequest**](OrchestratorPrepareRequest.md)| SSH credentials | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**400** | Bad Request | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_sources_vm_read_post** +> OrchestratorSourceFileResult orgs_slug_sources_vm_read_post(slug, vm, request) + +Read source file + +Read a file from a source VM + +### Example + + +```python +import fluid +from fluid.models.orchestrator_read_source_request import OrchestratorReadSourceRequest +from fluid.models.orchestrator_source_file_result import OrchestratorSourceFileResult +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. 
+configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SourceVMsApi(api_client) + slug = 'slug_example' # str | Organization slug + vm = 'vm_example' # str | Source VM name + request = fluid.OrchestratorReadSourceRequest() # OrchestratorReadSourceRequest | File path + + try: + # Read source file + api_response = api_instance.orgs_slug_sources_vm_read_post(slug, vm, request) + print("The response of SourceVMsApi->orgs_slug_sources_vm_read_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SourceVMsApi->orgs_slug_sources_vm_read_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **vm** | **str**| Source VM name | + **request** | [**OrchestratorReadSourceRequest**](OrchestratorReadSourceRequest.md)| File path | + +### Return type + +[**OrchestratorSourceFileResult**](OrchestratorSourceFileResult.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**400** | Bad Request | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_sources_vm_run_post** +> OrchestratorSourceCommandResult orgs_slug_sources_vm_run_post(slug, vm, request) + +Run source command + +Execute a read-only command on a source VM + +### Example + + 
+```python +import fluid +from fluid.models.orchestrator_run_source_request import OrchestratorRunSourceRequest +from fluid.models.orchestrator_source_command_result import OrchestratorSourceCommandResult +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SourceVMsApi(api_client) + slug = 'slug_example' # str | Organization slug + vm = 'vm_example' # str | Source VM name + request = fluid.OrchestratorRunSourceRequest() # OrchestratorRunSourceRequest | Command to run + + try: + # Run source command + api_response = api_instance.orgs_slug_sources_vm_run_post(slug, vm, request) + print("The response of SourceVMsApi->orgs_slug_sources_vm_run_post:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SourceVMsApi->orgs_slug_sources_vm_run_post: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + **vm** | **str**| Source VM name | + **request** | [**OrchestratorRunSourceRequest**](OrchestratorRunSourceRequest.md)| Command to run | + +### Return type + +[**OrchestratorSourceCommandResult**](OrchestratorSourceCommandResult.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**400** | Bad Request | - | +**403** | Forbidden | - | +**404** | Not Found | - 
| +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **orgs_slug_vms_get** +> Dict[str, object] orgs_slug_vms_get(slug) + +List source VMs + +List all source VMs across connected hosts + +### Example + + +```python +import fluid +from fluid.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to http://localhost:8081/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = fluid.Configuration( + host = "http://localhost:8081/v1" +) + + +# Enter a context with an instance of the API client +with fluid.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = fluid.SourceVMsApi(api_client) + slug = 'slug_example' # str | Organization slug + + try: + # List source VMs + api_response = api_instance.orgs_slug_vms_get(slug) + print("The response of SourceVMsApi->orgs_slug_vms_get:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling SourceVMsApi->orgs_slug_vms_get: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **slug** | **str**| Organization slug | + +### Return type + +**Dict[str, object]** + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | OK | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**500** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to 
README]](../README.md) + diff --git a/sdk/fluid-py/docs/StoreCommand.md b/sdk/fluid-py/docs/StoreCommand.md new file mode 100644 index 00000000..63be299b --- /dev/null +++ b/sdk/fluid-py/docs/StoreCommand.md @@ -0,0 +1,37 @@ +# StoreCommand + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**command** | **str** | | [optional] +**duration_ms** | **int** | | [optional] +**ended_at** | **str** | | [optional] +**exit_code** | **int** | | [optional] +**id** | **str** | | [optional] +**sandbox_id** | **str** | | [optional] +**started_at** | **str** | | [optional] +**stderr** | **str** | | [optional] +**stdout** | **str** | | [optional] + +## Example + +```python +from fluid.models.store_command import StoreCommand + +# TODO update the JSON string below +json = "{}" +# create an instance of StoreCommand from a JSON string +store_command_instance = StoreCommand.from_json(json) +# print the JSON string representation of the object +print(StoreCommand.to_json()) + +# convert the object into a dict +store_command_dict = store_command_instance.to_dict() +# create an instance of StoreCommand from a dict +store_command_from_dict = StoreCommand.from_dict(store_command_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/StoreSandbox.md b/sdk/fluid-py/docs/StoreSandbox.md new file mode 100644 index 00000000..0bb21818 --- /dev/null +++ b/sdk/fluid-py/docs/StoreSandbox.md @@ -0,0 +1,46 @@ +# StoreSandbox + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**agent_id** | **str** | | [optional] +**base_image** | **str** | | [optional] +**bridge** | **str** | | [optional] +**created_at** | **str** | | [optional] +**deleted_at** | **str** | | [optional] +**host_id** | **str** | | [optional] +**id** | 
**str** | | [optional] +**ip_address** | **str** | | [optional] +**mac_address** | **str** | | [optional] +**memory_mb** | **int** | | [optional] +**name** | **str** | | [optional] +**org_id** | **str** | | [optional] +**source_vm** | **str** | | [optional] +**state** | [**StoreSandboxState**](StoreSandboxState.md) | | [optional] +**tap_device** | **str** | | [optional] +**ttl_seconds** | **int** | | [optional] +**updated_at** | **str** | | [optional] +**vcpus** | **int** | | [optional] + +## Example + +```python +from fluid.models.store_sandbox import StoreSandbox + +# TODO update the JSON string below +json = "{}" +# create an instance of StoreSandbox from a JSON string +store_sandbox_instance = StoreSandbox.from_json(json) +# print the JSON string representation of the object +print(StoreSandbox.to_json()) + +# convert the object into a dict +store_sandbox_dict = store_sandbox_instance.to_dict() +# create an instance of StoreSandbox from a dict +store_sandbox_from_dict = StoreSandbox.from_dict(store_sandbox_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/docs/StoreSandboxState.md b/sdk/fluid-py/docs/StoreSandboxState.md new file mode 100644 index 00000000..6d8bb627 --- /dev/null +++ b/sdk/fluid-py/docs/StoreSandboxState.md @@ -0,0 +1,18 @@ +# StoreSandboxState + + +## Enum + +* `SandboxStateCreating` (value: `'CREATING'`) + +* `SandboxStateRunning` (value: `'RUNNING'`) + +* `SandboxStateStopped` (value: `'STOPPED'`) + +* `SandboxStateDestroyed` (value: `'DESTROYED'`) + +* `SandboxStateError` (value: `'ERROR'`) + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdk/fluid-py/fluid/__init__.py b/sdk/fluid-py/fluid/__init__.py index fec9322f..e041b251 100644 --- 
a/sdk/fluid-py/fluid/__init__.py +++ b/sdk/fluid-py/fluid/__init__.py @@ -3,11 +3,11 @@ # flake8: noqa """ - fluid-remote API + Fluid API - API for managing AI Agent VM sandboxes using libvirt + API for managing sandboxes, organizations, billing, and hosts - The version of the OpenAPI document: 0.1.0 + The version of the OpenAPI document: 1.0 Generated by OpenAPI Generator (https://openapi-generator.tech) Do not edit the class manually. @@ -19,12 +19,15 @@ # Define package exports __all__ = [ "Fluid", - "AccessApi", - "AnsibleApi", - "AnsiblePlaybooksApi", + "AuthApi", + "BillingApi", "HealthApi", - "SandboxApi", - "VMsApi", + "HostTokensApi", + "HostsApi", + "MembersApi", + "OrganizationsApi", + "SandboxesApi", + "SourceVMsApi", "ApiResponse", "ApiClient", "Configuration", @@ -34,137 +37,48 @@ "ApiKeyError", "ApiAttributeError", "ApiException", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskRequest", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskResponse", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookRequest", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookResponse", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleExportPlaybookResponse", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleGetPlaybookResponse", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleJob", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobRequest", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobResponse", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobStatus", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleListPlaybooksResponse", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleReorderTasksRequest", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskRequest", - "GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskResponse", - "GithubComAspectrrFluidShFluidRemoteInternalErrorErrorResponse", - 
"GithubComAspectrrFluidShFluidRemoteInternalRestAccessErrorResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestCaPublicKeyResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestCertificateResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxRequest", - "GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestDestroySandboxResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestDiffRequest", - "GithubComAspectrrFluidShFluidRemoteInternalRestDiffResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestDiscoverIPResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestErrorResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestGenerateResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestGetSandboxResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestHostError", - "GithubComAspectrrFluidShFluidRemoteInternalRestInjectSSHKeyRequest", - "GithubComAspectrrFluidShFluidRemoteInternalRestListCertificatesResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxCommandsResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxesResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestListSessionsResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestListVMsResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestPublishRequest", - "GithubComAspectrrFluidShFluidRemoteInternalRestPublishResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessRequest", - "GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateRequest", - "GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandRequest", - "GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandResponse", - 
"GithubComAspectrrFluidShFluidRemoteInternalRestSandboxInfo", - "GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndRequest", - "GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestSessionResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartRequest", - "GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotRequest", - "GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxRequest", - "GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxResponse", - "GithubComAspectrrFluidShFluidRemoteInternalRestVmInfo", - "GithubComAspectrrFluidShFluidRemoteInternalStoreChangeDiff", - "GithubComAspectrrFluidShFluidRemoteInternalStoreCommand", - "GithubComAspectrrFluidShFluidRemoteInternalStoreCommandExecRecord", - "GithubComAspectrrFluidShFluidRemoteInternalStoreCommandSummary", - "GithubComAspectrrFluidShFluidRemoteInternalStoreDiff", - "GithubComAspectrrFluidShFluidRemoteInternalStorePackageInfo", - "GithubComAspectrrFluidShFluidRemoteInternalStorePlaybook", - "GithubComAspectrrFluidShFluidRemoteInternalStorePlaybookTask", - "GithubComAspectrrFluidShFluidRemoteInternalStoreSandbox", - "GithubComAspectrrFluidShFluidRemoteInternalStoreSandboxState", - "GithubComAspectrrFluidShFluidRemoteInternalStoreServiceChange", - "GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshot", - "GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshotKind", - "InternalAnsibleAddTaskRequest", - "InternalAnsibleAddTaskResponse", - "InternalAnsibleCreatePlaybookRequest", - "InternalAnsibleCreatePlaybookResponse", - "InternalAnsibleExportPlaybookResponse", - "InternalAnsibleGetPlaybookResponse", - "InternalAnsibleJob", - "InternalAnsibleJobRequest", - "InternalAnsibleJobResponse", - "InternalAnsibleJobStatus", - "InternalAnsibleListPlaybooksResponse", - 
"InternalAnsibleReorderTasksRequest", - "InternalAnsibleUpdateTaskRequest", - "InternalAnsibleUpdateTaskResponse", - "InternalRestAccessErrorResponse", - "InternalRestCaPublicKeyResponse", - "InternalRestCertificateResponse", - "InternalRestCreateSandboxRequest", - "InternalRestCreateSandboxResponse", - "InternalRestDestroySandboxResponse", - "InternalRestDiffRequest", - "InternalRestDiffResponse", - "InternalRestDiscoverIPResponse", - "InternalRestErrorResponse", - "InternalRestGenerateResponse", - "InternalRestGetSandboxResponse", - "InternalRestHealthResponse", - "InternalRestHostError", - "InternalRestInjectSSHKeyRequest", - "InternalRestListCertificatesResponse", - "InternalRestListSandboxCommandsResponse", - "InternalRestListSandboxesResponse", - "InternalRestListSessionsResponse", - "InternalRestListVMsResponse", - "InternalRestPublishRequest", - "InternalRestPublishResponse", - "InternalRestRequestAccessRequest", - "InternalRestRequestAccessResponse", - "InternalRestRevokeCertificateRequest", - "InternalRestRevokeCertificateResponse", - "InternalRestRunCommandRequest", - "InternalRestRunCommandResponse", - "InternalRestSandboxInfo", - "InternalRestSessionEndRequest", - "InternalRestSessionEndResponse", - "InternalRestSessionResponse", - "InternalRestSessionStartRequest", - "InternalRestSessionStartResponse", - "InternalRestSnapshotRequest", - "InternalRestSnapshotResponse", - "InternalRestStartSandboxRequest", - "InternalRestStartSandboxResponse", - "InternalRestVmInfo", - "TimeDuration", + "OrchestratorCreateSandboxRequest", + "OrchestratorHostInfo", + "OrchestratorPrepareRequest", + "OrchestratorReadSourceRequest", + "OrchestratorRunCommandRequest", + "OrchestratorRunSourceRequest", + "OrchestratorSnapshotRequest", + "OrchestratorSnapshotResponse", + "OrchestratorSourceCommandResult", + "OrchestratorSourceFileResult", + "RestAddMemberRequest", + "RestAuthResponse", + "RestBillingResponse", + "RestCalculatorRequest", + "RestCalculatorResponse", + 
"RestCreateHostTokenRequest", + "RestCreateOrgRequest", + "RestFreeTierInfo", + "RestHostTokenResponse", + "RestLoginRequest", + "RestMemberResponse", + "RestOrgResponse", + "RestRegisterRequest", + "RestSwaggerError", + "RestUpdateOrgRequest", + "RestUsageSummary", + "RestUserResponse", + "StoreCommand", + "StoreSandbox", + "StoreSandboxState", ] # import apis into sdk package -from fluid.api.access_api import AccessApi as AccessApi -from fluid.api.ansible_api import AnsibleApi as AnsibleApi -from fluid.api.ansible_playbooks_api import \ - AnsiblePlaybooksApi as AnsiblePlaybooksApi +from fluid.api.auth_api import AuthApi as AuthApi +from fluid.api.billing_api import BillingApi as BillingApi from fluid.api.health_api import HealthApi as HealthApi -from fluid.api.sandbox_api import SandboxApi as SandboxApi -from fluid.api.vms_api import VMsApi as VMsApi +from fluid.api.host_tokens_api import HostTokensApi as HostTokensApi +from fluid.api.hosts_api import HostsApi as HostsApi +from fluid.api.members_api import MembersApi as MembersApi +from fluid.api.organizations_api import OrganizationsApi as OrganizationsApi +from fluid.api.sandboxes_api import SandboxesApi as SandboxesApi +from fluid.api.source_vms_api import SourceVMsApi as SourceVMsApi from fluid.api_client import ApiClient as ApiClient # import ApiClient from fluid.api_response import ApiResponse as ApiResponse @@ -178,319 +92,60 @@ from fluid.exceptions import ApiValueError as ApiValueError from fluid.exceptions import OpenApiException as OpenApiException # import models into sdk package -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_add_task_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_add_task_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskResponse as \ - 
GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_export_playbook_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleExportPlaybookResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleExportPlaybookResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_get_playbook_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleGetPlaybookResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleGetPlaybookResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJob as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJob -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_status import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobStatus as \ - 
GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobStatus -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_list_playbooks_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleListPlaybooksResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleListPlaybooksResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_reorder_tasks_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleReorderTasksRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleReorderTasksRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_update_task_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_update_task_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_error_error_response import \ - GithubComAspectrrFluidShFluidRemoteInternalErrorErrorResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalErrorErrorResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_access_error_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestAccessErrorResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestAccessErrorResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_ca_public_key_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCaPublicKeyResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestCaPublicKeyResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_certificate_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCertificateResponse as \ 
- GithubComAspectrrFluidShFluidRemoteInternalRestCertificateResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_create_sandbox_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_create_sandbox_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_destroy_sandbox_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestDestroySandboxResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestDestroySandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_diff_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiffRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiffRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_diff_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiffResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiffResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_discover_ip_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiscoverIPResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiscoverIPResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_error_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestErrorResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestErrorResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_generate_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestGenerateResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestGenerateResponse 
-from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_get_sandbox_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestGetSandboxResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestGetSandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_health_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_host_error import \ - GithubComAspectrrFluidShFluidRemoteInternalRestHostError as \ - GithubComAspectrrFluidShFluidRemoteInternalRestHostError -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_inject_ssh_key_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestInjectSSHKeyRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalRestInjectSSHKeyRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_certificates_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListCertificatesResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestListCertificatesResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandbox_commands_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxCommandsResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxCommandsResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandboxes_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxesResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxesResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sessions_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSessionsResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSessionsResponse -from 
fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_vms_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListVMsResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestListVMsResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_publish_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestPublishRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalRestPublishRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_publish_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestPublishResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestPublishResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_request_access_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_request_access_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_revoke_certificate_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_revoke_certificate_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_run_command_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandRequest -from 
fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_run_command_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_sandbox_info import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSandboxInfo as \ - GithubComAspectrrFluidShFluidRemoteInternalRestSandboxInfo -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_end_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_end_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_start_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_start_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_snapshot_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotRequest -from 
fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_snapshot_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxRequest as \ - GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxResponse as \ - GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_vm_info import \ - GithubComAspectrrFluidShFluidRemoteInternalRestVmInfo as \ - GithubComAspectrrFluidShFluidRemoteInternalRestVmInfo -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_change_diff import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreChangeDiff as \ - GithubComAspectrrFluidShFluidRemoteInternalStoreChangeDiff -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommand as \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommand -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command_exec_record import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommandExecRecord as \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommandExecRecord -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command_summary import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommandSummary as \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommandSummary -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_diff import \ - 
GithubComAspectrrFluidShFluidRemoteInternalStoreDiff as \ - GithubComAspectrrFluidShFluidRemoteInternalStoreDiff -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_package_info import \ - GithubComAspectrrFluidShFluidRemoteInternalStorePackageInfo as \ - GithubComAspectrrFluidShFluidRemoteInternalStorePackageInfo -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_playbook import \ - GithubComAspectrrFluidShFluidRemoteInternalStorePlaybook as \ - GithubComAspectrrFluidShFluidRemoteInternalStorePlaybook -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_playbook_task import \ - GithubComAspectrrFluidShFluidRemoteInternalStorePlaybookTask as \ - GithubComAspectrrFluidShFluidRemoteInternalStorePlaybookTask -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_sandbox import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSandbox as \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSandbox -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_sandbox_state import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSandboxState as \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSandboxState -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_service_change import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreServiceChange as \ - GithubComAspectrrFluidShFluidRemoteInternalStoreServiceChange -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_snapshot import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshot as \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshot -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_snapshot_kind import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshotKind as \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshotKind -from fluid.models.internal_ansible_add_task_request import \ - 
InternalAnsibleAddTaskRequest as InternalAnsibleAddTaskRequest -from fluid.models.internal_ansible_add_task_response import \ - InternalAnsibleAddTaskResponse as InternalAnsibleAddTaskResponse -from fluid.models.internal_ansible_create_playbook_request import \ - InternalAnsibleCreatePlaybookRequest as \ - InternalAnsibleCreatePlaybookRequest -from fluid.models.internal_ansible_create_playbook_response import \ - InternalAnsibleCreatePlaybookResponse as \ - InternalAnsibleCreatePlaybookResponse -from fluid.models.internal_ansible_export_playbook_response import \ - InternalAnsibleExportPlaybookResponse as \ - InternalAnsibleExportPlaybookResponse -from fluid.models.internal_ansible_get_playbook_response import \ - InternalAnsibleGetPlaybookResponse as InternalAnsibleGetPlaybookResponse -from fluid.models.internal_ansible_job import \ - InternalAnsibleJob as InternalAnsibleJob -from fluid.models.internal_ansible_job_request import \ - InternalAnsibleJobRequest as InternalAnsibleJobRequest -from fluid.models.internal_ansible_job_response import \ - InternalAnsibleJobResponse as InternalAnsibleJobResponse -from fluid.models.internal_ansible_job_status import \ - InternalAnsibleJobStatus as InternalAnsibleJobStatus -from fluid.models.internal_ansible_list_playbooks_response import \ - InternalAnsibleListPlaybooksResponse as \ - InternalAnsibleListPlaybooksResponse -from fluid.models.internal_ansible_reorder_tasks_request import \ - InternalAnsibleReorderTasksRequest as InternalAnsibleReorderTasksRequest -from fluid.models.internal_ansible_update_task_request import \ - InternalAnsibleUpdateTaskRequest as InternalAnsibleUpdateTaskRequest -from fluid.models.internal_ansible_update_task_response import \ - InternalAnsibleUpdateTaskResponse as InternalAnsibleUpdateTaskResponse -from fluid.models.internal_rest_access_error_response import \ - InternalRestAccessErrorResponse as InternalRestAccessErrorResponse -from fluid.models.internal_rest_ca_public_key_response import \ - 
InternalRestCaPublicKeyResponse as InternalRestCaPublicKeyResponse -from fluid.models.internal_rest_certificate_response import \ - InternalRestCertificateResponse as InternalRestCertificateResponse -from fluid.models.internal_rest_create_sandbox_request import \ - InternalRestCreateSandboxRequest as InternalRestCreateSandboxRequest -from fluid.models.internal_rest_create_sandbox_response import \ - InternalRestCreateSandboxResponse as InternalRestCreateSandboxResponse -from fluid.models.internal_rest_destroy_sandbox_response import \ - InternalRestDestroySandboxResponse as InternalRestDestroySandboxResponse -from fluid.models.internal_rest_diff_request import \ - InternalRestDiffRequest as InternalRestDiffRequest -from fluid.models.internal_rest_diff_response import \ - InternalRestDiffResponse as InternalRestDiffResponse -from fluid.models.internal_rest_discover_ip_response import \ - InternalRestDiscoverIPResponse as InternalRestDiscoverIPResponse -from fluid.models.internal_rest_error_response import \ - InternalRestErrorResponse as InternalRestErrorResponse -from fluid.models.internal_rest_generate_response import \ - InternalRestGenerateResponse as InternalRestGenerateResponse -from fluid.models.internal_rest_get_sandbox_response import \ - InternalRestGetSandboxResponse as InternalRestGetSandboxResponse -from fluid.models.internal_rest_health_response import \ - InternalRestHealthResponse as InternalRestHealthResponse -from fluid.models.internal_rest_host_error import \ - InternalRestHostError as InternalRestHostError -from fluid.models.internal_rest_inject_ssh_key_request import \ - InternalRestInjectSSHKeyRequest as InternalRestInjectSSHKeyRequest -from fluid.models.internal_rest_list_certificates_response import \ - InternalRestListCertificatesResponse as \ - InternalRestListCertificatesResponse -from fluid.models.internal_rest_list_sandbox_commands_response import \ - InternalRestListSandboxCommandsResponse as \ - InternalRestListSandboxCommandsResponse 
-from fluid.models.internal_rest_list_sandboxes_response import \ - InternalRestListSandboxesResponse as InternalRestListSandboxesResponse -from fluid.models.internal_rest_list_sessions_response import \ - InternalRestListSessionsResponse as InternalRestListSessionsResponse -from fluid.models.internal_rest_list_vms_response import \ - InternalRestListVMsResponse as InternalRestListVMsResponse -from fluid.models.internal_rest_publish_request import \ - InternalRestPublishRequest as InternalRestPublishRequest -from fluid.models.internal_rest_publish_response import \ - InternalRestPublishResponse as InternalRestPublishResponse -from fluid.models.internal_rest_request_access_request import \ - InternalRestRequestAccessRequest as InternalRestRequestAccessRequest -from fluid.models.internal_rest_request_access_response import \ - InternalRestRequestAccessResponse as InternalRestRequestAccessResponse -from fluid.models.internal_rest_revoke_certificate_request import \ - InternalRestRevokeCertificateRequest as \ - InternalRestRevokeCertificateRequest -from fluid.models.internal_rest_revoke_certificate_response import \ - InternalRestRevokeCertificateResponse as \ - InternalRestRevokeCertificateResponse -from fluid.models.internal_rest_run_command_request import \ - InternalRestRunCommandRequest as InternalRestRunCommandRequest -from fluid.models.internal_rest_run_command_response import \ - InternalRestRunCommandResponse as InternalRestRunCommandResponse -from fluid.models.internal_rest_sandbox_info import \ - InternalRestSandboxInfo as InternalRestSandboxInfo -from fluid.models.internal_rest_session_end_request import \ - InternalRestSessionEndRequest as InternalRestSessionEndRequest -from fluid.models.internal_rest_session_end_response import \ - InternalRestSessionEndResponse as InternalRestSessionEndResponse -from fluid.models.internal_rest_session_response import \ - InternalRestSessionResponse as InternalRestSessionResponse -from 
fluid.models.internal_rest_session_start_request import \ - InternalRestSessionStartRequest as InternalRestSessionStartRequest -from fluid.models.internal_rest_session_start_response import \ - InternalRestSessionStartResponse as InternalRestSessionStartResponse -from fluid.models.internal_rest_snapshot_request import \ - InternalRestSnapshotRequest as InternalRestSnapshotRequest -from fluid.models.internal_rest_snapshot_response import \ - InternalRestSnapshotResponse as InternalRestSnapshotResponse -from fluid.models.internal_rest_start_sandbox_request import \ - InternalRestStartSandboxRequest as InternalRestStartSandboxRequest -from fluid.models.internal_rest_start_sandbox_response import \ - InternalRestStartSandboxResponse as InternalRestStartSandboxResponse -from fluid.models.internal_rest_vm_info import \ - InternalRestVmInfo as InternalRestVmInfo -from fluid.models.time_duration import TimeDuration as TimeDuration +from fluid.models.orchestrator_create_sandbox_request import \ + OrchestratorCreateSandboxRequest as OrchestratorCreateSandboxRequest +from fluid.models.orchestrator_host_info import \ + OrchestratorHostInfo as OrchestratorHostInfo +from fluid.models.orchestrator_prepare_request import \ + OrchestratorPrepareRequest as OrchestratorPrepareRequest +from fluid.models.orchestrator_read_source_request import \ + OrchestratorReadSourceRequest as OrchestratorReadSourceRequest +from fluid.models.orchestrator_run_command_request import \ + OrchestratorRunCommandRequest as OrchestratorRunCommandRequest +from fluid.models.orchestrator_run_source_request import \ + OrchestratorRunSourceRequest as OrchestratorRunSourceRequest +from fluid.models.orchestrator_snapshot_request import \ + OrchestratorSnapshotRequest as OrchestratorSnapshotRequest +from fluid.models.orchestrator_snapshot_response import \ + OrchestratorSnapshotResponse as OrchestratorSnapshotResponse +from fluid.models.orchestrator_source_command_result import \ + OrchestratorSourceCommandResult 
as OrchestratorSourceCommandResult +from fluid.models.orchestrator_source_file_result import \ + OrchestratorSourceFileResult as OrchestratorSourceFileResult +from fluid.models.rest_add_member_request import \ + RestAddMemberRequest as RestAddMemberRequest +from fluid.models.rest_auth_response import \ + RestAuthResponse as RestAuthResponse +from fluid.models.rest_billing_response import \ + RestBillingResponse as RestBillingResponse +from fluid.models.rest_calculator_request import \ + RestCalculatorRequest as RestCalculatorRequest +from fluid.models.rest_calculator_response import \ + RestCalculatorResponse as RestCalculatorResponse +from fluid.models.rest_create_host_token_request import \ + RestCreateHostTokenRequest as RestCreateHostTokenRequest +from fluid.models.rest_create_org_request import \ + RestCreateOrgRequest as RestCreateOrgRequest +from fluid.models.rest_free_tier_info import \ + RestFreeTierInfo as RestFreeTierInfo +from fluid.models.rest_host_token_response import \ + RestHostTokenResponse as RestHostTokenResponse +from fluid.models.rest_login_request import \ + RestLoginRequest as RestLoginRequest +from fluid.models.rest_member_response import \ + RestMemberResponse as RestMemberResponse +from fluid.models.rest_org_response import RestOrgResponse as RestOrgResponse +from fluid.models.rest_register_request import \ + RestRegisterRequest as RestRegisterRequest +from fluid.models.rest_swagger_error import \ + RestSwaggerError as RestSwaggerError +from fluid.models.rest_update_org_request import \ + RestUpdateOrgRequest as RestUpdateOrgRequest +from fluid.models.rest_usage_summary import \ + RestUsageSummary as RestUsageSummary +from fluid.models.rest_user_response import \ + RestUserResponse as RestUserResponse +from fluid.models.store_command import StoreCommand as StoreCommand +from fluid.models.store_sandbox import StoreSandbox as StoreSandbox +from fluid.models.store_sandbox_state import \ + StoreSandboxState as StoreSandboxState diff --git 
a/sdk/fluid-py/fluid/api/__init__.py b/sdk/fluid-py/fluid/api/__init__.py index 90c6583f..57c90c9a 100644 --- a/sdk/fluid-py/fluid/api/__init__.py +++ b/sdk/fluid-py/fluid/api/__init__.py @@ -1,9 +1,12 @@ # flake8: noqa # import apis into api package -from fluid.api.access_api import AccessApi -from fluid.api.ansible_api import AnsibleApi -from fluid.api.ansible_playbooks_api import AnsiblePlaybooksApi +from fluid.api.auth_api import AuthApi +from fluid.api.billing_api import BillingApi from fluid.api.health_api import HealthApi -from fluid.api.sandbox_api import SandboxApi -from fluid.api.vms_api import VMsApi +from fluid.api.host_tokens_api import HostTokensApi +from fluid.api.hosts_api import HostsApi +from fluid.api.members_api import MembersApi +from fluid.api.organizations_api import OrganizationsApi +from fluid.api.sandboxes_api import SandboxesApi +from fluid.api.source_vms_api import SourceVMsApi diff --git a/sdk/fluid-py/fluid/api/auth_api.py b/sdk/fluid-py/fluid/api/auth_api.py new file mode 100644 index 00000000..1562c581 --- /dev/null +++ b/sdk/fluid-py/fluid/api/auth_api.py @@ -0,0 +1,1648 @@ +# coding: utf-8 + +""" + Fluid API + API for managing sandboxes, organizations, billing, and hosts +""" + +from typing import Any, Dict, List, Optional, Tuple, Union + +from pydantic import Field, StrictStr +from typing_extensions import Annotated + +from fluid.api_client import ApiClient, RequestSerialized +from fluid.api_response import ApiResponse +from fluid.exceptions import ApiException +from fluid.models.rest_auth_response import RestAuthResponse +from fluid.models.rest_login_request import RestLoginRequest +from fluid.models.rest_register_request import RestRegisterRequest + + +class AuthApi: + """AuthApi service""" + + def __init__(self, api_client: Optional[ApiClient] = None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + def auth_github_callback_get( + self, + code: str, + _request_timeout: 
Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> None: + """GitHub OAuth callback + + Handle GitHub OAuth callback, create or link user, set session cookie, and redirect to dashboard + + :param code: OAuth authorization code (required) + :type code: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._auth_github_callback_get_serialize( + code=code, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "400": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def auth_github_callback_get_with_http_info( + self, + code: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[None]: + """GitHub OAuth callback + + Handle GitHub OAuth callback, create or link user, set session cookie, and redirect to dashboard + + :param code: OAuth authorization code (required) + :type code: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._auth_github_callback_get_serialize( + code=code, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "400": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def auth_github_callback_get_without_preload_content( + self, + code: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """GitHub OAuth callback + + Handle GitHub OAuth callback, create or link user, set session cookie, and redirect to dashboard + + :param code: OAuth authorization code (required) + :type code: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._auth_github_callback_get_serialize( + code=code, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "400": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _auth_github_callback_get_serialize( + self, + code: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + if code is not None: + _query_params.append(("code", code)) + + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Auth" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept(["*/*"]) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/auth/github/callback", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def auth_github_get( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + 
_request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> None: + """GitHub OAuth login + + Redirect to GitHub OAuth authorization page + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._auth_github_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "501": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def auth_github_get_with_http_info( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[None]: + """GitHub OAuth login + + Redirect to GitHub OAuth authorization page + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. 
It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._auth_github_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "501": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def auth_github_get_without_preload_content( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """GitHub OAuth login + + Redirect to GitHub OAuth authorization page + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. 
+ :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._auth_github_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "501": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _auth_github_get_serialize( + self, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Auth" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept(["*/*"]) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/auth/github", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def auth_google_callback_get( + self, + code: 
str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> None: + """Google OAuth callback + + Handle Google OAuth callback, create or link user, set session cookie, and redirect to dashboard + + :param code: OAuth authorization code (required) + :type code: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._auth_google_callback_get_serialize( + code=code, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "400": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def auth_google_callback_get_with_http_info( + self, + code: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[None]: + """Google OAuth callback + + Handle Google OAuth callback, create or link user, set session cookie, and redirect to dashboard + + :param code: OAuth authorization code (required) + :type code: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._auth_google_callback_get_serialize( + code=code, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "400": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def auth_google_callback_get_without_preload_content( + self, + code: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Google OAuth callback + + Handle Google OAuth callback, create or link user, set session cookie, and redirect to dashboard + + :param code: OAuth authorization code (required) + :type code: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._auth_google_callback_get_serialize( + code=code, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "400": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _auth_google_callback_get_serialize( + self, + code: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + if code is not None: + _query_params.append(("code", code)) + + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Auth" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept(["*/*"]) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/auth/google/callback", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def auth_google_get( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + 
_request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> None: + """Google OAuth login + + Redirect to Google OAuth authorization page + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._auth_google_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "501": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def auth_google_get_with_http_info( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[None]: + """Google OAuth login + + Redirect to Google OAuth authorization page + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. 
It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._auth_google_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "501": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def auth_google_get_without_preload_content( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Google OAuth login + + Redirect to Google OAuth authorization page + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. 
+ :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._auth_google_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "302": None, + "501": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _auth_google_get_serialize( + self, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Auth" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept(["*/*"]) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/auth/google", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def auth_login_post( + self, + request: 
RestLoginRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> RestAuthResponse: + """Log in + + Authenticate with email and password, returns a session cookie + + :param request: Login credentials (required) + :type request: RestLoginRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._auth_login_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestAuthResponse", + "400": "RestSwaggerError", + "401": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def auth_login_post_with_http_info( + self, + request: RestLoginRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[RestAuthResponse]: + """Log in + + Authenticate with email and password, returns a session cookie + + :param request: Login credentials (required) + :type request: RestLoginRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._auth_login_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestAuthResponse", + "400": "RestSwaggerError", + "401": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def auth_login_post_without_preload_content( + self, + request: RestLoginRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Log in + + Authenticate with email and password, returns a session cookie + + :param request: Login credentials (required) + :type request: RestLoginRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._auth_login_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestAuthResponse", + "400": "RestSwaggerError", + "401": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _auth_login_post_serialize( + self, + request: RestLoginRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Auth" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/auth/login", + path_params=_path_params, + query_params=_query_params, + 
header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def auth_logout_post( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, str]: + """Log out + + Invalidate the current session and clear the session cookie + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._auth_logout_post_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def auth_logout_post_with_http_info( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, str]]: + """Log out + + Invalidate the current session and clear the session cookie + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._auth_logout_post_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def auth_logout_post_without_preload_content( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Log out + + Invalidate the current session and clear the session cookie + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._auth_logout_post_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _auth_logout_post_serialize( + self, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Auth" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/auth/logout", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def auth_me_get( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + 
_host_index: int = 0, + ) -> RestAuthResponse: + """Get current user + + Return the currently authenticated user + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._auth_me_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestAuthResponse", + "401": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def auth_me_get_with_http_info( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[RestAuthResponse]: + """Get current user + + Return the currently authenticated user + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._auth_me_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestAuthResponse", + "401": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def auth_me_get_without_preload_content( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Get current user + + Return the currently authenticated user + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. 
+ :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._auth_me_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestAuthResponse", + "401": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _auth_me_get_serialize( + self, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Auth" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/auth/me", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def auth_register_post( + self, + request: RestRegisterRequest, + _request_timeout: Union[None, float, Tuple[float, float]] 
= None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> RestAuthResponse: + """Register a new user + + Create a new user account and return a session cookie + + :param request: Registration details (required) + :type request: RestRegisterRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._auth_register_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestAuthResponse", + "400": "RestSwaggerError", + "409": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def auth_register_post_with_http_info( + self, + request: RestRegisterRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[RestAuthResponse]: + """Register a new user + + Create a new user account and return a session cookie + + :param request: Registration details (required) + :type request: RestRegisterRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._auth_register_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestAuthResponse", + "400": "RestSwaggerError", + "409": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def auth_register_post_without_preload_content( + self, + request: RestRegisterRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Register a new user + + Create a new user account and return a session cookie + + :param request: Registration details (required) + :type request: RestRegisterRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._auth_register_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestAuthResponse", + "400": "RestSwaggerError", + "409": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _auth_register_post_serialize( + self, + request: RestRegisterRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Auth" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/auth/register", + path_params=_path_params, + 
query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/sdk/fluid-py/fluid/api/billing_api.py b/sdk/fluid-py/fluid/api/billing_api.py new file mode 100644 index 00000000..3ad25b89 --- /dev/null +++ b/sdk/fluid-py/fluid/api/billing_api.py @@ -0,0 +1,1283 @@ +# coding: utf-8 + +""" + Fluid API + API for managing sandboxes, organizations, billing, and hosts +""" + +from typing import Any, Dict, List, Optional, Tuple, Union + +from pydantic import Field, StrictStr +from typing_extensions import Annotated + +from fluid.api_client import ApiClient, RequestSerialized +from fluid.api_response import ApiResponse +from fluid.exceptions import ApiException +from fluid.models.rest_billing_response import RestBillingResponse +from fluid.models.rest_calculator_request import RestCalculatorRequest +from fluid.models.rest_calculator_response import RestCalculatorResponse + + +class BillingApi: + """BillingApi service""" + + def __init__(self, api_client: Optional[ApiClient] = None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + def billing_calculator_post( + self, + request: RestCalculatorRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> RestCalculatorResponse: + """Pricing calculator + + Calculate estimated monthly costs based on resource usage + + :param request: Resource quantities (required) + :type request: RestCalculatorRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._billing_calculator_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestCalculatorResponse", + "400": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def billing_calculator_post_with_http_info( + self, + request: RestCalculatorRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[RestCalculatorResponse]: + """Pricing calculator + + Calculate estimated monthly costs based on resource usage + + :param request: Resource quantities (required) + :type request: RestCalculatorRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._billing_calculator_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestCalculatorResponse", + "400": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def billing_calculator_post_without_preload_content( + self, + request: RestCalculatorRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Pricing calculator + + Calculate estimated monthly costs based on resource usage + + :param request: Resource quantities (required) + :type request: RestCalculatorRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. 
+ :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._billing_calculator_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestCalculatorResponse", + "400": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _billing_calculator_post_serialize( + self, + request: RestCalculatorRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Billing" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: 
List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/billing/calculator", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_billing_get( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> RestBillingResponse: + """Get billing info + + Get the current billing plan, status, and usage summary for an organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_billing_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestBillingResponse", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_billing_get_with_http_info( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[RestBillingResponse]: + """Get billing info + + Get the current billing plan, status, and usage summary for an organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_billing_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestBillingResponse", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_billing_get_without_preload_content( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Get billing info + + Get the current billing plan, status, and usage summary for an organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_billing_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestBillingResponse", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_billing_get_serialize( + self, + slug: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Billing" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}/billing", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_billing_portal_post( + self, + 
slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, str]: + """Billing portal + + Create a Stripe billing portal session (owner only) + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_billing_portal_post_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_billing_portal_post_with_http_info( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, str]]: + """Billing portal + + Create a Stripe billing portal session (owner only) + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_billing_portal_post_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_billing_portal_post_without_preload_content( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Billing portal + + Create a Stripe billing portal session (owner only) + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_billing_portal_post_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_billing_portal_post_serialize( + self, + slug: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Billing" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/orgs/{slug}/billing/portal", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def 
orgs_slug_billing_subscribe_post( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, str]: + """Subscribe + + Create a Stripe checkout session for the organization (owner only) + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_billing_subscribe_post_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_billing_subscribe_post_with_http_info( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, str]]: + """Subscribe + + Create a Stripe checkout session for the organization (owner only) + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_billing_subscribe_post_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_billing_subscribe_post_without_preload_content( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Subscribe + + Create a Stripe checkout session for the organization (owner only) + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_billing_subscribe_post_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_billing_subscribe_post_serialize( + self, + slug: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Billing" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/orgs/{slug}/billing/subscribe", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def 
orgs_slug_billing_usage_get( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """Get usage + + Get current month usage records for the organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_billing_usage_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_billing_usage_get_with_http_info( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """Get usage + + Get current month usage records for the organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_billing_usage_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_billing_usage_get_without_preload_content( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Get usage + + Get current month usage records for the organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_billing_usage_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_billing_usage_get_serialize( + self, + slug: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Billing" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}/billing/usage", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def webhooks_stripe_post( + self, 
+ _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, str]: + """Stripe webhook + + Handle incoming Stripe webhook events + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._webhooks_stripe_post_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def webhooks_stripe_post_with_http_info( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, str]]: + """Stripe webhook + + Handle incoming Stripe webhook events + + :param _request_timeout: Timeout setting for this request. 
If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._webhooks_stripe_post_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def webhooks_stripe_post_without_preload_content( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Stripe webhook + + Handle incoming Stripe webhook events + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. 
+ :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._webhooks_stripe_post_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _webhooks_stripe_post_serialize( + self, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Billing" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/webhooks/stripe", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + 
collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/sdk/fluid-py/fluid/api/health_api.py b/sdk/fluid-py/fluid/api/health_api.py index df9092fd..be2b4cb9 100644 --- a/sdk/fluid-py/fluid/api/health_api.py +++ b/sdk/fluid-py/fluid/api/health_api.py @@ -1,17 +1,17 @@ # coding: utf-8 """ - fluid-remote API - API for managing AI Agent VM sandboxes using libvirt + Fluid API + API for managing sandboxes, organizations, billing, and hosts """ from typing import Any, Dict, List, Optional, Tuple, Union +from pydantic import StrictStr + from fluid.api_client import ApiClient, RequestSerialized from fluid.api_response import ApiResponse from fluid.exceptions import ApiException -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_health_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse class HealthApi: @@ -22,17 +22,17 @@ def __init__(self, api_client: Optional[ApiClient] = None) -> None: api_client = ApiClient.get_default() self.api_client = api_client - def get_health( + def health_get( self, _request_timeout: Union[None, float, Tuple[float, float]] = None, _request_auth: Optional[Dict[str, Any]] = None, _content_type: Optional[str] = None, _headers: Optional[Dict[str, Any]] = None, _host_index: int = 0, - ) -> GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse: + ) -> Dict[str, str]: """Health check - Returns service health status + Returns API health status :param _request_timeout: Timeout setting for this request. If one number is provided, it will be the total request @@ -50,7 +50,7 @@ def get_health( :return: Returns the result object. 
""" - _param = self._get_health_serialize( + _param = self._health_get_serialize( _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, @@ -58,7 +58,7 @@ def get_health( ) _response_types_map: Dict[str, Optional[str]] = { - "200": "GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse", + "200": "Dict[str, str]", } response_data = self.api_client.call_api( *_param, _request_timeout=_request_timeout @@ -69,17 +69,17 @@ def get_health( response_types_map=_response_types_map, ).data - def get_health_with_http_info( + def health_get_with_http_info( self, _request_timeout: Union[None, float, Tuple[float, float]] = None, _request_auth: Optional[Dict[str, Any]] = None, _content_type: Optional[str] = None, _headers: Optional[Dict[str, Any]] = None, _host_index: int = 0, - ) -> ApiResponse[GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse]: + ) -> ApiResponse[Dict[str, str]]: """Health check - Returns service health status + Returns API health status :param _request_timeout: Timeout setting for this request. If one number is provided, it will be the total request @@ -97,7 +97,7 @@ def get_health_with_http_info( :return: Returns the result object with HTTP info. 
""" - _param = self._get_health_serialize( + _param = self._health_get_serialize( _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, @@ -105,7 +105,7 @@ def get_health_with_http_info( ) _response_types_map: Dict[str, Optional[str]] = { - "200": "GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse", + "200": "Dict[str, str]", } response_data = self.api_client.call_api( *_param, _request_timeout=_request_timeout @@ -116,7 +116,7 @@ def get_health_with_http_info( response_types_map=_response_types_map, ) - def get_health_without_preload_content( + def health_get_without_preload_content( self, _request_timeout: Union[None, float, Tuple[float, float]] = None, _request_auth: Optional[Dict[str, Any]] = None, @@ -126,7 +126,7 @@ def get_health_without_preload_content( ) -> Any: """Health check - Returns service health status + Returns API health status :param _request_timeout: Timeout setting for this request. If one number is provided, it will be the total request @@ -144,7 +144,7 @@ def get_health_without_preload_content( :return: Returns the result object without preloading content. 
""" - _param = self._get_health_serialize( + _param = self._health_get_serialize( _request_auth=_request_auth, _content_type=_content_type, _headers=_headers, @@ -152,14 +152,14 @@ def get_health_without_preload_content( ) _response_types_map: Dict[str, Optional[str]] = { - "200": "GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse", + "200": "Dict[str, str]", } response_data = self.api_client.call_api( *_param, _request_timeout=_request_timeout ) return response_data.response - def _get_health_serialize( + def _health_get_serialize( self, _request_auth: Optional[Dict[str, Any]], _content_type: Optional[str], @@ -198,7 +198,7 @@ def _get_health_serialize( return self.api_client.param_serialize( method="GET", - resource_path="/v1/health", + resource_path="/health", path_params=_path_params, query_params=_query_params, header_params=_header_params, diff --git a/sdk/fluid-py/fluid/api/host_tokens_api.py b/sdk/fluid-py/fluid/api/host_tokens_api.py new file mode 100644 index 00000000..fcf2be0c --- /dev/null +++ b/sdk/fluid-py/fluid/api/host_tokens_api.py @@ -0,0 +1,704 @@ +# coding: utf-8 + +""" + Fluid API + API for managing sandboxes, organizations, billing, and hosts +""" + +from typing import Any, Dict, List, Optional, Tuple, Union + +from pydantic import Field, StrictStr +from typing_extensions import Annotated + +from fluid.api_client import ApiClient, RequestSerialized +from fluid.api_response import ApiResponse +from fluid.exceptions import ApiException +from fluid.models.rest_create_host_token_request import \ + RestCreateHostTokenRequest +from fluid.models.rest_host_token_response import RestHostTokenResponse + + +class HostTokensApi: + """HostTokensApi service""" + + def __init__(self, api_client: Optional[ApiClient] = None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + def orgs_slug_hosts_tokens_get( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = 
None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """List host tokens + + List all host tokens for the organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_hosts_tokens_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_hosts_tokens_get_with_http_info( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """List host tokens + + List all host tokens for the organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_hosts_tokens_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_hosts_tokens_get_without_preload_content( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """List host tokens + + List all host tokens for the organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_hosts_tokens_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_hosts_tokens_get_serialize( + self, + slug: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Host_Tokens" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}/hosts/tokens", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_hosts_tokens_post( 
+ self, + slug: str, + request: RestCreateHostTokenRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> RestHostTokenResponse: + """Create host token + + Generate a new host authentication token (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param request: Token details (required) + :type request: RestCreateHostTokenRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_hosts_tokens_post_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestHostTokenResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_hosts_tokens_post_with_http_info( + self, + slug: str, + request: RestCreateHostTokenRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[RestHostTokenResponse]: + """Create host token + + Generate a new host authentication token (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param request: Token details (required) + :type request: RestCreateHostTokenRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_hosts_tokens_post_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestHostTokenResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_hosts_tokens_post_without_preload_content( + self, + slug: str, + request: RestCreateHostTokenRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Create host token + + Generate a new host authentication token (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param request: Token details (required) + :type request: RestCreateHostTokenRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_hosts_tokens_post_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestHostTokenResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_hosts_tokens_post_serialize( + self, + slug: str, + request: RestCreateHostTokenRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Host_Tokens" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] + + 
return self.api_client.param_serialize( + method="POST", + resource_path="/orgs/{slug}/hosts/tokens", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_hosts_tokens_token_id_delete( + self, + slug: str, + token_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, str]: + """Delete host token + + Delete a host token (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param token_id: Token ID (required) + :type token_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_hosts_tokens_token_id_delete_serialize( + slug=slug, + token_id=token_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_hosts_tokens_token_id_delete_with_http_info( + self, + slug: str, + token_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, str]]: + """Delete host token + + Delete a host token (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param token_id: Token ID (required) + :type token_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_hosts_tokens_token_id_delete_serialize( + slug=slug, + token_id=token_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_hosts_tokens_token_id_delete_without_preload_content( + self, + slug: str, + token_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Delete host token + + Delete a host token (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param token_id: Token ID (required) + :type token_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_hosts_tokens_token_id_delete_serialize( + slug=slug, + token_id=token_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_hosts_tokens_token_id_delete_serialize( + self, + slug: str, + token_id: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if token_id is not None: + _path_params["tokenID"] = token_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Host_Tokens" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="DELETE", + resource_path="/orgs/{slug}/hosts/tokens/{tokenID}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + 
collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/sdk/fluid-py/fluid/api/hosts_api.py b/sdk/fluid-py/fluid/api/hosts_api.py new file mode 100644 index 00000000..8b702d65 --- /dev/null +++ b/sdk/fluid-py/fluid/api/hosts_api.py @@ -0,0 +1,463 @@ +# coding: utf-8 + +""" + Fluid API + API for managing sandboxes, organizations, billing, and hosts +""" + +from typing import Any, Dict, List, Optional, Tuple, Union + +from pydantic import Field, StrictStr +from typing_extensions import Annotated + +from fluid.api_client import ApiClient, RequestSerialized +from fluid.api_response import ApiResponse +from fluid.exceptions import ApiException +from fluid.models.orchestrator_host_info import OrchestratorHostInfo + + +class HostsApi: + """HostsApi service""" + + def __init__(self, api_client: Optional[ApiClient] = None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + def orgs_slug_hosts_get( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """List hosts + + List all connected sandbox hosts + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. 
+ :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._orgs_slug_hosts_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_hosts_get_with_http_info( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """List hosts + + List all connected sandbox hosts + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_hosts_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_hosts_get_without_preload_content( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """List hosts + + List all connected sandbox hosts + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_hosts_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_hosts_get_serialize( + self, + slug: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Hosts" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}/hosts", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_hosts_host_id_get( + self, + slug: str, + 
host_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> OrchestratorHostInfo: + """Get host + + Get details of a specific connected host + + :param slug: Organization slug (required) + :type slug: str + :param host_id: Host ID (required) + :type host_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_hosts_host_id_get_serialize( + slug=slug, + host_id=host_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "OrchestratorHostInfo", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_hosts_host_id_get_with_http_info( + self, + slug: str, + host_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[OrchestratorHostInfo]: + """Get host + + Get details of a specific connected host + + :param slug: Organization slug (required) + :type slug: str + :param host_id: Host ID (required) + :type host_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_hosts_host_id_get_serialize( + slug=slug, + host_id=host_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "OrchestratorHostInfo", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_hosts_host_id_get_without_preload_content( + self, + slug: str, + host_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Get host + + Get details of a specific connected host + + :param slug: Organization slug (required) + :type slug: str + :param host_id: Host ID (required) + :type host_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_hosts_host_id_get_serialize( + slug=slug, + host_id=host_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "OrchestratorHostInfo", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_hosts_host_id_get_serialize( + self, + slug: str, + host_id: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if host_id is not None: + _path_params["hostID"] = host_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Hosts" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}/hosts/{hostID}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + 
_request_auth=_request_auth, + ) diff --git a/sdk/fluid-py/fluid/api/members_api.py b/sdk/fluid-py/fluid/api/members_api.py new file mode 100644 index 00000000..a6a29b54 --- /dev/null +++ b/sdk/fluid-py/fluid/api/members_api.py @@ -0,0 +1,709 @@ +# coding: utf-8 + +""" + Fluid API + API for managing sandboxes, organizations, billing, and hosts +""" + +from typing import Any, Dict, List, Optional, Tuple, Union + +from pydantic import Field, StrictStr +from typing_extensions import Annotated + +from fluid.api_client import ApiClient, RequestSerialized +from fluid.api_response import ApiResponse +from fluid.exceptions import ApiException +from fluid.models.rest_add_member_request import RestAddMemberRequest +from fluid.models.rest_member_response import RestMemberResponse + + +class MembersApi: + """MembersApi service""" + + def __init__(self, api_client: Optional[ApiClient] = None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + def orgs_slug_members_get( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """List members + + List all members of an organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. 
+ :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._orgs_slug_members_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_members_get_with_http_info( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """List members + + List all members of an organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_members_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_members_get_without_preload_content( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """List members + + List all members of an organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_members_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_members_get_serialize( + self, + slug: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Members" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}/members", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_members_member_id_delete( + self, + 
slug: str, + member_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, str]: + """Remove member + + Remove a member from an organization (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param member_id: Member ID (required) + :type member_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_members_member_id_delete_serialize( + slug=slug, + member_id=member_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_members_member_id_delete_with_http_info( + self, + slug: str, + member_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, str]]: + """Remove member + + Remove a member from an organization (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param member_id: Member ID (required) + :type member_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_members_member_id_delete_serialize( + slug=slug, + member_id=member_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_members_member_id_delete_without_preload_content( + self, + slug: str, + member_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Remove member + + Remove a member from an organization (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param member_id: Member ID (required) + :type member_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_members_member_id_delete_serialize( + slug=slug, + member_id=member_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_members_member_id_delete_serialize( + self, + slug: str, + member_id: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if member_id is not None: + _path_params["memberID"] = member_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Members" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="DELETE", + resource_path="/orgs/{slug}/members/{memberID}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + 
collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_members_post( + self, + slug: str, + request: RestAddMemberRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> RestMemberResponse: + """Add member + + Add a user to an organization (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param request: Member details (required) + :type request: RestAddMemberRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_members_post_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestMemberResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "409": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_members_post_with_http_info( + self, + slug: str, + request: RestAddMemberRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[RestMemberResponse]: + """Add member + + Add a user to an organization (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param request: Member details (required) + :type request: RestAddMemberRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_members_post_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestMemberResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "409": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_members_post_without_preload_content( + self, + slug: str, + request: RestAddMemberRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Add member + + Add a user to an organization (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param request: Member details (required) + :type request: RestAddMemberRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_members_post_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestMemberResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "409": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_members_post_serialize( + self, + slug: str, + request: RestAddMemberRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Members" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] 
+ + return self.api_client.param_serialize( + method="POST", + resource_path="/orgs/{slug}/members", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/sdk/fluid-py/fluid/api/organizations_api.py b/sdk/fluid-py/fluid/api/organizations_api.py new file mode 100644 index 00000000..96e8aac5 --- /dev/null +++ b/sdk/fluid-py/fluid/api/organizations_api.py @@ -0,0 +1,1105 @@ +# coding: utf-8 + +""" + Fluid API + API for managing sandboxes, organizations, billing, and hosts +""" + +from typing import Any, Dict, List, Optional, Tuple, Union + +from pydantic import Field, StrictStr +from typing_extensions import Annotated + +from fluid.api_client import ApiClient, RequestSerialized +from fluid.api_response import ApiResponse +from fluid.exceptions import ApiException +from fluid.models.rest_create_org_request import RestCreateOrgRequest +from fluid.models.rest_org_response import RestOrgResponse +from fluid.models.rest_update_org_request import RestUpdateOrgRequest + + +class OrganizationsApi: + """OrganizationsApi service""" + + def __init__(self, api_client: Optional[ApiClient] = None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + def orgs_get( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """List organizations + + List all organizations the current user belongs to + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._orgs_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_get_with_http_info( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """List organizations + + List all organizations the current user belongs to + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. 
+ :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._orgs_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_get_without_preload_content( + self, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """List organizations + + List all organizations the current user belongs to + + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_get_serialize( + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_get_serialize( + self, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Organizations" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_post( + self, + request: RestCreateOrgRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + 
_headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> RestOrgResponse: + """Create organization + + Create a new organization and add the current user as owner + + :param request: Organization details (required) + :type request: RestCreateOrgRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._orgs_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestOrgResponse", + "400": "RestSwaggerError", + "409": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_post_with_http_info( + self, + request: RestCreateOrgRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[RestOrgResponse]: + """Create organization + + Create a new organization and add the current user as owner + + :param 
request: Organization details (required) + :type request: RestCreateOrgRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._orgs_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestOrgResponse", + "400": "RestSwaggerError", + "409": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_post_without_preload_content( + self, + request: RestCreateOrgRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Create organization + + Create a new organization and add the current user as owner + + :param request: Organization details (required) + :type request: RestCreateOrgRequest + :param _request_timeout: Timeout setting for this request. 
If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._orgs_post_serialize( + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "RestOrgResponse", + "400": "RestSwaggerError", + "409": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_post_serialize( + self, + request: RestCreateOrgRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if 
"Organizations" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/orgs", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_delete( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, str]: + """Delete organization + + Delete an organization (owner only) + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_delete_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_delete_with_http_info( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, str]]: + """Delete organization + + Delete an organization (owner only) + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_delete_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_delete_without_preload_content( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Delete organization + + Delete an organization (owner only) + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_delete_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, str]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_delete_serialize( + self, + slug: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Organizations" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="DELETE", + resource_path="/orgs/{slug}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_get( + self, + slug: str, + _request_timeout: 
Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> RestOrgResponse: + """Get organization + + Get organization details by slug + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestOrgResponse", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_get_with_http_info( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[RestOrgResponse]: + """Get organization + + Get organization details by slug + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestOrgResponse", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_get_without_preload_content( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Get organization + + Get organization details by slug + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestOrgResponse", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_get_serialize( + self, + slug: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Organizations" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_patch( + self, + slug: str, + request: 
RestUpdateOrgRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> RestOrgResponse: + """Update organization + + Update organization details (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param request: Fields to update (required) + :type request: RestUpdateOrgRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_patch_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestOrgResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_patch_with_http_info( + self, + slug: str, + request: RestUpdateOrgRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[RestOrgResponse]: + """Update organization + + Update organization details (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param request: Fields to update (required) + :type request: RestUpdateOrgRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_patch_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestOrgResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_patch_without_preload_content( + self, + slug: str, + request: RestUpdateOrgRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Update organization + + Update organization details (owner or admin only) + + :param slug: Organization slug (required) + :type slug: str + :param request: Fields to update (required) + :type request: RestUpdateOrgRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_patch_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "RestOrgResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_patch_serialize( + self, + slug: str, + request: RestUpdateOrgRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Organizations" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] + + return 
self.api_client.param_serialize( + method="PATCH", + resource_path="/orgs/{slug}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/sdk/fluid-py/fluid/api/sandboxes_api.py b/sdk/fluid-py/fluid/api/sandboxes_api.py new file mode 100644 index 00000000..84c6ec1f --- /dev/null +++ b/sdk/fluid-py/fluid/api/sandboxes_api.py @@ -0,0 +1,2356 @@ +# coding: utf-8 + +""" + Fluid API + API for managing sandboxes, organizations, billing, and hosts +""" + +from typing import Any, Dict, List, Optional, Tuple, Union + +from pydantic import Field, StrictStr +from typing_extensions import Annotated + +from fluid.api_client import ApiClient, RequestSerialized +from fluid.api_response import ApiResponse +from fluid.exceptions import ApiException +from fluid.models.orchestrator_create_sandbox_request import \ + OrchestratorCreateSandboxRequest +from fluid.models.orchestrator_run_command_request import \ + OrchestratorRunCommandRequest +from fluid.models.orchestrator_snapshot_request import \ + OrchestratorSnapshotRequest +from fluid.models.orchestrator_snapshot_response import \ + OrchestratorSnapshotResponse +from fluid.models.store_command import StoreCommand +from fluid.models.store_sandbox import StoreSandbox + + +class SandboxesApi: + """SandboxesApi service""" + + def __init__(self, api_client: Optional[ApiClient] = None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + def orgs_slug_sandboxes_get( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """List 
sandboxes + + List all sandboxes in the organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._orgs_slug_sandboxes_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sandboxes_get_with_http_info( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """List sandboxes + + List all sandboxes in the organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. 
If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._orgs_slug_sandboxes_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sandboxes_get_without_preload_content( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """List sandboxes + + List all sandboxes in the organization + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._orgs_slug_sandboxes_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sandboxes_get_serialize( + self, + slug: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Sandboxes" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set 
the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}/sandboxes", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_sandboxes_post( + self, + slug: str, + request: OrchestratorCreateSandboxRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> StoreSandbox: + """Create sandbox + + Create a new sandbox in the organization from a source VM or base image + + :param slug: Organization slug (required) + :type slug: str + :param request: Sandbox configuration (required) + :type request: OrchestratorCreateSandboxRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_sandboxes_post_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "StoreSandbox", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sandboxes_post_with_http_info( + self, + slug: str, + request: OrchestratorCreateSandboxRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[StoreSandbox]: + """Create sandbox + + Create a new sandbox in the organization from a source VM or base image + + :param slug: Organization slug (required) + :type slug: str + :param request: Sandbox configuration (required) + :type request: OrchestratorCreateSandboxRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_sandboxes_post_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "StoreSandbox", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sandboxes_post_without_preload_content( + self, + slug: str, + request: OrchestratorCreateSandboxRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Create sandbox + + Create a new sandbox in the organization from a source VM or base image + + :param slug: Organization slug (required) + :type slug: str + :param request: Sandbox configuration (required) + :type request: OrchestratorCreateSandboxRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_sandboxes_post_serialize( + slug=slug, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "StoreSandbox", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sandboxes_post_serialize( + self, + slug: str, + request: OrchestratorCreateSandboxRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Sandboxes" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] + + return 
self.api_client.param_serialize( + method="POST", + resource_path="/orgs/{slug}/sandboxes", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_sandboxes_sandbox_id_commands_get( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """List commands + + List all commands executed in a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_commands_get_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sandboxes_sandbox_id_commands_get_with_http_info( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """List commands + + List all commands executed in a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_commands_get_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sandboxes_sandbox_id_commands_get_without_preload_content( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """List commands + + List all commands executed in a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_commands_get_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sandboxes_sandbox_id_commands_get_serialize( + self, + slug: str, + sandbox_id: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if sandbox_id is not None: + _path_params["sandboxID"] = sandbox_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Sandboxes" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}/sandboxes/{sandboxID}/commands", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + 
files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_sandboxes_sandbox_id_delete( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """Destroy sandbox + + Destroy a sandbox and release its resources + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_delete_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sandboxes_sandbox_id_delete_with_http_info( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """Destroy sandbox + + Destroy a sandbox and release its resources + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_delete_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sandboxes_sandbox_id_delete_without_preload_content( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Destroy sandbox + + Destroy a sandbox and release its resources + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_delete_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sandboxes_sandbox_id_delete_serialize( + self, + slug: str, + sandbox_id: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if sandbox_id is not None: + _path_params["sandboxID"] = sandbox_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Sandboxes" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="DELETE", + resource_path="/orgs/{slug}/sandboxes/{sandboxID}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + 
auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_sandboxes_sandbox_id_get( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> StoreSandbox: + """Get sandbox + + Get sandbox details by ID + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_get_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "StoreSandbox", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sandboxes_sandbox_id_get_with_http_info( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[StoreSandbox]: + """Get sandbox + + Get sandbox details by ID + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_get_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "StoreSandbox", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sandboxes_sandbox_id_get_without_preload_content( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Get sandbox + + Get sandbox details by ID + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_get_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "StoreSandbox", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sandboxes_sandbox_id_get_serialize( + self, + slug: str, + sandbox_id: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if sandbox_id is not None: + _path_params["sandboxID"] = sandbox_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Sandboxes" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}/sandboxes/{sandboxID}", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + 
collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_sandboxes_sandbox_id_run_post( + self, + slug: str, + sandbox_id: str, + request: OrchestratorRunCommandRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> StoreCommand: + """Run command + + Execute a command in a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param request: Command to run (required) + :type request: OrchestratorRunCommandRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_run_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "StoreCommand", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sandboxes_sandbox_id_run_post_with_http_info( + self, + slug: str, + sandbox_id: str, + request: OrchestratorRunCommandRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[StoreCommand]: + """Run command + + Execute a command in a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param request: Command to run (required) + :type request: OrchestratorRunCommandRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. 
+ :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._orgs_slug_sandboxes_sandbox_id_run_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "StoreCommand", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sandboxes_sandbox_id_run_post_without_preload_content( + self, + slug: str, + sandbox_id: str, + request: OrchestratorRunCommandRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Run command + + Execute a command in a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param request: Command to run (required) + :type request: OrchestratorRunCommandRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. 
+ :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._orgs_slug_sandboxes_sandbox_id_run_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "StoreCommand", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sandboxes_sandbox_id_run_post_serialize( + self, + slug: str, + sandbox_id: str, + request: OrchestratorRunCommandRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if sandbox_id is not None: + _path_params["sandboxID"] = sandbox_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Sandboxes" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if 
_content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/orgs/{slug}/sandboxes/{sandboxID}/run", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_sandboxes_sandbox_id_snapshot_post( + self, + slug: str, + sandbox_id: str, + request: OrchestratorSnapshotRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> OrchestratorSnapshotResponse: + """Create snapshot + + Create a snapshot of a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param request: Snapshot details (required) + :type request: OrchestratorSnapshotRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. 
+ :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._orgs_slug_sandboxes_sandbox_id_snapshot_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "OrchestratorSnapshotResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sandboxes_sandbox_id_snapshot_post_with_http_info( + self, + slug: str, + sandbox_id: str, + request: OrchestratorSnapshotRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[OrchestratorSnapshotResponse]: + """Create snapshot + + Create a snapshot of a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param request: Snapshot details (required) + :type request: OrchestratorSnapshotRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._orgs_slug_sandboxes_sandbox_id_snapshot_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "OrchestratorSnapshotResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sandboxes_sandbox_id_snapshot_post_without_preload_content( + self, + slug: str, + sandbox_id: str, + request: OrchestratorSnapshotRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Create snapshot + + Create a snapshot of a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param request: Snapshot details (required) + :type request: OrchestratorSnapshotRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. 
+ :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._orgs_slug_sandboxes_sandbox_id_snapshot_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "201": "OrchestratorSnapshotResponse", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sandboxes_sandbox_id_snapshot_post_serialize( + self, + slug: str, + sandbox_id: str, + request: OrchestratorSnapshotRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if sandbox_id is not None: + _path_params["sandboxID"] = sandbox_id + # process the query parameters + # process the header 
parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Sandboxes" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/orgs/{slug}/sandboxes/{sandboxID}/snapshot", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_sandboxes_sandbox_id_start_post( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """Start sandbox + + Start a stopped sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._orgs_slug_sandboxes_sandbox_id_start_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sandboxes_sandbox_id_start_post_with_http_info( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """Start sandbox + + Start a stopped sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. 
+ :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._orgs_slug_sandboxes_sandbox_id_start_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sandboxes_sandbox_id_start_post_without_preload_content( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Start sandbox + + Start a stopped sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. 
+ :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._orgs_slug_sandboxes_sandbox_id_start_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sandboxes_sandbox_id_start_post_serialize( + self, + slug: str, + sandbox_id: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if sandbox_id is not None: + _path_params["sandboxID"] = sandbox_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Sandboxes" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + 
resource_path="/orgs/{slug}/sandboxes/{sandboxID}/start", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_sandboxes_sandbox_id_stop_post( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """Stop sandbox + + Stop a running sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_stop_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sandboxes_sandbox_id_stop_post_with_http_info( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """Stop sandbox + + Stop a running sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_stop_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sandboxes_sandbox_id_stop_post_without_preload_content( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Stop sandbox + + Stop a running sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_id_stop_post_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sandboxes_sandbox_id_stop_post_serialize( + self, + slug: str, + sandbox_id: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if sandbox_id is not None: + _path_params["sandboxID"] = sandbox_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Sandboxes" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/orgs/{slug}/sandboxes/{sandboxID}/stop", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + 
auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_sandboxes_sandbox_idip_get( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """Get sandbox IP + + Get the IP address of a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_idip_get_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sandboxes_sandbox_idip_get_with_http_info( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """Get sandbox IP + + Get the IP address of a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_idip_get_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sandboxes_sandbox_idip_get_without_preload_content( + self, + slug: str, + sandbox_id: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Get sandbox IP + + Get the IP address of a sandbox + + :param slug: Organization slug (required) + :type slug: str + :param sandbox_id: Sandbox ID (required) + :type sandbox_id: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_sandboxes_sandbox_idip_get_serialize( + slug=slug, + sandbox_id=sandbox_id, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sandboxes_sandbox_idip_get_serialize( + self, + slug: str, + sandbox_id: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if sandbox_id is not None: + _path_params["sandboxID"] = sandbox_id + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Sandboxes" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}/sandboxes/{sandboxID}/ip", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + 
collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/sdk/fluid-py/fluid/api/source_vms_api.py b/sdk/fluid-py/fluid/api/source_vms_api.py new file mode 100644 index 00000000..5a5dd80b --- /dev/null +++ b/sdk/fluid-py/fluid/api/source_vms_api.py @@ -0,0 +1,1009 @@ +# coding: utf-8 + +""" + Fluid API + API for managing sandboxes, organizations, billing, and hosts +""" + +from typing import Any, Dict, List, Optional, Tuple, Union + +from pydantic import Field, StrictStr +from typing_extensions import Annotated + +from fluid.api_client import ApiClient, RequestSerialized +from fluid.api_response import ApiResponse +from fluid.exceptions import ApiException +from fluid.models.orchestrator_prepare_request import \ + OrchestratorPrepareRequest +from fluid.models.orchestrator_read_source_request import \ + OrchestratorReadSourceRequest +from fluid.models.orchestrator_run_source_request import \ + OrchestratorRunSourceRequest +from fluid.models.orchestrator_source_command_result import \ + OrchestratorSourceCommandResult +from fluid.models.orchestrator_source_file_result import \ + OrchestratorSourceFileResult + + +class SourceVMsApi: + """SourceVMsApi service""" + + def __init__(self, api_client: Optional[ApiClient] = None) -> None: + if api_client is None: + api_client = ApiClient.get_default() + self.api_client = api_client + + def orgs_slug_sources_vm_prepare_post( + self, + slug: str, + vm: str, + request: OrchestratorPrepareRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """Prepare source VM + + Prepare a source VM for sandbox cloning + + :param slug: Organization slug (required) + :type slug: str + :param vm: Source VM name (required) + :type vm: str + :param request: SSH credentials (required) + :type 
request: OrchestratorPrepareRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._orgs_slug_sources_vm_prepare_post_serialize( + slug=slug, + vm=vm, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sources_vm_prepare_post_with_http_info( + self, + slug: str, + vm: str, + request: OrchestratorPrepareRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """Prepare source VM + + Prepare a source VM for sandbox cloning + + :param slug: Organization slug (required) + :type slug: str + :param vm: Source VM name (required) + :type vm: str + :param request: SSH 
credentials (required) + :type request: OrchestratorPrepareRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._orgs_slug_sources_vm_prepare_post_serialize( + slug=slug, + vm=vm, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sources_vm_prepare_post_without_preload_content( + self, + slug: str, + vm: str, + request: OrchestratorPrepareRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Prepare source VM + + Prepare a source VM for sandbox cloning + + :param slug: Organization slug (required) + :type slug: str + :param vm: Source VM name (required) + :type vm: str 
+ :param request: SSH credentials (required) + :type request: OrchestratorPrepareRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._orgs_slug_sources_vm_prepare_post_serialize( + slug=slug, + vm=vm, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sources_vm_prepare_post_serialize( + self, + slug: str, + vm: str, + request: OrchestratorPrepareRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + 
_body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + if vm is not None: + _path_params["vm"] = vm + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Source_VMs" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/orgs/{slug}/sources/{vm}/prepare", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_sources_vm_read_post( + self, + slug: str, + vm: str, + request: OrchestratorReadSourceRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> OrchestratorSourceFileResult: + """Read source file + + Read a file from a source VM + + :param slug: Organization slug (required) + :type slug: str + :param vm: Source VM name (required) + :type vm: str + :param request: File path (required) + :type request: OrchestratorReadSourceRequest + :param _request_timeout: Timeout setting for this request. 
If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._orgs_slug_sources_vm_read_post_serialize( + slug=slug, + vm=vm, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "OrchestratorSourceFileResult", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sources_vm_read_post_with_http_info( + self, + slug: str, + vm: str, + request: OrchestratorReadSourceRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[OrchestratorSourceFileResult]: + """Read source file + + Read a file from a source VM + + :param slug: Organization slug (required) + :type slug: str + :param vm: Source VM name (required) + :type vm: str + :param request: File path (required) + :type request: OrchestratorReadSourceRequest + :param _request_timeout: 
Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._orgs_slug_sources_vm_read_post_serialize( + slug=slug, + vm=vm, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "OrchestratorSourceFileResult", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sources_vm_read_post_without_preload_content( + self, + slug: str, + vm: str, + request: OrchestratorReadSourceRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Read source file + + Read a file from a source VM + + :param slug: Organization slug (required) + :type slug: str + :param vm: Source VM name (required) + :type vm: str + :param request: File path (required) + :type request: OrchestratorReadSourceRequest + :param 
_request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._orgs_slug_sources_vm_read_post_serialize( + slug=slug, + vm=vm, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "OrchestratorSourceFileResult", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sources_vm_read_post_serialize( + self, + slug: str, + vm: str, + request: OrchestratorReadSourceRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + 
_path_params["slug"] = slug + if vm is not None: + _path_params["vm"] = vm + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Source_VMs" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/orgs/{slug}/sources/{vm}/read", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_sources_vm_run_post( + self, + slug: str, + vm: str, + request: OrchestratorRunSourceRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> OrchestratorSourceCommandResult: + """Run source command + + Execute a read-only command on a source VM + + :param slug: Organization slug (required) + :type slug: str + :param vm: Source VM name (required) + :type vm: str + :param request: Command to run (required) + :type request: OrchestratorRunSourceRequest + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. 
It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._orgs_slug_sources_vm_run_post_serialize( + slug=slug, + vm=vm, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "OrchestratorSourceCommandResult", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_sources_vm_run_post_with_http_info( + self, + slug: str, + vm: str, + request: OrchestratorRunSourceRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[OrchestratorSourceCommandResult]: + """Run source command + + Execute a read-only command on a source VM + + :param slug: Organization slug (required) + :type slug: str + :param vm: Source VM name (required) + :type vm: str + :param request: Command to run (required) + :type request: OrchestratorRunSourceRequest + :param _request_timeout: Timeout setting for this request. 
If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._orgs_slug_sources_vm_run_post_serialize( + slug=slug, + vm=vm, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "OrchestratorSourceCommandResult", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_sources_vm_run_post_without_preload_content( + self, + slug: str, + vm: str, + request: OrchestratorRunSourceRequest, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """Run source command + + Execute a read-only command on a source VM + + :param slug: Organization slug (required) + :type slug: str + :param vm: Source VM name (required) + :type vm: str + :param request: Command to run (required) + :type request: OrchestratorRunSourceRequest + :param 
_request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. + """ + + _param = self._orgs_slug_sources_vm_run_post_serialize( + slug=slug, + vm=vm, + request=request, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "OrchestratorSourceCommandResult", + "400": "RestSwaggerError", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_sources_vm_run_post_serialize( + self, + slug: str, + vm: str, + request: OrchestratorRunSourceRequest, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + 
_path_params["slug"] = slug + if vm is not None: + _path_params["vm"] = vm + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + if request is not None: + _body_params = request + + # set the HTTP header `Accept` + if "Source_VMs" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + if _content_type: + _header_params["Content-Type"] = _content_type + else: + _default_content_type = self.api_client.select_header_content_type( + ["application/json"] + ) + if _default_content_type is not None: + _header_params["Content-Type"] = _default_content_type + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="POST", + resource_path="/orgs/{slug}/sources/{vm}/run", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) + + def orgs_slug_vms_get( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Dict[str, object]: + """List source VMs + + List all source VMs across connected hosts + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. 
+ :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object. + """ + + _param = self._orgs_slug_vms_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ).data + + def orgs_slug_vms_get_with_http_info( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> ApiResponse[Dict[str, object]]: + """List source VMs + + List all source VMs across connected hosts + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. 
+ :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object with HTTP info. + """ + + _param = self._orgs_slug_vms_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + response_data.read() + return self.api_client.response_deserialize( + response_data=response_data, + response_types_map=_response_types_map, + ) + + def orgs_slug_vms_get_without_preload_content( + self, + slug: str, + _request_timeout: Union[None, float, Tuple[float, float]] = None, + _request_auth: Optional[Dict[str, Any]] = None, + _content_type: Optional[str] = None, + _headers: Optional[Dict[str, Any]] = None, + _host_index: int = 0, + ) -> Any: + """List source VMs + + List all source VMs across connected hosts + + :param slug: Organization slug (required) + :type slug: str + :param _request_timeout: Timeout setting for this request. If one + number is provided, it will be the total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :type _request_timeout: int, tuple(int, int), optional + :param _request_auth: Override the auth_settings for a single request. + :type _request_auth: dict, optional + :param _content_type: Force content-type for the request. + :type _content_type: str, optional + :param _headers: Override headers for a single request. + :type _headers: dict, optional + :param _host_index: Override host index for a single request. + :type _host_index: int, optional + :return: Returns the result object without preloading content. 
+ """ + + _param = self._orgs_slug_vms_get_serialize( + slug=slug, + _request_auth=_request_auth, + _content_type=_content_type, + _headers=_headers, + _host_index=_host_index, + ) + + _response_types_map: Dict[str, Optional[str]] = { + "200": "Dict[str, object]", + "403": "RestSwaggerError", + "404": "RestSwaggerError", + "500": "RestSwaggerError", + } + response_data = self.api_client.call_api( + *_param, _request_timeout=_request_timeout + ) + return response_data.response + + def _orgs_slug_vms_get_serialize( + self, + slug: str, + _request_auth: Optional[Dict[str, Any]], + _content_type: Optional[str], + _headers: Optional[Dict[str, Any]], + _host_index: int, + ) -> RequestSerialized: + _host = None + + _collection_formats: Dict[str, str] = {} + + _path_params: Dict[str, str] = {} + _query_params: List[Tuple[str, str]] = [] + _header_params: Dict[str, Optional[str]] = _headers or {} + _form_params: List[Tuple[str, str]] = [] + _files: Dict[ + str, Union[str, bytes, List[str], List[bytes], List[Tuple[str, bytes]]] + ] = {} + _body_params: Any = None + + # process the path parameters + if slug is not None: + _path_params["slug"] = slug + # process the query parameters + # process the header parameters + # process the form parameters + # process the body parameter + + # set the HTTP header `Accept` + if "Source_VMs" not in _header_params: + _header_params["Accept"] = self.api_client.select_header_accept( + ["application/json"] + ) + + # set the HTTP header `Content-Type` + + # authentication setting + _auth_settings: List[str] = [] + + return self.api_client.param_serialize( + method="GET", + resource_path="/orgs/{slug}/vms", + path_params=_path_params, + query_params=_query_params, + header_params=_header_params, + body=_body_params, + post_params=_form_params, + files=_files, + auth_settings=_auth_settings, + collection_formats=_collection_formats, + _host=_host, + _request_auth=_request_auth, + ) diff --git a/sdk/fluid-py/fluid/api_client.py 
b/sdk/fluid-py/fluid/api_client.py index 044ec0d4..d35ddd08 100644 --- a/sdk/fluid-py/fluid/api_client.py +++ b/sdk/fluid-py/fluid/api_client.py @@ -1,11 +1,11 @@ # coding: utf-8 """ - fluid-remote API + Fluid API - API for managing AI Agent VM sandboxes using libvirt + API for managing sandboxes, organizations, billing, and hosts - The version of the OpenAPI document: 0.1.0 + The version of the OpenAPI document: 1.0 Generated by OpenAPI Generator (https://openapi-generator.tech) Do not edit the class manually. diff --git a/sdk/fluid-py/fluid/client.py b/sdk/fluid-py/fluid/client.py index 957afc64..2ea2faca 100644 --- a/sdk/fluid-py/fluid/client.py +++ b/sdk/fluid-py/fluid/client.py @@ -23,8 +23,16 @@ from fluid.api.access_api import AccessApi from fluid.api.ansible_api import AnsibleApi from fluid.api.ansible_playbooks_api import AnsiblePlaybooksApi +from fluid.api.auth_api import AuthApi +from fluid.api.billing_api import BillingApi from fluid.api.health_api import HealthApi +from fluid.api.host_tokens_api import HostTokensApi +from fluid.api.hosts_api import HostsApi +from fluid.api.members_api import MembersApi +from fluid.api.organizations_api import OrganizationsApi from fluid.api.sandbox_api import SandboxApi +from fluid.api.sandboxes_api import SandboxesApi +from fluid.api.source_vms_api import SourceVMsApi from fluid.api.vms_api import VMsApi from fluid.api_client import ApiClient from fluid.configuration import Configuration @@ -32,12 +40,6 @@ GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskRequest from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_add_task_response import \ GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookRequest -from 
fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_export_playbook_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleExportPlaybookResponse from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_get_playbook_response import \ GithubComAspectrrFluidShFluidRemoteInternalAnsibleGetPlaybookResponse from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job import \ @@ -46,8 +48,6 @@ GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobRequest from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_response import \ GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_list_playbooks_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleListPlaybooksResponse from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_reorder_tasks_request import \ GithubComAspectrrFluidShFluidRemoteInternalAnsibleReorderTasksRequest from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_update_task_request import \ @@ -68,12 +68,8 @@ GithubComAspectrrFluidShFluidRemoteInternalRestDiscoverIPResponse from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_get_sandbox_response import \ GithubComAspectrrFluidShFluidRemoteInternalRestGetSandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_health_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_inject_ssh_key_request import \ GithubComAspectrrFluidShFluidRemoteInternalRestInjectSSHKeyRequest -from 
fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandbox_commands_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxCommandsResponse from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandboxes_response import \ GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxesResponse from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_vms_response import \ @@ -88,8 +84,6 @@ GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotRequest from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_snapshot_response import \ GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxRequest from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_response import \ GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxResponse from fluid.models.internal_rest_ca_public_key_response import \ @@ -116,6 +110,41 @@ InternalRestSessionStartRequest from fluid.models.internal_rest_session_start_response import \ InternalRestSessionStartResponse +from fluid.models.orchestrator_create_sandbox_request import \ + OrchestratorCreateSandboxRequest +from fluid.models.orchestrator_host_info import OrchestratorHostInfo +from fluid.models.orchestrator_prepare_request import \ + OrchestratorPrepareRequest +from fluid.models.orchestrator_read_source_request import \ + OrchestratorReadSourceRequest +from fluid.models.orchestrator_run_command_request import \ + OrchestratorRunCommandRequest +from fluid.models.orchestrator_run_source_request import \ + OrchestratorRunSourceRequest +from fluid.models.orchestrator_snapshot_request import \ + OrchestratorSnapshotRequest +from fluid.models.orchestrator_snapshot_response import \ + OrchestratorSnapshotResponse +from 
fluid.models.orchestrator_source_command_result import \ + OrchestratorSourceCommandResult +from fluid.models.orchestrator_source_file_result import \ + OrchestratorSourceFileResult +from fluid.models.rest_add_member_request import RestAddMemberRequest +from fluid.models.rest_auth_response import RestAuthResponse +from fluid.models.rest_billing_response import RestBillingResponse +from fluid.models.rest_calculator_request import RestCalculatorRequest +from fluid.models.rest_calculator_response import RestCalculatorResponse +from fluid.models.rest_create_host_token_request import \ + RestCreateHostTokenRequest +from fluid.models.rest_create_org_request import RestCreateOrgRequest +from fluid.models.rest_host_token_response import RestHostTokenResponse +from fluid.models.rest_login_request import RestLoginRequest +from fluid.models.rest_member_response import RestMemberResponse +from fluid.models.rest_org_response import RestOrgResponse +from fluid.models.rest_register_request import RestRegisterRequest +from fluid.models.rest_update_org_request import RestUpdateOrgRequest +from fluid.models.store_command import StoreCommand +from fluid.models.store_sandbox import StoreSandbox class AccessOperations: @@ -393,27 +422,15 @@ def add_playbook_task( def create_playbook( self, - become: Optional[bool] = None, - hosts: Optional[str] = None, - name: Optional[str] = None, ) -> GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookResponse: """Create playbook - Args: - become: become - hosts: hosts - name: name - Returns: GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookResponse: Pydantic model with full IDE autocomplete. Call .model_dump() to convert to dict if needed. 
""" request = ( - GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookRequest( - become=become, - hosts=hosts, - name=name, - ) + GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookRequest() ) return self._api.create_playbook(request=request) @@ -533,22 +550,474 @@ def update_playbook_task( ) +class AuthOperations: + """Wrapper for AuthApi with simplified method signatures.""" + + def __init__(self, api: AuthApi): + self._api = api + + def auth_github_callback_get( + self, + code: str, + ) -> None: + """GitHub OAuth callback + + Args: + code: str + """ + return self._api.auth_github_callback_get(code=code) + + def auth_github_get(self) -> None: + """GitHub OAuth login""" + return self._api.auth_github_get() + + def auth_google_callback_get( + self, + code: str, + ) -> None: + """Google OAuth callback + + Args: + code: str + """ + return self._api.auth_google_callback_get(code=code) + + def auth_google_get(self) -> None: + """Google OAuth login""" + return self._api.auth_google_get() + + def auth_login_post( + self, + email: Optional[str] = None, + password: Optional[str] = None, + ) -> RestAuthResponse: + """Log in + + Args: + email: email + password: password + + Returns: + RestAuthResponse: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + request = RestLoginRequest( + email=email, + password=password, + ) + return self._api.auth_login_post(request=request) + + def auth_logout_post(self) -> Dict[str, str]: + """Log out + + Returns: + Dict[str, str]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.auth_logout_post() + + def auth_me_get(self) -> RestAuthResponse: + """Get current user + + Returns: + RestAuthResponse: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. 
+ """ + return self._api.auth_me_get() + + def auth_register_post( + self, + display_name: Optional[str] = None, + email: Optional[str] = None, + password: Optional[str] = None, + ) -> RestAuthResponse: + """Register a new user + + Args: + display_name: display_name + email: email + password: password + + Returns: + RestAuthResponse: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + request = RestRegisterRequest( + display_name=display_name, + email=email, + password=password, + ) + return self._api.auth_register_post(request=request) + + +class BillingOperations: + """Wrapper for BillingApi with simplified method signatures.""" + + def __init__(self, api: BillingApi): + self._api = api + + def billing_calculator_post( + self, + agent_hosts: Optional[int] = None, + concurrent_sandboxes: Optional[int] = None, + hours_per_month: Optional[Union[float, int]] = None, + source_vms: Optional[int] = None, + ) -> RestCalculatorResponse: + """Pricing calculator + + Args: + agent_hosts: agent_hosts + concurrent_sandboxes: concurrent_sandboxes + hours_per_month: hours_per_month + source_vms: source_vms + + Returns: + RestCalculatorResponse: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + request = RestCalculatorRequest( + agent_hosts=agent_hosts, + concurrent_sandboxes=concurrent_sandboxes, + hours_per_month=hours_per_month, + source_vms=source_vms, + ) + return self._api.billing_calculator_post(request=request) + + def orgs_slug_billing_get( + self, + slug: str, + ) -> RestBillingResponse: + """Get billing info + + Args: + slug: str + + Returns: + RestBillingResponse: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. 
+ """ + return self._api.orgs_slug_billing_get(slug=slug) + + def orgs_slug_billing_portal_post( + self, + slug: str, + ) -> Dict[str, str]: + """Billing portal + + Args: + slug: str + + Returns: + Dict[str, str]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_billing_portal_post(slug=slug) + + def orgs_slug_billing_subscribe_post( + self, + slug: str, + ) -> Dict[str, str]: + """Subscribe + + Args: + slug: str + + Returns: + Dict[str, str]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_billing_subscribe_post(slug=slug) + + def orgs_slug_billing_usage_get( + self, + slug: str, + ) -> Dict[str, object]: + """Get usage + + Args: + slug: str + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_billing_usage_get(slug=slug) + + def webhooks_stripe_post(self) -> Dict[str, str]: + """Stripe webhook + + Returns: + Dict[str, str]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.webhooks_stripe_post() + + class HealthOperations: """Wrapper for HealthApi with simplified method signatures.""" def __init__(self, api: HealthApi): self._api = api - def get_health( - self, - ) -> GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse: + def health_get(self) -> Dict[str, str]: """Health check Returns: - GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse: Pydantic model with full IDE autocomplete. + Dict[str, str]: Pydantic model with full IDE autocomplete. Call .model_dump() to convert to dict if needed. 
""" - return self._api.get_health() + return self._api.health_get() + + +class HostTokensOperations: + """Wrapper for HostTokensApi with simplified method signatures.""" + + def __init__(self, api: HostTokensApi): + self._api = api + + def orgs_slug_hosts_tokens_get( + self, + slug: str, + ) -> Dict[str, object]: + """List host tokens + + Args: + slug: str + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_hosts_tokens_get(slug=slug) + + def orgs_slug_hosts_tokens_post( + self, + slug: str, + name: Optional[str] = None, + ) -> RestHostTokenResponse: + """Create host token + + Args: + slug: str + name: name + + Returns: + RestHostTokenResponse: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + request = RestCreateHostTokenRequest( + name=name, + ) + return self._api.orgs_slug_hosts_tokens_post(slug=slug, request=request) + + def orgs_slug_hosts_tokens_token_id_delete( + self, + slug: str, + token_id: str, + ) -> Dict[str, str]: + """Delete host token + + Args: + slug: str + token_id: str + + Returns: + Dict[str, str]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_hosts_tokens_token_id_delete( + slug=slug, token_id=token_id + ) + + +class HostsOperations: + """Wrapper for HostsApi with simplified method signatures.""" + + def __init__(self, api: HostsApi): + self._api = api + + def orgs_slug_hosts_get( + self, + slug: str, + ) -> Dict[str, object]: + """List hosts + + Args: + slug: str + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. 
+ """ + return self._api.orgs_slug_hosts_get(slug=slug) + + def orgs_slug_hosts_host_id_get( + self, + slug: str, + host_id: str, + ) -> OrchestratorHostInfo: + """Get host + + Args: + slug: str + host_id: str + + Returns: + OrchestratorHostInfo: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_hosts_host_id_get(slug=slug, host_id=host_id) + + +class MembersOperations: + """Wrapper for MembersApi with simplified method signatures.""" + + def __init__(self, api: MembersApi): + self._api = api + + def orgs_slug_members_get( + self, + slug: str, + ) -> Dict[str, object]: + """List members + + Args: + slug: str + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_members_get(slug=slug) + + def orgs_slug_members_member_id_delete( + self, + slug: str, + member_id: str, + ) -> Dict[str, str]: + """Remove member + + Args: + slug: str + member_id: str + + Returns: + Dict[str, str]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_members_member_id_delete( + slug=slug, member_id=member_id + ) + + def orgs_slug_members_post( + self, + slug: str, + email: Optional[str] = None, + role: Optional[str] = None, + ) -> RestMemberResponse: + """Add member + + Args: + slug: str + email: email + role: role + + Returns: + RestMemberResponse: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. 
+ """ + request = RestAddMemberRequest( + email=email, + role=role, + ) + return self._api.orgs_slug_members_post(slug=slug, request=request) + + +class OrganizationsOperations: + """Wrapper for OrganizationsApi with simplified method signatures.""" + + def __init__(self, api: OrganizationsApi): + self._api = api + + def orgs_get(self) -> Dict[str, object]: + """List organizations + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_get() + + def orgs_post( + self, + name: Optional[str] = None, + slug: Optional[str] = None, + ) -> RestOrgResponse: + """Create organization + + Args: + name: name + slug: slug + + Returns: + RestOrgResponse: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + request = RestCreateOrgRequest( + name=name, + slug=slug, + ) + return self._api.orgs_post(request=request) + + def orgs_slug_delete( + self, + slug: str, + ) -> Dict[str, str]: + """Delete organization + + Args: + slug: str + + Returns: + Dict[str, str]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_delete(slug=slug) + + def orgs_slug_get( + self, + slug: str, + ) -> RestOrgResponse: + """Get organization + + Args: + slug: str + + Returns: + RestOrgResponse: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_get(slug=slug) + + def orgs_slug_patch( + self, + slug: str, + name: Optional[str] = None, + ) -> RestOrgResponse: + """Update organization + + Args: + slug: str + name: name + + Returns: + RestOrgResponse: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. 
+ """ + request = RestUpdateOrgRequest( + name=name, + ) + return self._api.orgs_slug_patch(slug=slug, request=request) class SandboxOperations: @@ -714,7 +1183,7 @@ def inject_ssh_key( Args: id: str public_key: required - username: required (explicit); typical: \ + username: required (explicit); typical: """ request = GithubComAspectrrFluidShFluidRemoteInternalRestInjectSSHKeyRequest( public_key=public_key, @@ -816,7 +1285,7 @@ def run_sandbox_command( env: optional private_key_path: optional; if empty, uses managed credentials (requires SSH CA) timeout_sec: optional; default from service config - user: optional; defaults to \ + user: optional; defaults to request_timeout: HTTP request timeout in seconds. Can be a single float for total timeout, or a tuple (connect_timeout, read_timeout). For operations with wait_for_ip=True, set this to at least 180 seconds. Returns: @@ -837,26 +1306,19 @@ def run_sandbox_command( def start_sandbox( self, id: str, - wait_for_ip: Optional[bool] = None, request_timeout: Union[None, float, Tuple[float, float]] = None, ) -> GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxResponse: """Start sandbox Args: id: str - wait_for_ip: optional; default false. When True, consider setting request_timeout to accommodate IP discovery (server default is 120s) request_timeout: HTTP request timeout in seconds. Can be a single float for total timeout, or a tuple (connect_timeout, read_timeout). For operations with wait_for_ip=True, set this to at least 180 seconds. Returns: GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxResponse: Pydantic model with full IDE autocomplete. Call .model_dump() to convert to dict if needed. 
""" - request = GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxRequest( - wait_for_ip=wait_for_ip, - ) - return self._api.start_sandbox( - id=id, request=request, _request_timeout=request_timeout - ) + return self._api.start_sandbox(id=id, _request_timeout=request_timeout) def stream_sandbox_activity( self, @@ -870,6 +1332,340 @@ def stream_sandbox_activity( return self._api.stream_sandbox_activity(id=id) +class SandboxesOperations: + """Wrapper for SandboxesApi with simplified method signatures.""" + + def __init__(self, api: SandboxesApi): + self._api = api + + def orgs_slug_sandboxes_get( + self, + slug: str, + ) -> Dict[str, object]: + """List sandboxes + + Args: + slug: str + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_sandboxes_get(slug=slug) + + def orgs_slug_sandboxes_post( + self, + slug: str, + agent_id: Optional[str] = None, + base_image: Optional[str] = None, + memory_mb: Optional[int] = None, + name: Optional[str] = None, + network: Optional[str] = None, + org_id: Optional[str] = None, + source_vm: Optional[str] = None, + ttl_seconds: Optional[int] = None, + vcpus: Optional[int] = None, + ) -> StoreSandbox: + """Create sandbox + + Args: + slug: str + agent_id: agent_id + base_image: base_image + memory_mb: memory_mb + name: name + network: network + org_id: org_id + source_vm: source_vm + ttl_seconds: ttl_seconds + vcpus: vcpus + + Returns: + StoreSandbox: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. 
+ """ + request = OrchestratorCreateSandboxRequest( + agent_id=agent_id, + base_image=base_image, + memory_mb=memory_mb, + name=name, + network=network, + org_id=org_id, + source_vm=source_vm, + ttl_seconds=ttl_seconds, + vcpus=vcpus, + ) + return self._api.orgs_slug_sandboxes_post(slug=slug, request=request) + + def orgs_slug_sandboxes_sandbox_id_commands_get( + self, + slug: str, + sandbox_id: str, + ) -> Dict[str, object]: + """List commands + + Args: + slug: str + sandbox_id: str + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_sandboxes_sandbox_id_commands_get( + slug=slug, sandbox_id=sandbox_id + ) + + def orgs_slug_sandboxes_sandbox_id_delete( + self, + slug: str, + sandbox_id: str, + ) -> Dict[str, object]: + """Destroy sandbox + + Args: + slug: str + sandbox_id: str + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_sandboxes_sandbox_id_delete( + slug=slug, sandbox_id=sandbox_id + ) + + def orgs_slug_sandboxes_sandbox_id_get( + self, + slug: str, + sandbox_id: str, + ) -> StoreSandbox: + """Get sandbox + + Args: + slug: str + sandbox_id: str + + Returns: + StoreSandbox: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_sandboxes_sandbox_id_get( + slug=slug, sandbox_id=sandbox_id + ) + + def orgs_slug_sandboxes_sandbox_id_run_post( + self, + slug: str, + sandbox_id: str, + command: Optional[str] = None, + env: Optional[Dict[str, str]] = None, + timeout_seconds: Optional[int] = None, + ) -> StoreCommand: + """Run command + + Args: + slug: str + sandbox_id: str + command: command + env: env + timeout_seconds: timeout_seconds + + Returns: + StoreCommand: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. 
+ """ + request = OrchestratorRunCommandRequest( + command=command, + env=env, + timeout_seconds=timeout_seconds, + ) + return self._api.orgs_slug_sandboxes_sandbox_id_run_post( + slug=slug, sandbox_id=sandbox_id, request=request + ) + + def orgs_slug_sandboxes_sandbox_id_snapshot_post( + self, + slug: str, + sandbox_id: str, + name: Optional[str] = None, + ) -> OrchestratorSnapshotResponse: + """Create snapshot + + Args: + slug: str + sandbox_id: str + name: name + + Returns: + OrchestratorSnapshotResponse: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + request = OrchestratorSnapshotRequest( + name=name, + ) + return self._api.orgs_slug_sandboxes_sandbox_id_snapshot_post( + slug=slug, sandbox_id=sandbox_id, request=request + ) + + def orgs_slug_sandboxes_sandbox_id_start_post( + self, + slug: str, + sandbox_id: str, + ) -> Dict[str, object]: + """Start sandbox + + Args: + slug: str + sandbox_id: str + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_sandboxes_sandbox_id_start_post( + slug=slug, sandbox_id=sandbox_id + ) + + def orgs_slug_sandboxes_sandbox_id_stop_post( + self, + slug: str, + sandbox_id: str, + ) -> Dict[str, object]: + """Stop sandbox + + Args: + slug: str + sandbox_id: str + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_sandboxes_sandbox_id_stop_post( + slug=slug, sandbox_id=sandbox_id + ) + + def orgs_slug_sandboxes_sandbox_idip_get( + self, + slug: str, + sandbox_id: str, + ) -> Dict[str, object]: + """Get sandbox IP + + Args: + slug: str + sandbox_id: str + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. 
+ """ + return self._api.orgs_slug_sandboxes_sandbox_idip_get( + slug=slug, sandbox_id=sandbox_id + ) + + +class SourceVMsOperations: + """Wrapper for SourceVMsApi with simplified method signatures.""" + + def __init__(self, api: SourceVMsApi): + self._api = api + + def orgs_slug_sources_vm_prepare_post( + self, + slug: str, + vm: str, + ssh_key_path: Optional[str] = None, + ssh_user: Optional[str] = None, + ) -> Dict[str, object]: + """Prepare source VM + + Args: + slug: str + vm: str + ssh_key_path: ssh_key_path + ssh_user: ssh_user + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + request = OrchestratorPrepareRequest( + ssh_key_path=ssh_key_path, + ssh_user=ssh_user, + ) + return self._api.orgs_slug_sources_vm_prepare_post( + slug=slug, vm=vm, request=request + ) + + def orgs_slug_sources_vm_read_post( + self, + slug: str, + vm: str, + path: Optional[str] = None, + ) -> OrchestratorSourceFileResult: + """Read source file + + Args: + slug: str + vm: str + path: path + + Returns: + OrchestratorSourceFileResult: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + request = OrchestratorReadSourceRequest( + path=path, + ) + return self._api.orgs_slug_sources_vm_read_post( + slug=slug, vm=vm, request=request + ) + + def orgs_slug_sources_vm_run_post( + self, + slug: str, + vm: str, + command: Optional[str] = None, + timeout_seconds: Optional[int] = None, + ) -> OrchestratorSourceCommandResult: + """Run source command + + Args: + slug: str + vm: str + command: command + timeout_seconds: timeout_seconds + + Returns: + OrchestratorSourceCommandResult: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. 
+ """ + request = OrchestratorRunSourceRequest( + command=command, + timeout_seconds=timeout_seconds, + ) + return self._api.orgs_slug_sources_vm_run_post( + slug=slug, vm=vm, request=request + ) + + def orgs_slug_vms_get( + self, + slug: str, + ) -> Dict[str, object]: + """List source VMs + + Args: + slug: str + + Returns: + Dict[str, object]: Pydantic model with full IDE autocomplete. + Call .model_dump() to convert to dict if needed. + """ + return self._api.orgs_slug_vms_get(slug=slug) + + class VMsOperations: """Wrapper for VMsApi with simplified method signatures.""" @@ -932,8 +1728,16 @@ def __init__( self._access: Optional[AccessOperations] = None self._ansible: Optional[AnsibleOperations] = None self._ansible_playbooks: Optional[AnsiblePlaybooksOperations] = None + self._auth: Optional[AuthOperations] = None + self._billing: Optional[BillingOperations] = None self._health: Optional[HealthOperations] = None + self._host_tokens: Optional[HostTokensOperations] = None + self._hosts: Optional[HostsOperations] = None + self._members: Optional[MembersOperations] = None + self._organizations: Optional[OrganizationsOperations] = None self._sandbox: Optional[SandboxOperations] = None + self._sandboxes: Optional[SandboxesOperations] = None + self._source_vms: Optional[SourceVMsOperations] = None self._vms: Optional[VMsOperations] = None @property @@ -960,6 +1764,22 @@ def ansible_playbooks(self) -> AnsiblePlaybooksOperations: self._ansible_playbooks = AnsiblePlaybooksOperations(api) return self._ansible_playbooks + @property + def auth(self) -> AuthOperations: + """Access AuthApi operations.""" + if self._auth is None: + api = AuthApi(api_client=self._main_api_client) + self._auth = AuthOperations(api) + return self._auth + + @property + def billing(self) -> BillingOperations: + """Access BillingApi operations.""" + if self._billing is None: + api = BillingApi(api_client=self._main_api_client) + self._billing = BillingOperations(api) + return self._billing + 
@property def health(self) -> HealthOperations: """Access HealthApi operations.""" @@ -968,6 +1788,38 @@ def health(self) -> HealthOperations: self._health = HealthOperations(api) return self._health + @property + def host_tokens(self) -> HostTokensOperations: + """Access HostTokensApi operations.""" + if self._host_tokens is None: + api = HostTokensApi(api_client=self._main_api_client) + self._host_tokens = HostTokensOperations(api) + return self._host_tokens + + @property + def hosts(self) -> HostsOperations: + """Access HostsApi operations.""" + if self._hosts is None: + api = HostsApi(api_client=self._main_api_client) + self._hosts = HostsOperations(api) + return self._hosts + + @property + def members(self) -> MembersOperations: + """Access MembersApi operations.""" + if self._members is None: + api = MembersApi(api_client=self._main_api_client) + self._members = MembersOperations(api) + return self._members + + @property + def organizations(self) -> OrganizationsOperations: + """Access OrganizationsApi operations.""" + if self._organizations is None: + api = OrganizationsApi(api_client=self._main_api_client) + self._organizations = OrganizationsOperations(api) + return self._organizations + @property def sandbox(self) -> SandboxOperations: """Access SandboxApi operations.""" @@ -976,6 +1828,22 @@ def sandbox(self) -> SandboxOperations: self._sandbox = SandboxOperations(api) return self._sandbox + @property + def sandboxes(self) -> SandboxesOperations: + """Access SandboxesApi operations.""" + if self._sandboxes is None: + api = SandboxesApi(api_client=self._main_api_client) + self._sandboxes = SandboxesOperations(api) + return self._sandboxes + + @property + def source_vms(self) -> SourceVMsOperations: + """Access SourceVMsApi operations.""" + if self._source_vms is None: + api = SourceVMsApi(api_client=self._main_api_client) + self._source_vms = SourceVMsOperations(api) + return self._source_vms + @property def vms(self) -> VMsOperations: """Access VMsApi 
operations.""" diff --git a/sdk/fluid-py/fluid/configuration.py b/sdk/fluid-py/fluid/configuration.py index e9563cdc..9c2b5ccb 100644 --- a/sdk/fluid-py/fluid/configuration.py +++ b/sdk/fluid-py/fluid/configuration.py @@ -60,7 +60,7 @@ def __init__( ca_cert_data: Optional[Union[str, bytes]] = None, ) -> None: """Initialize configuration.""" - self._base_path = "http://localhost" if host is None else host + self._base_path = "http://localhost:8080/v1" if host is None else host """Default Base url. """ @@ -350,7 +350,7 @@ def to_debug_report(self) -> str: f"Python SDK Debug Report:\n" f"OS: {sys.platform}\n" f"Python Version: {sys.version}\n" - f"Version of the API: 0.1.0\n" + f"Version of the API: 1.0\n" f"SDK Package Version: 0.1.0" ) @@ -361,7 +361,7 @@ def get_host_settings(self) -> List[Dict[str, Any]]: """ return [ { - "url": "", + "url": "//localhost:8080/v1", "description": "No description provided", } ] diff --git a/sdk/fluid-py/fluid/exceptions.py b/sdk/fluid-py/fluid/exceptions.py index ada7d384..e7190c39 100644 --- a/sdk/fluid-py/fluid/exceptions.py +++ b/sdk/fluid-py/fluid/exceptions.py @@ -1,11 +1,11 @@ # coding: utf-8 """ - fluid-remote API + Fluid API - API for managing AI Agent VM sandboxes using libvirt + API for managing sandboxes, organizations, billing, and hosts - The version of the OpenAPI document: 0.1.0 + The version of the OpenAPI document: 1.0 Generated by OpenAPI Generator (https://openapi-generator.tech) Do not edit the class manually. 
diff --git a/sdk/fluid-py/fluid/models/__init__.py b/sdk/fluid-py/fluid/models/__init__.py index 2203f8f9..5366383a 100644 --- a/sdk/fluid-py/fluid/models/__init__.py +++ b/sdk/fluid-py/fluid/models/__init__.py @@ -2,246 +2,54 @@ # flake8: noqa """ - fluid-remote API + Fluid API - API for managing AI Agent VM sandboxes using libvirt + API for managing sandboxes, organizations, billing, and hosts - The version of the OpenAPI document: 0.1.0 + The version of the OpenAPI document: 1.0 Generated by OpenAPI Generator (https://openapi-generator.tech) Do not edit the class manually. """ # noqa: E501 # import models into model package -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_add_task_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_add_task_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_export_playbook_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleExportPlaybookResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_get_playbook_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleGetPlaybookResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJob -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_request import \ - 
GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_status import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobStatus -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_list_playbooks_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleListPlaybooksResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_reorder_tasks_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleReorderTasksRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_update_task_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_update_task_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_error_error_response import \ - GithubComAspectrrFluidShFluidRemoteInternalErrorErrorResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_access_error_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestAccessErrorResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_ca_public_key_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCaPublicKeyResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_certificate_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCertificateResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_create_sandbox_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxRequest -from 
fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_create_sandbox_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_destroy_sandbox_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestDestroySandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_diff_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiffRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_diff_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiffResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_discover_ip_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiscoverIPResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_error_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestErrorResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_generate_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestGenerateResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_get_sandbox_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestGetSandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_health_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_host_error import \ - GithubComAspectrrFluidShFluidRemoteInternalRestHostError -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_inject_ssh_key_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestInjectSSHKeyRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_certificates_response import \ - 
GithubComAspectrrFluidShFluidRemoteInternalRestListCertificatesResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandbox_commands_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxCommandsResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandboxes_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxesResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sessions_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSessionsResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_vms_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListVMsResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_publish_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestPublishRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_publish_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestPublishResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_request_access_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_request_access_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_revoke_certificate_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_revoke_certificate_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_run_command_request import \ - 
GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_run_command_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_sandbox_info import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSandboxInfo -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_end_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_end_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_start_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_start_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_snapshot_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_snapshot_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxResponse -from 
fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_vm_info import \ - GithubComAspectrrFluidShFluidRemoteInternalRestVmInfo -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_change_diff import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreChangeDiff -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommand -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command_exec_record import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommandExecRecord -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command_summary import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommandSummary -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_diff import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreDiff -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_package_info import \ - GithubComAspectrrFluidShFluidRemoteInternalStorePackageInfo -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_playbook import \ - GithubComAspectrrFluidShFluidRemoteInternalStorePlaybook -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_playbook_task import \ - GithubComAspectrrFluidShFluidRemoteInternalStorePlaybookTask -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_sandbox import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSandbox -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_sandbox_state import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSandboxState -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_service_change import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreServiceChange -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_snapshot import \ - 
GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshot -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_snapshot_kind import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshotKind -from fluid.models.internal_ansible_add_task_request import \ - InternalAnsibleAddTaskRequest -from fluid.models.internal_ansible_add_task_response import \ - InternalAnsibleAddTaskResponse -from fluid.models.internal_ansible_create_playbook_request import \ - InternalAnsibleCreatePlaybookRequest -from fluid.models.internal_ansible_create_playbook_response import \ - InternalAnsibleCreatePlaybookResponse -from fluid.models.internal_ansible_export_playbook_response import \ - InternalAnsibleExportPlaybookResponse -from fluid.models.internal_ansible_get_playbook_response import \ - InternalAnsibleGetPlaybookResponse -from fluid.models.internal_ansible_job import InternalAnsibleJob -from fluid.models.internal_ansible_job_request import InternalAnsibleJobRequest -from fluid.models.internal_ansible_job_response import \ - InternalAnsibleJobResponse -from fluid.models.internal_ansible_job_status import InternalAnsibleJobStatus -from fluid.models.internal_ansible_list_playbooks_response import \ - InternalAnsibleListPlaybooksResponse -from fluid.models.internal_ansible_reorder_tasks_request import \ - InternalAnsibleReorderTasksRequest -from fluid.models.internal_ansible_update_task_request import \ - InternalAnsibleUpdateTaskRequest -from fluid.models.internal_ansible_update_task_response import \ - InternalAnsibleUpdateTaskResponse -from fluid.models.internal_rest_access_error_response import \ - InternalRestAccessErrorResponse -from fluid.models.internal_rest_ca_public_key_response import \ - InternalRestCaPublicKeyResponse -from fluid.models.internal_rest_certificate_response import \ - InternalRestCertificateResponse -from fluid.models.internal_rest_create_sandbox_request import \ - InternalRestCreateSandboxRequest -from 
fluid.models.internal_rest_create_sandbox_response import \ - InternalRestCreateSandboxResponse -from fluid.models.internal_rest_destroy_sandbox_response import \ - InternalRestDestroySandboxResponse -from fluid.models.internal_rest_diff_request import InternalRestDiffRequest -from fluid.models.internal_rest_diff_response import InternalRestDiffResponse -from fluid.models.internal_rest_discover_ip_response import \ - InternalRestDiscoverIPResponse -from fluid.models.internal_rest_error_response import InternalRestErrorResponse -from fluid.models.internal_rest_generate_response import \ - InternalRestGenerateResponse -from fluid.models.internal_rest_get_sandbox_response import \ - InternalRestGetSandboxResponse -from fluid.models.internal_rest_health_response import \ - InternalRestHealthResponse -from fluid.models.internal_rest_host_error import InternalRestHostError -from fluid.models.internal_rest_inject_ssh_key_request import \ - InternalRestInjectSSHKeyRequest -from fluid.models.internal_rest_list_certificates_response import \ - InternalRestListCertificatesResponse -from fluid.models.internal_rest_list_sandbox_commands_response import \ - InternalRestListSandboxCommandsResponse -from fluid.models.internal_rest_list_sandboxes_response import \ - InternalRestListSandboxesResponse -from fluid.models.internal_rest_list_sessions_response import \ - InternalRestListSessionsResponse -from fluid.models.internal_rest_list_vms_response import \ - InternalRestListVMsResponse -from fluid.models.internal_rest_publish_request import \ - InternalRestPublishRequest -from fluid.models.internal_rest_publish_response import \ - InternalRestPublishResponse -from fluid.models.internal_rest_request_access_request import \ - InternalRestRequestAccessRequest -from fluid.models.internal_rest_request_access_response import \ - InternalRestRequestAccessResponse -from fluid.models.internal_rest_revoke_certificate_request import \ - InternalRestRevokeCertificateRequest -from 
fluid.models.internal_rest_revoke_certificate_response import \ - InternalRestRevokeCertificateResponse -from fluid.models.internal_rest_run_command_request import \ - InternalRestRunCommandRequest -from fluid.models.internal_rest_run_command_response import \ - InternalRestRunCommandResponse -from fluid.models.internal_rest_sandbox_info import InternalRestSandboxInfo -from fluid.models.internal_rest_session_end_request import \ - InternalRestSessionEndRequest -from fluid.models.internal_rest_session_end_response import \ - InternalRestSessionEndResponse -from fluid.models.internal_rest_session_response import \ - InternalRestSessionResponse -from fluid.models.internal_rest_session_start_request import \ - InternalRestSessionStartRequest -from fluid.models.internal_rest_session_start_response import \ - InternalRestSessionStartResponse -from fluid.models.internal_rest_snapshot_request import \ - InternalRestSnapshotRequest -from fluid.models.internal_rest_snapshot_response import \ - InternalRestSnapshotResponse -from fluid.models.internal_rest_start_sandbox_request import \ - InternalRestStartSandboxRequest -from fluid.models.internal_rest_start_sandbox_response import \ - InternalRestStartSandboxResponse -from fluid.models.internal_rest_vm_info import InternalRestVmInfo -from fluid.models.time_duration import TimeDuration +from fluid.models.orchestrator_create_sandbox_request import \ + OrchestratorCreateSandboxRequest +from fluid.models.orchestrator_host_info import OrchestratorHostInfo +from fluid.models.orchestrator_prepare_request import \ + OrchestratorPrepareRequest +from fluid.models.orchestrator_read_source_request import \ + OrchestratorReadSourceRequest +from fluid.models.orchestrator_run_command_request import \ + OrchestratorRunCommandRequest +from fluid.models.orchestrator_run_source_request import \ + OrchestratorRunSourceRequest +from fluid.models.orchestrator_snapshot_request import \ + OrchestratorSnapshotRequest +from 
fluid.models.orchestrator_snapshot_response import \ + OrchestratorSnapshotResponse +from fluid.models.orchestrator_source_command_result import \ + OrchestratorSourceCommandResult +from fluid.models.orchestrator_source_file_result import \ + OrchestratorSourceFileResult +from fluid.models.rest_add_member_request import RestAddMemberRequest +from fluid.models.rest_auth_response import RestAuthResponse +from fluid.models.rest_billing_response import RestBillingResponse +from fluid.models.rest_calculator_request import RestCalculatorRequest +from fluid.models.rest_calculator_response import RestCalculatorResponse +from fluid.models.rest_create_host_token_request import \ + RestCreateHostTokenRequest +from fluid.models.rest_create_org_request import RestCreateOrgRequest +from fluid.models.rest_free_tier_info import RestFreeTierInfo +from fluid.models.rest_host_token_response import RestHostTokenResponse +from fluid.models.rest_login_request import RestLoginRequest +from fluid.models.rest_member_response import RestMemberResponse +from fluid.models.rest_org_response import RestOrgResponse +from fluid.models.rest_register_request import RestRegisterRequest +from fluid.models.rest_swagger_error import RestSwaggerError +from fluid.models.rest_update_org_request import RestUpdateOrgRequest +from fluid.models.rest_usage_summary import RestUsageSummary +from fluid.models.rest_user_response import RestUserResponse +from fluid.models.store_command import StoreCommand +from fluid.models.store_sandbox import StoreSandbox +from fluid.models.store_sandbox_state import StoreSandboxState diff --git a/sdk/fluid-py/fluid/models/orchestrator_create_sandbox_request.py b/sdk/fluid-py/fluid/models/orchestrator_create_sandbox_request.py new file mode 100644 index 00000000..6a743479 --- /dev/null +++ b/sdk/fluid-py/fluid/models/orchestrator_create_sandbox_request.py @@ -0,0 +1,112 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + 
The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing_extensions import Self + + +class OrchestratorCreateSandboxRequest(BaseModel): + """ + OrchestratorCreateSandboxRequest + """ # noqa: E501 + + agent_id: Optional[StrictStr] = None + base_image: Optional[StrictStr] = None + memory_mb: Optional[StrictInt] = None + name: Optional[StrictStr] = None + network: Optional[StrictStr] = None + org_id: Optional[StrictStr] = None + source_vm: Optional[StrictStr] = None + ttl_seconds: Optional[StrictInt] = None + vcpus: Optional[StrictInt] = None + __properties: ClassVar[List[str]] = [ + "agent_id", + "base_image", + "memory_mb", + "name", + "network", + "org_id", + "source_vm", + "ttl_seconds", + "vcpus", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of OrchestratorCreateSandboxRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. 
+ + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of OrchestratorCreateSandboxRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "agent_id": obj.get("agent_id"), + "base_image": obj.get("base_image"), + "memory_mb": obj.get("memory_mb"), + "name": obj.get("name"), + "network": obj.get("network"), + "org_id": obj.get("org_id"), + "source_vm": obj.get("source_vm"), + "ttl_seconds": obj.get("ttl_seconds"), + "vcpus": obj.get("vcpus"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/orchestrator_host_info.py b/sdk/fluid-py/fluid/models/orchestrator_host_info.py new file mode 100644 index 00000000..84540b63 --- /dev/null +++ b/sdk/fluid-py/fluid/models/orchestrator_host_info.py @@ -0,0 +1,112 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing_extensions import Self + + +class OrchestratorHostInfo(BaseModel): + """ + OrchestratorHostInfo + """ # noqa: E501 + + active_sandboxes: Optional[StrictInt] = None + available_cpus: Optional[StrictInt] = None + available_disk_mb: Optional[StrictInt] = None + available_memory_mb: Optional[StrictInt] = None + base_images: Optional[List[StrictStr]] = None + host_id: Optional[StrictStr] = None + hostname: Optional[StrictStr] = None + last_heartbeat: Optional[StrictStr] = None + status: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = [ + "active_sandboxes", + "available_cpus", + "available_disk_mb", + "available_memory_mb", + "base_images", + "host_id", + "hostname", + "last_heartbeat", + "status", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of OrchestratorHostInfo from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of OrchestratorHostInfo from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "active_sandboxes": obj.get("active_sandboxes"), + "available_cpus": obj.get("available_cpus"), + "available_disk_mb": obj.get("available_disk_mb"), + "available_memory_mb": obj.get("available_memory_mb"), + "base_images": obj.get("base_images"), + "host_id": obj.get("host_id"), + "hostname": obj.get("hostname"), + "last_heartbeat": obj.get("last_heartbeat"), + "status": obj.get("status"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/orchestrator_prepare_request.py b/sdk/fluid-py/fluid/models/orchestrator_prepare_request.py new file mode 100644 index 00000000..c7d35881 --- /dev/null +++ b/sdk/fluid-py/fluid/models/orchestrator_prepare_request.py @@ -0,0 +1,85 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class OrchestratorPrepareRequest(BaseModel): + """ + OrchestratorPrepareRequest + """ # noqa: E501 + + ssh_key_path: Optional[StrictStr] = None + ssh_user: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["ssh_key_path", "ssh_user"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of OrchestratorPrepareRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of OrchestratorPrepareRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + {"ssh_key_path": obj.get("ssh_key_path"), "ssh_user": obj.get("ssh_user")} + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/orchestrator_read_source_request.py b/sdk/fluid-py/fluid/models/orchestrator_read_source_request.py new file mode 100644 index 00000000..f7f01d24 --- /dev/null +++ b/sdk/fluid-py/fluid/models/orchestrator_read_source_request.py @@ -0,0 +1,82 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class OrchestratorReadSourceRequest(BaseModel): + """ + OrchestratorReadSourceRequest + """ # noqa: E501 + + path: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["path"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of OrchestratorReadSourceRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of OrchestratorReadSourceRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"path": obj.get("path")}) + return _obj diff --git a/sdk/fluid-py/fluid/models/orchestrator_run_command_request.py b/sdk/fluid-py/fluid/models/orchestrator_run_command_request.py new file mode 100644 index 00000000..aa5d2b60 --- /dev/null +++ b/sdk/fluid-py/fluid/models/orchestrator_run_command_request.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing_extensions import Self + + +class OrchestratorRunCommandRequest(BaseModel): + """ + OrchestratorRunCommandRequest + """ # noqa: E501 + + command: Optional[StrictStr] = None + env: Optional[Dict[str, StrictStr]] = None + timeout_seconds: Optional[StrictInt] = None + __properties: ClassVar[List[str]] = ["command", "env", "timeout_seconds"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of OrchestratorRunCommandRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of OrchestratorRunCommandRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "command": obj.get("command"), + "env": obj.get("env"), + "timeout_seconds": obj.get("timeout_seconds"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/orchestrator_run_source_request.py b/sdk/fluid-py/fluid/models/orchestrator_run_source_request.py new file mode 100644 index 00000000..74cad237 --- /dev/null +++ b/sdk/fluid-py/fluid/models/orchestrator_run_source_request.py @@ -0,0 +1,88 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing_extensions import Self + + +class OrchestratorRunSourceRequest(BaseModel): + """ + OrchestratorRunSourceRequest + """ # noqa: E501 + + command: Optional[StrictStr] = None + timeout_seconds: Optional[StrictInt] = None + __properties: ClassVar[List[str]] = ["command", "timeout_seconds"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of OrchestratorRunSourceRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of OrchestratorRunSourceRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "command": obj.get("command"), + "timeout_seconds": obj.get("timeout_seconds"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/orchestrator_snapshot_request.py b/sdk/fluid-py/fluid/models/orchestrator_snapshot_request.py new file mode 100644 index 00000000..5adbd3d4 --- /dev/null +++ b/sdk/fluid-py/fluid/models/orchestrator_snapshot_request.py @@ -0,0 +1,82 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class OrchestratorSnapshotRequest(BaseModel): + """ + OrchestratorSnapshotRequest + """ # noqa: E501 + + name: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["name"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of OrchestratorSnapshotRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of OrchestratorSnapshotRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"name": obj.get("name")}) + return _obj diff --git a/sdk/fluid-py/fluid/models/orchestrator_snapshot_response.py b/sdk/fluid-py/fluid/models/orchestrator_snapshot_response.py new file mode 100644 index 00000000..5b932a80 --- /dev/null +++ b/sdk/fluid-py/fluid/models/orchestrator_snapshot_response.py @@ -0,0 +1,97 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class OrchestratorSnapshotResponse(BaseModel): + """ + OrchestratorSnapshotResponse + """ # noqa: E501 + + created_at: Optional[StrictStr] = None + sandbox_id: Optional[StrictStr] = None + snapshot_id: Optional[StrictStr] = None + snapshot_name: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = [ + "created_at", + "sandbox_id", + "snapshot_id", + "snapshot_name", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of OrchestratorSnapshotResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of OrchestratorSnapshotResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "created_at": obj.get("created_at"), + "sandbox_id": obj.get("sandbox_id"), + "snapshot_id": obj.get("snapshot_id"), + "snapshot_name": obj.get("snapshot_name"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/orchestrator_source_command_result.py b/sdk/fluid-py/fluid/models/orchestrator_source_command_result.py new file mode 100644 index 00000000..e9c502ed --- /dev/null +++ b/sdk/fluid-py/fluid/models/orchestrator_source_command_result.py @@ -0,0 +1,92 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing_extensions import Self + + +class OrchestratorSourceCommandResult(BaseModel): + """ + OrchestratorSourceCommandResult + """ # noqa: E501 + + exit_code: Optional[StrictInt] = None + source_vm: Optional[StrictStr] = None + stderr: Optional[StrictStr] = None + stdout: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["exit_code", "source_vm", "stderr", "stdout"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of OrchestratorSourceCommandResult from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of OrchestratorSourceCommandResult from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "exit_code": obj.get("exit_code"), + "source_vm": obj.get("source_vm"), + "stderr": obj.get("stderr"), + "stdout": obj.get("stdout"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/orchestrator_source_file_result.py b/sdk/fluid-py/fluid/models/orchestrator_source_file_result.py new file mode 100644 index 00000000..a7ccf794 --- /dev/null +++ b/sdk/fluid-py/fluid/models/orchestrator_source_file_result.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class OrchestratorSourceFileResult(BaseModel): + """ + OrchestratorSourceFileResult + """ # noqa: E501 + + content: Optional[StrictStr] = None + path: Optional[StrictStr] = None + source_vm: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["content", "path", "source_vm"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of OrchestratorSourceFileResult from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of OrchestratorSourceFileResult from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "content": obj.get("content"), + "path": obj.get("path"), + "source_vm": obj.get("source_vm"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_add_member_request.py b/sdk/fluid-py/fluid/models/rest_add_member_request.py new file mode 100644 index 00000000..de07676f --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_add_member_request.py @@ -0,0 +1,83 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class RestAddMemberRequest(BaseModel): + """ + RestAddMemberRequest + """ # noqa: E501 + + email: Optional[StrictStr] = None + role: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["email", "role"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestAddMemberRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestAddMemberRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"email": obj.get("email"), "role": obj.get("role")}) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_auth_response.py b/sdk/fluid-py/fluid/models/rest_auth_response.py new file mode 100644 index 00000000..a104e6fb --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_auth_response.py @@ -0,0 +1,93 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict +from typing_extensions import Self + +from fluid.models.rest_user_response import RestUserResponse + + +class RestAuthResponse(BaseModel): + """ + RestAuthResponse + """ # noqa: E501 + + user: Optional[RestUserResponse] = None + __properties: ClassVar[List[str]] = ["user"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: 
str) -> Optional[Self]: + """Create an instance of RestAuthResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of user + if self.user: + _dict["user"] = self.user.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestAuthResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "user": RestUserResponse.from_dict(obj["user"]) + if obj.get("user") is not None + else None + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_billing_response.py b/sdk/fluid-py/fluid/models/rest_billing_response.py new file mode 100644 index 00000000..29515d5b --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_billing_response.py @@ -0,0 +1,105 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + +from fluid.models.rest_free_tier_info import RestFreeTierInfo +from fluid.models.rest_usage_summary import RestUsageSummary + + +class RestBillingResponse(BaseModel): + """ + RestBillingResponse + """ # noqa: E501 + + free_tier: Optional[RestFreeTierInfo] = None + plan: Optional[StrictStr] = None + status: Optional[StrictStr] = None + usage: Optional[RestUsageSummary] = None + __properties: ClassVar[List[str]] = ["free_tier", "plan", "status", "usage"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestBillingResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + # override the default output from pydantic by calling `to_dict()` of free_tier + if self.free_tier: + _dict["free_tier"] = self.free_tier.to_dict() + # override the default output from pydantic by calling `to_dict()` of usage + if self.usage: + _dict["usage"] = self.usage.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestBillingResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "free_tier": RestFreeTierInfo.from_dict(obj["free_tier"]) + if obj.get("free_tier") is not None + else None, + "plan": obj.get("plan"), + "status": obj.get("status"), + "usage": RestUsageSummary.from_dict(obj["usage"]) + if obj.get("usage") is not None + else None, + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_calculator_request.py b/sdk/fluid-py/fluid/models/rest_calculator_request.py new file mode 100644 index 00000000..239e13af --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_calculator_request.py @@ -0,0 +1,97 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set, Union + +from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt +from typing_extensions import Self + + +class RestCalculatorRequest(BaseModel): + """ + RestCalculatorRequest + """ # noqa: E501 + + agent_hosts: Optional[StrictInt] = None + concurrent_sandboxes: Optional[StrictInt] = None + hours_per_month: Optional[Union[StrictFloat, StrictInt]] = None + source_vms: Optional[StrictInt] = None + __properties: ClassVar[List[str]] = [ + "agent_hosts", + "concurrent_sandboxes", + "hours_per_month", + "source_vms", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestCalculatorRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestCalculatorRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "agent_hosts": obj.get("agent_hosts"), + "concurrent_sandboxes": obj.get("concurrent_sandboxes"), + "hours_per_month": obj.get("hours_per_month"), + "source_vms": obj.get("source_vms"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_calculator_response.py b/sdk/fluid-py/fluid/models/rest_calculator_response.py new file mode 100644 index 00000000..80b5e240 --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_calculator_response.py @@ -0,0 +1,100 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set, Union + +from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt, StrictStr +from typing_extensions import Self + + +class RestCalculatorResponse(BaseModel): + """ + RestCalculatorResponse + """ # noqa: E501 + + agent_host_cost: Optional[Union[StrictFloat, StrictInt]] = None + currency: Optional[StrictStr] = None + sandbox_cost: Optional[Union[StrictFloat, StrictInt]] = None + source_vm_cost: Optional[Union[StrictFloat, StrictInt]] = None + total_monthly: Optional[Union[StrictFloat, StrictInt]] = None + __properties: ClassVar[List[str]] = [ + "agent_host_cost", + "currency", + "sandbox_cost", + "source_vm_cost", + "total_monthly", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestCalculatorResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestCalculatorResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "agent_host_cost": obj.get("agent_host_cost"), + "currency": obj.get("currency"), + "sandbox_cost": obj.get("sandbox_cost"), + "source_vm_cost": obj.get("source_vm_cost"), + "total_monthly": obj.get("total_monthly"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_create_host_token_request.py b/sdk/fluid-py/fluid/models/rest_create_host_token_request.py new file mode 100644 index 00000000..6fb23104 --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_create_host_token_request.py @@ -0,0 +1,82 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class RestCreateHostTokenRequest(BaseModel): + """ + RestCreateHostTokenRequest + """ # noqa: E501 + + name: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["name"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestCreateHostTokenRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestCreateHostTokenRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"name": obj.get("name")}) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_create_org_request.py b/sdk/fluid-py/fluid/models/rest_create_org_request.py new file mode 100644 index 00000000..8d2e7787 --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_create_org_request.py @@ -0,0 +1,83 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class RestCreateOrgRequest(BaseModel): + """ + RestCreateOrgRequest + """ # noqa: E501 + + name: Optional[StrictStr] = None + slug: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["name", "slug"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> 
Optional[Self]: + """Create an instance of RestCreateOrgRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestCreateOrgRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"name": obj.get("name"), "slug": obj.get("slug")}) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_free_tier_info.py b/sdk/fluid-py/fluid/models/rest_free_tier_info.py new file mode 100644 index 00000000..ee15f898 --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_free_tier_info.py @@ -0,0 +1,94 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictInt +from typing_extensions import Self + + +class RestFreeTierInfo(BaseModel): + """ + RestFreeTierInfo + """ # noqa: E501 + + max_agent_hosts: Optional[StrictInt] = None + max_concurrent_sandboxes: Optional[StrictInt] = None + max_source_vms: Optional[StrictInt] = None + __properties: ClassVar[List[str]] = [ + "max_agent_hosts", + "max_concurrent_sandboxes", + "max_source_vms", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestFreeTierInfo from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestFreeTierInfo from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "max_agent_hosts": obj.get("max_agent_hosts"), + "max_concurrent_sandboxes": obj.get("max_concurrent_sandboxes"), + "max_source_vms": obj.get("max_source_vms"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_host_token_response.py b/sdk/fluid-py/fluid/models/rest_host_token_response.py new file mode 100644 index 00000000..9490e7dc --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_host_token_response.py @@ -0,0 +1,94 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, Field, StrictStr +from typing_extensions import Self + + +class RestHostTokenResponse(BaseModel): + """ + RestHostTokenResponse + """ # noqa: E501 + + created_at: Optional[StrictStr] = None + id: Optional[StrictStr] = None + name: Optional[StrictStr] = None + token: Optional[StrictStr] = Field( + default=None, description="Only set on creation." 
+ ) + __properties: ClassVar[List[str]] = ["created_at", "id", "name", "token"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestHostTokenResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestHostTokenResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "created_at": obj.get("created_at"), + "id": obj.get("id"), + "name": obj.get("name"), + "token": obj.get("token"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_login_request.py b/sdk/fluid-py/fluid/models/rest_login_request.py new file mode 100644 index 00000000..6aaa4047 --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_login_request.py @@ -0,0 +1,85 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class RestLoginRequest(BaseModel): + """ + RestLoginRequest + """ # noqa: E501 + + email: Optional[StrictStr] = None + password: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["email", "password"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestLoginRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestLoginRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + {"email": obj.get("email"), "password": obj.get("password")} + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_member_response.py b/sdk/fluid-py/fluid/models/rest_member_response.py new file mode 100644 index 00000000..dc5891e9 --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_member_response.py @@ -0,0 +1,92 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class RestMemberResponse(BaseModel): + """ + RestMemberResponse + """ # noqa: E501 + + created_at: Optional[StrictStr] = None + id: Optional[StrictStr] = None + role: Optional[StrictStr] = None + user_id: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["created_at", "id", "role", "user_id"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestMemberResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestMemberResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "created_at": obj.get("created_at"), + "id": obj.get("id"), + "role": obj.get("role"), + "user_id": obj.get("user_id"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_org_response.py b/sdk/fluid-py/fluid/models/rest_org_response.py new file mode 100644 index 00000000..fc396d69 --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_org_response.py @@ -0,0 +1,103 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class RestOrgResponse(BaseModel): + """ + RestOrgResponse + """ # noqa: E501 + + created_at: Optional[StrictStr] = None + id: Optional[StrictStr] = None + name: Optional[StrictStr] = None + owner_id: Optional[StrictStr] = None + slug: Optional[StrictStr] = None + stripe_customer_id: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = [ + "created_at", + "id", + "name", + "owner_id", + "slug", + "stripe_customer_id", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestOrgResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestOrgResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "created_at": obj.get("created_at"), + "id": obj.get("id"), + "name": obj.get("name"), + "owner_id": obj.get("owner_id"), + "slug": obj.get("slug"), + "stripe_customer_id": obj.get("stripe_customer_id"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_register_request.py b/sdk/fluid-py/fluid/models/rest_register_request.py new file mode 100644 index 00000000..2c6fa848 --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_register_request.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class RestRegisterRequest(BaseModel): + """ + RestRegisterRequest + """ # noqa: E501 + + display_name: Optional[StrictStr] = None + email: Optional[StrictStr] = None + password: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["display_name", "email", "password"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestRegisterRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestRegisterRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "display_name": obj.get("display_name"), + "email": obj.get("email"), + "password": obj.get("password"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_swagger_error.py b/sdk/fluid-py/fluid/models/rest_swagger_error.py new file mode 100644 index 00000000..0c200b20 --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_swagger_error.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing_extensions import Self + + +class RestSwaggerError(BaseModel): + """ + RestSwaggerError + """ # noqa: E501 + + code: Optional[StrictInt] = None + details: Optional[StrictStr] = None + error: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["code", "details", "error"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestSwaggerError from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestSwaggerError from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "code": obj.get("code"), + "details": obj.get("details"), + "error": obj.get("error"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_update_org_request.py b/sdk/fluid-py/fluid/models/rest_update_org_request.py new file mode 100644 index 00000000..5269eb7c --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_update_org_request.py @@ -0,0 +1,82 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictStr +from typing_extensions import Self + + +class RestUpdateOrgRequest(BaseModel): + """ + RestUpdateOrgRequest + """ # noqa: E501 + + name: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = ["name"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestUpdateOrgRequest from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestUpdateOrgRequest from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate({"name": obj.get("name")}) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_usage_summary.py b/sdk/fluid-py/fluid/models/rest_usage_summary.py new file mode 100644 index 00000000..875762c3 --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_usage_summary.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set, Union + +from pydantic import BaseModel, ConfigDict, StrictFloat, StrictInt +from typing_extensions import Self + + +class RestUsageSummary(BaseModel): + """ + RestUsageSummary + """ # noqa: E501 + + agent_hosts: Optional[Union[StrictFloat, StrictInt]] = None + sandbox_hours: Optional[Union[StrictFloat, StrictInt]] = None + source_vms: Optional[Union[StrictFloat, StrictInt]] = None + __properties: ClassVar[List[str]] = ["agent_hosts", "sandbox_hours", "source_vms"] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use 
.model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestUsageSummary from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestUsageSummary from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "agent_hosts": obj.get("agent_hosts"), + "sandbox_hours": obj.get("sandbox_hours"), + "source_vms": obj.get("source_vms"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/rest_user_response.py b/sdk/fluid-py/fluid/models/rest_user_response.py new file mode 100644 index 00000000..a1e90d0c --- /dev/null +++ b/sdk/fluid-py/fluid/models/rest_user_response.py @@ -0,0 +1,100 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictBool, StrictStr +from typing_extensions import Self + + +class RestUserResponse(BaseModel): + """ + RestUserResponse + """ # noqa: E501 + + avatar_url: Optional[StrictStr] = None + display_name: Optional[StrictStr] = None + email: Optional[StrictStr] = None + email_verified: Optional[StrictBool] = None + id: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = [ + "avatar_url", + "display_name", + "email", + "email_verified", + "id", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of RestUserResponse from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of RestUserResponse from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "avatar_url": obj.get("avatar_url"), + "display_name": obj.get("display_name"), + "email": obj.get("email"), + "email_verified": obj.get("email_verified"), + "id": obj.get("id"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/store_command.py b/sdk/fluid-py/fluid/models/store_command.py new file mode 100644 index 00000000..00a92908 --- /dev/null +++ b/sdk/fluid-py/fluid/models/store_command.py @@ -0,0 +1,112 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing_extensions import Self + + +class StoreCommand(BaseModel): + """ + StoreCommand + """ # noqa: E501 + + command: Optional[StrictStr] = None + duration_ms: Optional[StrictInt] = None + ended_at: Optional[StrictStr] = None + exit_code: Optional[StrictInt] = None + id: Optional[StrictStr] = None + sandbox_id: Optional[StrictStr] = None + started_at: Optional[StrictStr] = None + stderr: Optional[StrictStr] = None + stdout: Optional[StrictStr] = None + __properties: ClassVar[List[str]] = [ + "command", + "duration_ms", + "ended_at", + "exit_code", + "id", + "sandbox_id", + "started_at", + "stderr", + "stdout", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of StoreCommand from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. 
+ """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of StoreCommand from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "command": obj.get("command"), + "duration_ms": obj.get("duration_ms"), + "ended_at": obj.get("ended_at"), + "exit_code": obj.get("exit_code"), + "id": obj.get("id"), + "sandbox_id": obj.get("sandbox_id"), + "started_at": obj.get("started_at"), + "stderr": obj.get("stderr"), + "stdout": obj.get("stdout"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/store_sandbox.py b/sdk/fluid-py/fluid/models/store_sandbox.py new file mode 100644 index 00000000..00a8398e --- /dev/null +++ b/sdk/fluid-py/fluid/models/store_sandbox.py @@ -0,0 +1,141 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +from __future__ import annotations + +import json +import pprint +from typing import Any, ClassVar, Dict, List, Optional, Set + +from pydantic import BaseModel, ConfigDict, StrictInt, StrictStr +from typing_extensions import Self + +from fluid.models.store_sandbox_state import StoreSandboxState + + +class StoreSandbox(BaseModel): + """ + StoreSandbox + """ # noqa: E501 + + agent_id: Optional[StrictStr] = None + base_image: Optional[StrictStr] = None + bridge: Optional[StrictStr] = None + created_at: Optional[StrictStr] = None + deleted_at: Optional[StrictStr] = None + host_id: Optional[StrictStr] = None + id: Optional[StrictStr] = None + ip_address: Optional[StrictStr] = None + mac_address: Optional[StrictStr] = None + memory_mb: Optional[StrictInt] = None + name: Optional[StrictStr] = None + org_id: Optional[StrictStr] = None + source_vm: Optional[StrictStr] = None + state: Optional[StoreSandboxState] = None + tap_device: Optional[StrictStr] = None + ttl_seconds: Optional[StrictInt] = None + updated_at: Optional[StrictStr] = None + vcpus: Optional[StrictInt] = None + __properties: ClassVar[List[str]] = [ + "agent_id", + "base_image", + "bridge", + "created_at", + "deleted_at", + "host_id", + "id", + "ip_address", + "mac_address", + "memory_mb", + "name", + "org_id", + "source_vm", + "state", + "tap_device", + "ttl_seconds", + "updated_at", + "vcpus", + ] + + model_config = ConfigDict( + populate_by_name=True, + validate_assignment=True, + protected_namespaces=(), + ) + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.model_dump(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> Optional[Self]: + """Create an instance of StoreSandbox 
from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self) -> Dict[str, Any]: + """Return the dictionary representation of the model using alias. + + This has the following differences from calling pydantic's + `self.model_dump(by_alias=True)`: + + * `None` is only added to the output dict for nullable fields that + were set at model initialization. Other fields with value `None` + are ignored. + """ + excluded_fields: Set[str] = set([]) + + _dict = self.model_dump( + by_alias=True, + exclude=excluded_fields, + exclude_none=True, + ) + return _dict + + @classmethod + def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]: + """Create an instance of StoreSandbox from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return cls.model_validate(obj) + + _obj = cls.model_validate( + { + "agent_id": obj.get("agent_id"), + "base_image": obj.get("base_image"), + "bridge": obj.get("bridge"), + "created_at": obj.get("created_at"), + "deleted_at": obj.get("deleted_at"), + "host_id": obj.get("host_id"), + "id": obj.get("id"), + "ip_address": obj.get("ip_address"), + "mac_address": obj.get("mac_address"), + "memory_mb": obj.get("memory_mb"), + "name": obj.get("name"), + "org_id": obj.get("org_id"), + "source_vm": obj.get("source_vm"), + "state": obj.get("state"), + "tap_device": obj.get("tap_device"), + "ttl_seconds": obj.get("ttl_seconds"), + "updated_at": obj.get("updated_at"), + "vcpus": obj.get("vcpus"), + } + ) + return _obj diff --git a/sdk/fluid-py/fluid/models/store_sandbox_state.py b/sdk/fluid-py/fluid/models/store_sandbox_state.py new file mode 100644 index 00000000..e20069da --- /dev/null +++ b/sdk/fluid-py/fluid/models/store_sandbox_state.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class 
manually. +""" # noqa: E501 + + +from __future__ import annotations + +import json +from enum import Enum + +from typing_extensions import Self + + +class StoreSandboxState(str, Enum): + """ + StoreSandboxState + """ + + """ + allowed enum values + """ + SandboxStateCreating = "CREATING" + SandboxStateRunning = "RUNNING" + SandboxStateStopped = "STOPPED" + SandboxStateDestroyed = "DESTROYED" + SandboxStateError = "ERROR" + + @classmethod + def from_json(cls, json_str: str) -> Self: + """Create an instance of StoreSandboxState from a JSON string""" + return cls(json.loads(json_str)) diff --git a/sdk/fluid-py/fluid/rest.py b/sdk/fluid-py/fluid/rest.py index 2f53cdd1..f3f03245 100644 --- a/sdk/fluid-py/fluid/rest.py +++ b/sdk/fluid-py/fluid/rest.py @@ -1,11 +1,11 @@ # coding: utf-8 """ - fluid-remote API + Fluid API - API for managing AI Agent VM sandboxes using libvirt + API for managing sandboxes, organizations, billing, and hosts - The version of the OpenAPI document: 0.1.0 + The version of the OpenAPI document: 1.0 Generated by OpenAPI Generator (https://openapi-generator.tech) Do not edit the class manually. 
diff --git a/sdk/fluid-py/pyproject.toml b/sdk/fluid-py/pyproject.toml index 5dacd9c0..09751dbe 100644 --- a/sdk/fluid-py/pyproject.toml +++ b/sdk/fluid-py/pyproject.toml @@ -1,12 +1,12 @@ [project] name = "fluid" version = "0.1.0" -description = "fluid-remote API" +description = "Fluid API" authors = [ {name = "OpenAPI Generator Community",email = "team@openapitools.org"}, ] readme = "README.md" -keywords = ["OpenAPI", "OpenAPI-Generator", "fluid-remote API"] +keywords = ["OpenAPI", "OpenAPI-Generator", "Fluid API"] requires-python = ">=3.9" dependencies = [ diff --git a/sdk/fluid-py/setup.py b/sdk/fluid-py/setup.py index 64060d32..e01a4a7d 100644 --- a/sdk/fluid-py/setup.py +++ b/sdk/fluid-py/setup.py @@ -8,7 +8,7 @@ version="0.1.0", author="Collin Pfeifer", author_email="cpfeifer@madcactus.org", - description="API for managing AI Agent VM sandboxes using libvirt", + description="API for managing sandboxes, organizations, billing, and hosts", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/aspectrr/fluid.sh", diff --git a/sdk/fluid-py/test/__init__.py b/sdk/fluid-py/test/__init__.py index bac86c58..2962d8f7 100644 --- a/sdk/fluid-py/test/__init__.py +++ b/sdk/fluid-py/test/__init__.py @@ -1,7 +1,7 @@ """ Fluid -API for managing AI Agent VM sandboxes using libvirt +API for managing sandboxes, organizations, billing, and hosts Installation: pip install fluid @@ -16,257 +16,71 @@ __version__ = "0.1.0" # Import all API classes -from fluid.api.access_api import AccessApi -from fluid.api.ansible_api import AnsibleApi -from fluid.api.ansible_playbooks_api import AnsiblePlaybooksApi +from fluid.api.auth_api import AuthApi +from fluid.api.billing_api import BillingApi from fluid.api.health_api import HealthApi -from fluid.api.sandbox_api import SandboxApi -from fluid.api.vms_api import VMsApi +from fluid.api.host_tokens_api import HostTokensApi +from fluid.api.hosts_api import HostsApi +from fluid.api.members_api 
import MembersApi +from fluid.api.organizations_api import OrganizationsApi +from fluid.api.sandboxes_api import SandboxesApi +from fluid.api.source_vms_api import SourceVMsApi from fluid.api_client import ApiClient from fluid.configuration import Configuration from fluid.exceptions import ApiException # Import all models -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_add_task_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_add_task_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleAddTaskResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_create_playbook_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleCreatePlaybookResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_export_playbook_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleExportPlaybookResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_get_playbook_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleGetPlaybookResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJob -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_job_status import \ - 
GithubComAspectrrFluidShFluidRemoteInternalAnsibleJobStatus -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_list_playbooks_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleListPlaybooksResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_reorder_tasks_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleReorderTasksRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_update_task_request import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_ansible_update_task_response import \ - GithubComAspectrrFluidShFluidRemoteInternalAnsibleUpdateTaskResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_error_error_response import \ - GithubComAspectrrFluidShFluidRemoteInternalErrorErrorResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_access_error_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestAccessErrorResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_ca_public_key_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCaPublicKeyResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_certificate_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCertificateResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_create_sandbox_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_create_sandbox_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestCreateSandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_destroy_sandbox_response import \ - 
GithubComAspectrrFluidShFluidRemoteInternalRestDestroySandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_diff_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiffRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_diff_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiffResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_discover_ip_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestDiscoverIPResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_error_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestErrorResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_generate_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestGenerateResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_get_sandbox_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestGetSandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_health_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestHealthResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_host_error import \ - GithubComAspectrrFluidShFluidRemoteInternalRestHostError -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_inject_ssh_key_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestInjectSSHKeyRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_certificates_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListCertificatesResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandbox_commands_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxCommandsResponse -from 
fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sandboxes_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSandboxesResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_sessions_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListSessionsResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_list_vms_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestListVMsResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_publish_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestPublishRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_publish_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestPublishResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_request_access_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_request_access_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRequestAccessResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_revoke_certificate_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_revoke_certificate_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRevokeCertificateResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_run_command_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_run_command_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestRunCommandResponse -from 
fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_sandbox_info import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSandboxInfo -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_end_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_end_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionEndResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_start_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_session_start_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSessionStartResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_snapshot_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_snapshot_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestSnapshotResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_request import \ - GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxRequest -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_start_sandbox_response import \ - GithubComAspectrrFluidShFluidRemoteInternalRestStartSandboxResponse -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_rest_vm_info import \ - GithubComAspectrrFluidShFluidRemoteInternalRestVmInfo -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_change_diff import \ - 
GithubComAspectrrFluidShFluidRemoteInternalStoreChangeDiff -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommand -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command_exec_record import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommandExecRecord -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_command_summary import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreCommandSummary -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_diff import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreDiff -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_package_info import \ - GithubComAspectrrFluidShFluidRemoteInternalStorePackageInfo -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_playbook import \ - GithubComAspectrrFluidShFluidRemoteInternalStorePlaybook -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_playbook_task import \ - GithubComAspectrrFluidShFluidRemoteInternalStorePlaybookTask -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_sandbox import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSandbox -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_sandbox_state import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSandboxState -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_service_change import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreServiceChange -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_snapshot import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshot -from fluid.models.github_com_aspectrr_fluid_sh_fluid_remote_internal_store_snapshot_kind import \ - GithubComAspectrrFluidShFluidRemoteInternalStoreSnapshotKind -from 
fluid.models.internal_ansible_add_task_request import \ - InternalAnsibleAddTaskRequest -from fluid.models.internal_ansible_add_task_response import \ - InternalAnsibleAddTaskResponse -from fluid.models.internal_ansible_create_playbook_request import \ - InternalAnsibleCreatePlaybookRequest -from fluid.models.internal_ansible_create_playbook_response import \ - InternalAnsibleCreatePlaybookResponse -from fluid.models.internal_ansible_export_playbook_response import \ - InternalAnsibleExportPlaybookResponse -from fluid.models.internal_ansible_get_playbook_response import \ - InternalAnsibleGetPlaybookResponse -from fluid.models.internal_ansible_job import InternalAnsibleJob -from fluid.models.internal_ansible_job_request import InternalAnsibleJobRequest -from fluid.models.internal_ansible_job_response import \ - InternalAnsibleJobResponse -from fluid.models.internal_ansible_job_status import InternalAnsibleJobStatus -from fluid.models.internal_ansible_list_playbooks_response import \ - InternalAnsibleListPlaybooksResponse -from fluid.models.internal_ansible_reorder_tasks_request import \ - InternalAnsibleReorderTasksRequest -from fluid.models.internal_ansible_update_task_request import \ - InternalAnsibleUpdateTaskRequest -from fluid.models.internal_ansible_update_task_response import \ - InternalAnsibleUpdateTaskResponse -from fluid.models.internal_rest_access_error_response import \ - InternalRestAccessErrorResponse -from fluid.models.internal_rest_ca_public_key_response import \ - InternalRestCaPublicKeyResponse -from fluid.models.internal_rest_certificate_response import \ - InternalRestCertificateResponse -from fluid.models.internal_rest_create_sandbox_request import \ - InternalRestCreateSandboxRequest -from fluid.models.internal_rest_create_sandbox_response import \ - InternalRestCreateSandboxResponse -from fluid.models.internal_rest_destroy_sandbox_response import \ - InternalRestDestroySandboxResponse -from fluid.models.internal_rest_diff_request import 
InternalRestDiffRequest -from fluid.models.internal_rest_diff_response import InternalRestDiffResponse -from fluid.models.internal_rest_discover_ip_response import \ - InternalRestDiscoverIPResponse -from fluid.models.internal_rest_error_response import InternalRestErrorResponse -from fluid.models.internal_rest_generate_response import \ - InternalRestGenerateResponse -from fluid.models.internal_rest_get_sandbox_response import \ - InternalRestGetSandboxResponse -from fluid.models.internal_rest_health_response import \ - InternalRestHealthResponse -from fluid.models.internal_rest_host_error import InternalRestHostError -from fluid.models.internal_rest_inject_ssh_key_request import \ - InternalRestInjectSSHKeyRequest -from fluid.models.internal_rest_list_certificates_response import \ - InternalRestListCertificatesResponse -from fluid.models.internal_rest_list_sandbox_commands_response import \ - InternalRestListSandboxCommandsResponse -from fluid.models.internal_rest_list_sandboxes_response import \ - InternalRestListSandboxesResponse -from fluid.models.internal_rest_list_sessions_response import \ - InternalRestListSessionsResponse -from fluid.models.internal_rest_list_vms_response import \ - InternalRestListVMsResponse -from fluid.models.internal_rest_publish_request import \ - InternalRestPublishRequest -from fluid.models.internal_rest_publish_response import \ - InternalRestPublishResponse -from fluid.models.internal_rest_request_access_request import \ - InternalRestRequestAccessRequest -from fluid.models.internal_rest_request_access_response import \ - InternalRestRequestAccessResponse -from fluid.models.internal_rest_revoke_certificate_request import \ - InternalRestRevokeCertificateRequest -from fluid.models.internal_rest_revoke_certificate_response import \ - InternalRestRevokeCertificateResponse -from fluid.models.internal_rest_run_command_request import \ - InternalRestRunCommandRequest -from fluid.models.internal_rest_run_command_response import \ - 
InternalRestRunCommandResponse -from fluid.models.internal_rest_sandbox_info import InternalRestSandboxInfo -from fluid.models.internal_rest_session_end_request import \ - InternalRestSessionEndRequest -from fluid.models.internal_rest_session_end_response import \ - InternalRestSessionEndResponse -from fluid.models.internal_rest_session_response import \ - InternalRestSessionResponse -from fluid.models.internal_rest_session_start_request import \ - InternalRestSessionStartRequest -from fluid.models.internal_rest_session_start_response import \ - InternalRestSessionStartResponse -from fluid.models.internal_rest_snapshot_request import \ - InternalRestSnapshotRequest -from fluid.models.internal_rest_snapshot_response import \ - InternalRestSnapshotResponse -from fluid.models.internal_rest_start_sandbox_request import \ - InternalRestStartSandboxRequest -from fluid.models.internal_rest_start_sandbox_response import \ - InternalRestStartSandboxResponse -from fluid.models.internal_rest_vm_info import InternalRestVmInfo -from fluid.models.time_duration import TimeDuration +from fluid.models.orchestrator_create_sandbox_request import \ + OrchestratorCreateSandboxRequest +from fluid.models.orchestrator_host_info import OrchestratorHostInfo +from fluid.models.orchestrator_prepare_request import \ + OrchestratorPrepareRequest +from fluid.models.orchestrator_read_source_request import \ + OrchestratorReadSourceRequest +from fluid.models.orchestrator_run_command_request import \ + OrchestratorRunCommandRequest +from fluid.models.orchestrator_run_source_request import \ + OrchestratorRunSourceRequest +from fluid.models.orchestrator_snapshot_request import \ + OrchestratorSnapshotRequest +from fluid.models.orchestrator_snapshot_response import \ + OrchestratorSnapshotResponse +from fluid.models.orchestrator_source_command_result import \ + OrchestratorSourceCommandResult +from fluid.models.orchestrator_source_file_result import \ + OrchestratorSourceFileResult +from 
fluid.models.rest_add_member_request import RestAddMemberRequest +from fluid.models.rest_auth_response import RestAuthResponse +from fluid.models.rest_billing_response import RestBillingResponse +from fluid.models.rest_calculator_request import RestCalculatorRequest +from fluid.models.rest_calculator_response import RestCalculatorResponse +from fluid.models.rest_create_host_token_request import \ + RestCreateHostTokenRequest +from fluid.models.rest_create_org_request import RestCreateOrgRequest +from fluid.models.rest_free_tier_info import RestFreeTierInfo +from fluid.models.rest_host_token_response import RestHostTokenResponse +from fluid.models.rest_login_request import RestLoginRequest +from fluid.models.rest_member_response import RestMemberResponse +from fluid.models.rest_org_response import RestOrgResponse +from fluid.models.rest_register_request import RestRegisterRequest +from fluid.models.rest_swagger_error import RestSwaggerError +from fluid.models.rest_update_org_request import RestUpdateOrgRequest +from fluid.models.rest_usage_summary import RestUsageSummary +from fluid.models.rest_user_response import RestUserResponse +from fluid.models.store_command import StoreCommand +from fluid.models.store_sandbox import StoreSandbox +from fluid.models.store_sandbox_state import StoreSandboxState __all__ = [ "Configuration", "ApiClient", "ApiException", - "AccessApi", - "AnsibleApi", - "AnsiblePlaybooksApi", + "AuthApi", + "BillingApi", "HealthApi", - "SandboxApi", - "VMsApi", + "HostTokensApi", + "HostsApi", + "MembersApi", + "OrganizationsApi", + "SandboxesApi", + "SourceVMsApi", ] diff --git a/sdk/fluid-py/test/test_auth_api.py b/sdk/fluid-py/test/test_auth_api.py new file mode 100644 index 00000000..1ba190aa --- /dev/null +++ b/sdk/fluid-py/test/test_auth_api.py @@ -0,0 +1,87 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator 
(https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from fluid.api.auth_api import AuthApi + + +class TestAuthApi(unittest.TestCase): + """AuthApi unit test stubs""" + + def setUp(self) -> None: + self.api = AuthApi() + + def tearDown(self) -> None: + pass + + def test_auth_github_callback_get(self) -> None: + """Test case for auth_github_callback_get + + GitHub OAuth callback + """ + pass + + def test_auth_github_get(self) -> None: + """Test case for auth_github_get + + GitHub OAuth login + """ + pass + + def test_auth_google_callback_get(self) -> None: + """Test case for auth_google_callback_get + + Google OAuth callback + """ + pass + + def test_auth_google_get(self) -> None: + """Test case for auth_google_get + + Google OAuth login + """ + pass + + def test_auth_login_post(self) -> None: + """Test case for auth_login_post + + Log in + """ + pass + + def test_auth_logout_post(self) -> None: + """Test case for auth_logout_post + + Log out + """ + pass + + def test_auth_me_get(self) -> None: + """Test case for auth_me_get + + Get current user + """ + pass + + def test_auth_register_post(self) -> None: + """Test case for auth_register_post + + Register a new user + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_billing_api.py b/sdk/fluid-py/test/test_billing_api.py new file mode 100644 index 00000000..23e4a060 --- /dev/null +++ b/sdk/fluid-py/test/test_billing_api.py @@ -0,0 +1,73 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.api.billing_api import BillingApi + + +class TestBillingApi(unittest.TestCase): + """BillingApi unit test stubs""" + + def setUp(self) -> None: + self.api = BillingApi() + + def tearDown(self) -> None: + pass + + def test_billing_calculator_post(self) -> None: + """Test case for billing_calculator_post + + Pricing calculator + """ + pass + + def test_orgs_slug_billing_get(self) -> None: + """Test case for orgs_slug_billing_get + + Get billing info + """ + pass + + def test_orgs_slug_billing_portal_post(self) -> None: + """Test case for orgs_slug_billing_portal_post + + Billing portal + """ + pass + + def test_orgs_slug_billing_subscribe_post(self) -> None: + """Test case for orgs_slug_billing_subscribe_post + + Subscribe + """ + pass + + def test_orgs_slug_billing_usage_get(self) -> None: + """Test case for orgs_slug_billing_usage_get + + Get usage + """ + pass + + def test_webhooks_stripe_post(self) -> None: + """Test case for webhooks_stripe_post + + Stripe webhook + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_host_tokens_api.py b/sdk/fluid-py/test/test_host_tokens_api.py new file mode 100644 index 00000000..b59a83f3 --- /dev/null +++ b/sdk/fluid-py/test/test_host_tokens_api.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.api.host_tokens_api import HostTokensApi + + +class TestHostTokensApi(unittest.TestCase): + """HostTokensApi unit test stubs""" + + def setUp(self) -> None: + self.api = HostTokensApi() + + def tearDown(self) -> None: + pass + + def test_orgs_slug_hosts_tokens_get(self) -> None: + """Test case for orgs_slug_hosts_tokens_get + + List host tokens + """ + pass + + def test_orgs_slug_hosts_tokens_post(self) -> None: + """Test case for orgs_slug_hosts_tokens_post + + Create host token + """ + pass + + def test_orgs_slug_hosts_tokens_token_id_delete(self) -> None: + """Test case for orgs_slug_hosts_tokens_token_id_delete + + Delete host token + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_hosts_api.py b/sdk/fluid-py/test/test_hosts_api.py new file mode 100644 index 00000000..5a7175a9 --- /dev/null +++ b/sdk/fluid-py/test/test_hosts_api.py @@ -0,0 +1,45 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.api.hosts_api import HostsApi + + +class TestHostsApi(unittest.TestCase): + """HostsApi unit test stubs""" + + def setUp(self) -> None: + self.api = HostsApi() + + def tearDown(self) -> None: + pass + + def test_orgs_slug_hosts_get(self) -> None: + """Test case for orgs_slug_hosts_get + + List hosts + """ + pass + + def test_orgs_slug_hosts_host_id_get(self) -> None: + """Test case for orgs_slug_hosts_host_id_get + + Get host + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_members_api.py b/sdk/fluid-py/test/test_members_api.py new file mode 100644 index 00000000..69c43fe9 --- /dev/null +++ b/sdk/fluid-py/test/test_members_api.py @@ -0,0 +1,52 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.api.members_api import MembersApi + + +class TestMembersApi(unittest.TestCase): + """MembersApi unit test stubs""" + + def setUp(self) -> None: + self.api = MembersApi() + + def tearDown(self) -> None: + pass + + def test_orgs_slug_members_get(self) -> None: + """Test case for orgs_slug_members_get + + List members + """ + pass + + def test_orgs_slug_members_member_id_delete(self) -> None: + """Test case for orgs_slug_members_member_id_delete + + Remove member + """ + pass + + def test_orgs_slug_members_post(self) -> None: + """Test case for orgs_slug_members_post + + Add member + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_orchestrator_create_sandbox_request.py b/sdk/fluid-py/test/test_orchestrator_create_sandbox_request.py new file mode 100644 index 00000000..c9f073b1 --- /dev/null +++ b/sdk/fluid-py/test/test_orchestrator_create_sandbox_request.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.orchestrator_create_sandbox_request import \ + OrchestratorCreateSandboxRequest + + +class TestOrchestratorCreateSandboxRequest(unittest.TestCase): + """OrchestratorCreateSandboxRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> OrchestratorCreateSandboxRequest: + """Test OrchestratorCreateSandboxRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `OrchestratorCreateSandboxRequest` + """ + model = OrchestratorCreateSandboxRequest() + if include_optional: + return OrchestratorCreateSandboxRequest( + agent_id = '', + base_image = '', + memory_mb = 56, + name = '', + network = '', + org_id = '', + source_vm = '', + ttl_seconds = 56, + vcpus = 56 + ) + else: + return OrchestratorCreateSandboxRequest( + ) + """ + + def testOrchestratorCreateSandboxRequest(self): + """Test OrchestratorCreateSandboxRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_orchestrator_host_info.py b/sdk/fluid-py/test/test_orchestrator_host_info.py new file mode 100644 index 00000000..5eb685ec --- /dev/null +++ b/sdk/fluid-py/test/test_orchestrator_host_info.py @@ -0,0 +1,63 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.orchestrator_host_info import OrchestratorHostInfo + + +class TestOrchestratorHostInfo(unittest.TestCase): + """OrchestratorHostInfo unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> OrchestratorHostInfo: + """Test OrchestratorHostInfo + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `OrchestratorHostInfo` + """ + model = OrchestratorHostInfo() + if include_optional: + return OrchestratorHostInfo( + active_sandboxes = 56, + available_cpus = 56, + available_disk_mb = 56, + available_memory_mb = 56, + base_images = [ + '' + ], + host_id = '', + hostname = '', + last_heartbeat = '', + status = '' + ) + else: + return OrchestratorHostInfo( + ) + """ + + def testOrchestratorHostInfo(self): + """Test OrchestratorHostInfo""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_orchestrator_prepare_request.py b/sdk/fluid-py/test/test_orchestrator_prepare_request.py new file mode 100644 index 00000000..e774100a --- /dev/null +++ b/sdk/fluid-py/test/test_orchestrator_prepare_request.py @@ -0,0 +1,55 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.orchestrator_prepare_request import \ + OrchestratorPrepareRequest + + +class TestOrchestratorPrepareRequest(unittest.TestCase): + """OrchestratorPrepareRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> OrchestratorPrepareRequest: + """Test OrchestratorPrepareRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `OrchestratorPrepareRequest` + """ + model = OrchestratorPrepareRequest() + if include_optional: + return OrchestratorPrepareRequest( + ssh_key_path = '', + ssh_user = '' + ) + else: + return OrchestratorPrepareRequest( + ) + """ + + def testOrchestratorPrepareRequest(self): + """Test OrchestratorPrepareRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_orchestrator_read_source_request.py b/sdk/fluid-py/test/test_orchestrator_read_source_request.py new file mode 100644 index 00000000..4a03bb15 --- /dev/null +++ b/sdk/fluid-py/test/test_orchestrator_read_source_request.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.orchestrator_read_source_request import \ + OrchestratorReadSourceRequest + + +class TestOrchestratorReadSourceRequest(unittest.TestCase): + """OrchestratorReadSourceRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> OrchestratorReadSourceRequest: + """Test OrchestratorReadSourceRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `OrchestratorReadSourceRequest` + """ + model = OrchestratorReadSourceRequest() + if include_optional: + return OrchestratorReadSourceRequest( + path = '' + ) + else: + return OrchestratorReadSourceRequest( + ) + """ + + def testOrchestratorReadSourceRequest(self): + """Test OrchestratorReadSourceRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_orchestrator_run_command_request.py b/sdk/fluid-py/test/test_orchestrator_run_command_request.py new file mode 100644 index 00000000..4d692efb --- /dev/null +++ b/sdk/fluid-py/test/test_orchestrator_run_command_request.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.orchestrator_run_command_request import \ + OrchestratorRunCommandRequest + + +class TestOrchestratorRunCommandRequest(unittest.TestCase): + """OrchestratorRunCommandRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> OrchestratorRunCommandRequest: + """Test OrchestratorRunCommandRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `OrchestratorRunCommandRequest` + """ + model = OrchestratorRunCommandRequest() + if include_optional: + return OrchestratorRunCommandRequest( + command = '', + env = { + 'key' : '' + }, + timeout_seconds = 56 + ) + else: + return OrchestratorRunCommandRequest( + ) + """ + + def testOrchestratorRunCommandRequest(self): + """Test OrchestratorRunCommandRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_orchestrator_run_source_request.py b/sdk/fluid-py/test/test_orchestrator_run_source_request.py new file mode 100644 index 00000000..92d317f9 --- /dev/null +++ b/sdk/fluid-py/test/test_orchestrator_run_source_request.py @@ -0,0 +1,55 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.orchestrator_run_source_request import \ + OrchestratorRunSourceRequest + + +class TestOrchestratorRunSourceRequest(unittest.TestCase): + """OrchestratorRunSourceRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> OrchestratorRunSourceRequest: + """Test OrchestratorRunSourceRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `OrchestratorRunSourceRequest` + """ + model = OrchestratorRunSourceRequest() + if include_optional: + return OrchestratorRunSourceRequest( + command = '', + timeout_seconds = 56 + ) + else: + return OrchestratorRunSourceRequest( + ) + """ + + def testOrchestratorRunSourceRequest(self): + """Test OrchestratorRunSourceRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_orchestrator_snapshot_request.py b/sdk/fluid-py/test/test_orchestrator_snapshot_request.py new file mode 100644 index 00000000..32032e99 --- /dev/null +++ b/sdk/fluid-py/test/test_orchestrator_snapshot_request.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.orchestrator_snapshot_request import \ + OrchestratorSnapshotRequest + + +class TestOrchestratorSnapshotRequest(unittest.TestCase): + """OrchestratorSnapshotRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> OrchestratorSnapshotRequest: + """Test OrchestratorSnapshotRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `OrchestratorSnapshotRequest` + """ + model = OrchestratorSnapshotRequest() + if include_optional: + return OrchestratorSnapshotRequest( + name = '' + ) + else: + return OrchestratorSnapshotRequest( + ) + """ + + def testOrchestratorSnapshotRequest(self): + """Test OrchestratorSnapshotRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_orchestrator_snapshot_response.py b/sdk/fluid-py/test/test_orchestrator_snapshot_response.py new file mode 100644 index 00000000..b63f96bd --- /dev/null +++ b/sdk/fluid-py/test/test_orchestrator_snapshot_response.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.orchestrator_snapshot_response import \ + OrchestratorSnapshotResponse + + +class TestOrchestratorSnapshotResponse(unittest.TestCase): + """OrchestratorSnapshotResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> OrchestratorSnapshotResponse: + """Test OrchestratorSnapshotResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `OrchestratorSnapshotResponse` + """ + model = OrchestratorSnapshotResponse() + if include_optional: + return OrchestratorSnapshotResponse( + created_at = '', + sandbox_id = '', + snapshot_id = '', + snapshot_name = '' + ) + else: + return OrchestratorSnapshotResponse( + ) + """ + + def testOrchestratorSnapshotResponse(self): + """Test OrchestratorSnapshotResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_orchestrator_source_command_result.py b/sdk/fluid-py/test/test_orchestrator_source_command_result.py new file mode 100644 index 00000000..7f7e6c56 --- /dev/null +++ b/sdk/fluid-py/test/test_orchestrator_source_command_result.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.orchestrator_source_command_result import \ + OrchestratorSourceCommandResult + + +class TestOrchestratorSourceCommandResult(unittest.TestCase): + """OrchestratorSourceCommandResult unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> OrchestratorSourceCommandResult: + """Test OrchestratorSourceCommandResult + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `OrchestratorSourceCommandResult` + """ + model = OrchestratorSourceCommandResult() + if include_optional: + return OrchestratorSourceCommandResult( + exit_code = 56, + source_vm = '', + stderr = '', + stdout = '' + ) + else: + return OrchestratorSourceCommandResult( + ) + """ + + def testOrchestratorSourceCommandResult(self): + """Test OrchestratorSourceCommandResult""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_orchestrator_source_file_result.py b/sdk/fluid-py/test/test_orchestrator_source_file_result.py new file mode 100644 index 00000000..7d274021 --- /dev/null +++ b/sdk/fluid-py/test/test_orchestrator_source_file_result.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.orchestrator_source_file_result import \ + OrchestratorSourceFileResult + + +class TestOrchestratorSourceFileResult(unittest.TestCase): + """OrchestratorSourceFileResult unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> OrchestratorSourceFileResult: + """Test OrchestratorSourceFileResult + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `OrchestratorSourceFileResult` + """ + model = OrchestratorSourceFileResult() + if include_optional: + return OrchestratorSourceFileResult( + content = '', + path = '', + source_vm = '' + ) + else: + return OrchestratorSourceFileResult( + ) + """ + + def testOrchestratorSourceFileResult(self): + """Test OrchestratorSourceFileResult""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_organizations_api.py b/sdk/fluid-py/test/test_organizations_api.py new file mode 100644 index 00000000..f64a3eb8 --- /dev/null +++ b/sdk/fluid-py/test/test_organizations_api.py @@ -0,0 +1,66 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.api.organizations_api import OrganizationsApi + + +class TestOrganizationsApi(unittest.TestCase): + """OrganizationsApi unit test stubs""" + + def setUp(self) -> None: + self.api = OrganizationsApi() + + def tearDown(self) -> None: + pass + + def test_orgs_get(self) -> None: + """Test case for orgs_get + + List organizations + """ + pass + + def test_orgs_post(self) -> None: + """Test case for orgs_post + + Create organization + """ + pass + + def test_orgs_slug_delete(self) -> None: + """Test case for orgs_slug_delete + + Delete organization + """ + pass + + def test_orgs_slug_get(self) -> None: + """Test case for orgs_slug_get + + Get organization + """ + pass + + def test_orgs_slug_patch(self) -> None: + """Test case for orgs_slug_patch + + Update organization + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_add_member_request.py b/sdk/fluid-py/test/test_rest_add_member_request.py new file mode 100644 index 00000000..f33f9d68 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_add_member_request.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_add_member_request import RestAddMemberRequest + + +class TestRestAddMemberRequest(unittest.TestCase): + """RestAddMemberRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestAddMemberRequest: + """Test RestAddMemberRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestAddMemberRequest` + """ + model = RestAddMemberRequest() + if include_optional: + return RestAddMemberRequest( + email = '', + role = '' + ) + else: + return RestAddMemberRequest( + ) + """ + + def testRestAddMemberRequest(self): + """Test RestAddMemberRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_auth_response.py b/sdk/fluid-py/test/test_rest_auth_response.py new file mode 100644 index 00000000..89b0b285 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_auth_response.py @@ -0,0 +1,53 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_auth_response import RestAuthResponse + + +class TestRestAuthResponse(unittest.TestCase): + """RestAuthResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestAuthResponse: + """Test RestAuthResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestAuthResponse` + """ + model = RestAuthResponse() + if include_optional: + return RestAuthResponse( + user = {"email_verified":true,"avatar_url":"avatar_url","id":"id","display_name":"display_name","email":"email"} + ) + else: + return RestAuthResponse( + ) + """ + + def testRestAuthResponse(self): + """Test RestAuthResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_billing_response.py b/sdk/fluid-py/test/test_rest_billing_response.py new file mode 100644 index 00000000..a14805c3 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_billing_response.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_billing_response import RestBillingResponse + + +class TestRestBillingResponse(unittest.TestCase): + """RestBillingResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestBillingResponse: + """Test RestBillingResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestBillingResponse` + """ + model = RestBillingResponse() + if include_optional: + return RestBillingResponse( + free_tier = {"max_agent_hosts":0,"max_concurrent_sandboxes":6,"max_source_vms":1}, + plan = '', + status = '', + usage = {"agent_hosts":5.962133916683182,"sandbox_hours":5.637376656633329,"source_vms":2.3021358869347655} + ) + else: + return RestBillingResponse( + ) + """ + + def testRestBillingResponse(self): + """Test RestBillingResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_calculator_request.py b/sdk/fluid-py/test/test_rest_calculator_request.py new file mode 100644 index 00000000..36c180e6 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_calculator_request.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_calculator_request import RestCalculatorRequest + + +class TestRestCalculatorRequest(unittest.TestCase): + """RestCalculatorRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestCalculatorRequest: + """Test RestCalculatorRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestCalculatorRequest` + """ + model = RestCalculatorRequest() + if include_optional: + return RestCalculatorRequest( + agent_hosts = 56, + concurrent_sandboxes = 56, + hours_per_month = 1.337, + source_vms = 56 + ) + else: + return RestCalculatorRequest( + ) + """ + + def testRestCalculatorRequest(self): + """Test RestCalculatorRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_calculator_response.py b/sdk/fluid-py/test/test_rest_calculator_response.py new file mode 100644 index 00000000..40feb309 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_calculator_response.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_calculator_response import RestCalculatorResponse + + +class TestRestCalculatorResponse(unittest.TestCase): + """RestCalculatorResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestCalculatorResponse: + """Test RestCalculatorResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestCalculatorResponse` + """ + model = RestCalculatorResponse() + if include_optional: + return RestCalculatorResponse( + agent_host_cost = 1.337, + currency = '', + sandbox_cost = 1.337, + source_vm_cost = 1.337, + total_monthly = 1.337 + ) + else: + return RestCalculatorResponse( + ) + """ + + def testRestCalculatorResponse(self): + """Test RestCalculatorResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_create_host_token_request.py b/sdk/fluid-py/test/test_rest_create_host_token_request.py new file mode 100644 index 00000000..e2e75853 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_create_host_token_request.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_create_host_token_request import \ + RestCreateHostTokenRequest + + +class TestRestCreateHostTokenRequest(unittest.TestCase): + """RestCreateHostTokenRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestCreateHostTokenRequest: + """Test RestCreateHostTokenRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestCreateHostTokenRequest` + """ + model = RestCreateHostTokenRequest() + if include_optional: + return RestCreateHostTokenRequest( + name = '' + ) + else: + return RestCreateHostTokenRequest( + ) + """ + + def testRestCreateHostTokenRequest(self): + """Test RestCreateHostTokenRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_create_org_request.py b/sdk/fluid-py/test/test_rest_create_org_request.py new file mode 100644 index 00000000..3500c061 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_create_org_request.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_create_org_request import RestCreateOrgRequest + + +class TestRestCreateOrgRequest(unittest.TestCase): + """RestCreateOrgRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestCreateOrgRequest: + """Test RestCreateOrgRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestCreateOrgRequest` + """ + model = RestCreateOrgRequest() + if include_optional: + return RestCreateOrgRequest( + name = '', + slug = '' + ) + else: + return RestCreateOrgRequest( + ) + """ + + def testRestCreateOrgRequest(self): + """Test RestCreateOrgRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_free_tier_info.py b/sdk/fluid-py/test/test_rest_free_tier_info.py new file mode 100644 index 00000000..8801d49d --- /dev/null +++ b/sdk/fluid-py/test/test_rest_free_tier_info.py @@ -0,0 +1,55 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_free_tier_info import RestFreeTierInfo + + +class TestRestFreeTierInfo(unittest.TestCase): + """RestFreeTierInfo unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestFreeTierInfo: + """Test RestFreeTierInfo + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestFreeTierInfo` + """ + model = RestFreeTierInfo() + if include_optional: + return RestFreeTierInfo( + max_agent_hosts = 56, + max_concurrent_sandboxes = 56, + max_source_vms = 56 + ) + else: + return RestFreeTierInfo( + ) + """ + + def testRestFreeTierInfo(self): + """Test RestFreeTierInfo""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_host_token_response.py b/sdk/fluid-py/test/test_rest_host_token_response.py new file mode 100644 index 00000000..b99ae6a8 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_host_token_response.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_host_token_response import RestHostTokenResponse + + +class TestRestHostTokenResponse(unittest.TestCase): + """RestHostTokenResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestHostTokenResponse: + """Test RestHostTokenResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestHostTokenResponse` + """ + model = RestHostTokenResponse() + if include_optional: + return RestHostTokenResponse( + created_at = '', + id = '', + name = '', + token = '' + ) + else: + return RestHostTokenResponse( + ) + """ + + def testRestHostTokenResponse(self): + """Test RestHostTokenResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_login_request.py b/sdk/fluid-py/test/test_rest_login_request.py new file mode 100644 index 00000000..5429f5af --- /dev/null +++ b/sdk/fluid-py/test/test_rest_login_request.py @@ -0,0 +1,54 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_login_request import RestLoginRequest + + +class TestRestLoginRequest(unittest.TestCase): + """RestLoginRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestLoginRequest: + """Test RestLoginRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestLoginRequest` + """ + model = RestLoginRequest() + if include_optional: + return RestLoginRequest( + email = '', + password = '' + ) + else: + return RestLoginRequest( + ) + """ + + def testRestLoginRequest(self): + """Test RestLoginRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_member_response.py b/sdk/fluid-py/test/test_rest_member_response.py new file mode 100644 index 00000000..271277fb --- /dev/null +++ b/sdk/fluid-py/test/test_rest_member_response.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_member_response import RestMemberResponse + + +class TestRestMemberResponse(unittest.TestCase): + """RestMemberResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestMemberResponse: + """Test RestMemberResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestMemberResponse` + """ + model = RestMemberResponse() + if include_optional: + return RestMemberResponse( + created_at = '', + id = '', + role = '', + user_id = '' + ) + else: + return RestMemberResponse( + ) + """ + + def testRestMemberResponse(self): + """Test RestMemberResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_org_response.py b/sdk/fluid-py/test/test_rest_org_response.py new file mode 100644 index 00000000..438490a9 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_org_response.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_org_response import RestOrgResponse + + +class TestRestOrgResponse(unittest.TestCase): + """RestOrgResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestOrgResponse: + """Test RestOrgResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestOrgResponse` + """ + model = RestOrgResponse() + if include_optional: + return RestOrgResponse( + created_at = '', + id = '', + name = '', + owner_id = '', + slug = '', + stripe_customer_id = '' + ) + else: + return RestOrgResponse( + ) + """ + + def testRestOrgResponse(self): + """Test RestOrgResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_register_request.py b/sdk/fluid-py/test/test_rest_register_request.py new file mode 100644 index 00000000..3fe6cc2f --- /dev/null +++ b/sdk/fluid-py/test/test_rest_register_request.py @@ -0,0 +1,55 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_register_request import RestRegisterRequest + + +class TestRestRegisterRequest(unittest.TestCase): + """RestRegisterRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestRegisterRequest: + """Test RestRegisterRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestRegisterRequest` + """ + model = RestRegisterRequest() + if include_optional: + return RestRegisterRequest( + display_name = '', + email = '', + password = '' + ) + else: + return RestRegisterRequest( + ) + """ + + def testRestRegisterRequest(self): + """Test RestRegisterRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_swagger_error.py b/sdk/fluid-py/test/test_rest_swagger_error.py new file mode 100644 index 00000000..9e400be5 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_swagger_error.py @@ -0,0 +1,55 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_swagger_error import RestSwaggerError + + +class TestRestSwaggerError(unittest.TestCase): + """RestSwaggerError unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestSwaggerError: + """Test RestSwaggerError + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestSwaggerError` + """ + model = RestSwaggerError() + if include_optional: + return RestSwaggerError( + code = 56, + details = '', + error = '' + ) + else: + return RestSwaggerError( + ) + """ + + def testRestSwaggerError(self): + """Test RestSwaggerError""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_update_org_request.py b/sdk/fluid-py/test/test_rest_update_org_request.py new file mode 100644 index 00000000..42fd0463 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_update_org_request.py @@ -0,0 +1,53 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_update_org_request import RestUpdateOrgRequest + + +class TestRestUpdateOrgRequest(unittest.TestCase): + """RestUpdateOrgRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestUpdateOrgRequest: + """Test RestUpdateOrgRequest + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestUpdateOrgRequest` + """ + model = RestUpdateOrgRequest() + if include_optional: + return RestUpdateOrgRequest( + name = '' + ) + else: + return RestUpdateOrgRequest( + ) + """ + + def testRestUpdateOrgRequest(self): + """Test RestUpdateOrgRequest""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_usage_summary.py b/sdk/fluid-py/test/test_rest_usage_summary.py new file mode 100644 index 00000000..d98ec733 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_usage_summary.py @@ -0,0 +1,55 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_usage_summary import RestUsageSummary + + +class TestRestUsageSummary(unittest.TestCase): + """RestUsageSummary unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestUsageSummary: + """Test RestUsageSummary + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestUsageSummary` + """ + model = RestUsageSummary() + if include_optional: + return RestUsageSummary( + agent_hosts = 1.337, + sandbox_hours = 1.337, + source_vms = 1.337 + ) + else: + return RestUsageSummary( + ) + """ + + def testRestUsageSummary(self): + """Test RestUsageSummary""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_rest_user_response.py b/sdk/fluid-py/test/test_rest_user_response.py new file mode 100644 index 00000000..5af2d835 --- /dev/null +++ b/sdk/fluid-py/test/test_rest_user_response.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.rest_user_response import RestUserResponse + + +class TestRestUserResponse(unittest.TestCase): + """RestUserResponse unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> RestUserResponse: + """Test RestUserResponse + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `RestUserResponse` + """ + model = RestUserResponse() + if include_optional: + return RestUserResponse( + avatar_url = '', + display_name = '', + email = '', + email_verified = True, + id = '' + ) + else: + return RestUserResponse( + ) + """ + + def testRestUserResponse(self): + """Test RestUserResponse""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_sandboxes_api.py b/sdk/fluid-py/test/test_sandboxes_api.py new file mode 100644 index 00000000..3c983de0 --- /dev/null +++ b/sdk/fluid-py/test/test_sandboxes_api.py @@ -0,0 +1,101 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.api.sandboxes_api import SandboxesApi + + +class TestSandboxesApi(unittest.TestCase): + """SandboxesApi unit test stubs""" + + def setUp(self) -> None: + self.api = SandboxesApi() + + def tearDown(self) -> None: + pass + + def test_orgs_slug_sandboxes_get(self) -> None: + """Test case for orgs_slug_sandboxes_get + + List sandboxes + """ + pass + + def test_orgs_slug_sandboxes_post(self) -> None: + """Test case for orgs_slug_sandboxes_post + + Create sandbox + """ + pass + + def test_orgs_slug_sandboxes_sandbox_id_commands_get(self) -> None: + """Test case for orgs_slug_sandboxes_sandbox_id_commands_get + + List commands + """ + pass + + def test_orgs_slug_sandboxes_sandbox_id_delete(self) -> None: + """Test case for orgs_slug_sandboxes_sandbox_id_delete + + Destroy sandbox + """ + pass + + def test_orgs_slug_sandboxes_sandbox_id_get(self) -> None: + """Test case for orgs_slug_sandboxes_sandbox_id_get + + Get sandbox + """ + pass + + def test_orgs_slug_sandboxes_sandbox_id_run_post(self) -> None: + """Test case for orgs_slug_sandboxes_sandbox_id_run_post + + Run command + """ + pass + + def test_orgs_slug_sandboxes_sandbox_id_snapshot_post(self) -> None: + """Test case for orgs_slug_sandboxes_sandbox_id_snapshot_post + + Create snapshot + """ + pass + + def test_orgs_slug_sandboxes_sandbox_id_start_post(self) -> None: + """Test case for orgs_slug_sandboxes_sandbox_id_start_post + + Start sandbox + """ + pass + + def test_orgs_slug_sandboxes_sandbox_id_stop_post(self) -> None: + """Test case for orgs_slug_sandboxes_sandbox_id_stop_post + + Stop sandbox + """ + pass + + def test_orgs_slug_sandboxes_sandbox_idip_get(self) -> None: + """Test case for orgs_slug_sandboxes_sandbox_idip_get + + Get sandbox IP + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_source_vms_api.py b/sdk/fluid-py/test/test_source_vms_api.py new file mode 100644 index 00000000..8d2fb4df --- 
/dev/null +++ b/sdk/fluid-py/test/test_source_vms_api.py @@ -0,0 +1,59 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +import unittest + +from fluid.api.source_vms_api import SourceVMsApi + + +class TestSourceVMsApi(unittest.TestCase): + """SourceVMsApi unit test stubs""" + + def setUp(self) -> None: + self.api = SourceVMsApi() + + def tearDown(self) -> None: + pass + + def test_orgs_slug_sources_vm_prepare_post(self) -> None: + """Test case for orgs_slug_sources_vm_prepare_post + + Prepare source VM + """ + pass + + def test_orgs_slug_sources_vm_read_post(self) -> None: + """Test case for orgs_slug_sources_vm_read_post + + Read source file + """ + pass + + def test_orgs_slug_sources_vm_run_post(self) -> None: + """Test case for orgs_slug_sources_vm_run_post + + Run source command + """ + pass + + def test_orgs_slug_vms_get(self) -> None: + """Test case for orgs_slug_vms_get + + List source VMs + """ + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_store_command.py b/sdk/fluid-py/test/test_store_command.py new file mode 100644 index 00000000..882f88ba --- /dev/null +++ b/sdk/fluid-py/test/test_store_command.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.store_command import StoreCommand + + +class TestStoreCommand(unittest.TestCase): + """StoreCommand unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> StoreCommand: + """Test StoreCommand + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `StoreCommand` + """ + model = StoreCommand() + if include_optional: + return StoreCommand( + command = '', + duration_ms = 56, + ended_at = '', + exit_code = 56, + id = '', + sandbox_id = '', + started_at = '', + stderr = '', + stdout = '' + ) + else: + return StoreCommand( + ) + """ + + def testStoreCommand(self): + """Test StoreCommand""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_store_sandbox.py b/sdk/fluid-py/test/test_store_sandbox.py new file mode 100644 index 00000000..ea4712c4 --- /dev/null +++ b/sdk/fluid-py/test/test_store_sandbox.py @@ -0,0 +1,70 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.store_sandbox import StoreSandbox + + +class TestStoreSandbox(unittest.TestCase): + """StoreSandbox unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> StoreSandbox: + """Test StoreSandbox + include_optional is a boolean, when False only required + params are included, when True both required and + optional params are included""" + # uncomment below to create an instance of `StoreSandbox` + """ + model = StoreSandbox() + if include_optional: + return StoreSandbox( + agent_id = '', + base_image = '', + bridge = '', + created_at = '', + deleted_at = '', + host_id = '', + id = '', + ip_address = '', + mac_address = '', + memory_mb = 56, + name = '', + org_id = '', + source_vm = '', + state = 'CREATING', + tap_device = '', + ttl_seconds = 56, + updated_at = '', + vcpus = 56 + ) + else: + return StoreSandbox( + ) + """ + + def testStoreSandbox(self): + """Test StoreSandbox""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/fluid-py/test/test_store_sandbox_state.py b/sdk/fluid-py/test/test_store_sandbox_state.py new file mode 100644 index 00000000..659dcec4 --- /dev/null +++ b/sdk/fluid-py/test/test_store_sandbox_state.py @@ -0,0 +1,35 @@ +# coding: utf-8 + +""" + Fluid API + + API for managing sandboxes, organizations, billing, and hosts + + The version of the OpenAPI document: 1.0 + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest + +from fluid.models.store_sandbox_state import StoreSandboxState + + +class TestStoreSandboxState(unittest.TestCase): + """StoreSandboxState unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testStoreSandboxState(self): + """Test StoreSandboxState""" + # inst = StoreSandboxState() + + +if __name__ == "__main__": + unittest.main() diff --git a/sdk/scripts/generate.sh b/sdk/scripts/generate.sh index ef950adc..c780c48d 100755 --- a/sdk/scripts/generate.sh +++ b/sdk/scripts/generate.sh @@ -10,7 +10,7 @@ docker run --rm \ --user "$(id -u):$(id -g)" \ -v ${PWD}/..:/local \ openapitools/openapi-generator-cli generate --skip-validate-spec \ - -i /local/fluid-remote/docs/openapi.yaml \ + -i /local/api/docs/openapi.yaml \ -g python \ -o /local/sdk/fluid-py/ \ -c /local/sdk/.openapi-generator/config.yaml \ diff --git a/web/.gitignore b/web/.gitignore index 5df4b9bc..de0226fb 100644 --- a/web/.gitignore +++ b/web/.gitignore @@ -24,3 +24,4 @@ dist-ssr *.sln *.sw? .eslintcache +.env diff --git a/web/AGENTS.md b/web/AGENTS.md index 49fe87b7..6abbe684 100644 --- a/web/AGENTS.md +++ b/web/AGENTS.md @@ -116,7 +116,7 @@ web/ ## Development Workflow -1. Start the backend services (API at `:8080`, tmux-client at `:8081`) +1. Start the backend services (API at `:8080`) 2. Run `bun run dev` to start the frontend dev server 3. Changes to source files trigger hot module replacement 4. Run `bun run generate-api` after backend API changes to update types diff --git a/web/Dockerfile b/web/Dockerfile index c3d2bebe..e375d0a8 100644 --- a/web/Dockerfile +++ b/web/Dockerfile @@ -1,19 +1,12 @@ -FROM oven/bun:latest - -# Set the working directory +FROM oven/bun:latest AS build WORKDIR /app - -# Copy package files and lock file COPY package.json bun.lock ./ - -# Install dependencies -RUN bun install - -# Copy the rest of the application code +RUN bun install --frozen-lockfile COPY . . 
+RUN bun run build -# Expose the port Vite uses (default 5173) -EXPOSE 5173 - -# Run the start command (adjust if you add a start script) -CMD ["bun", "run", "start"] +FROM nginx:alpine +COPY --from=build /app/dist /usr/share/nginx/html +COPY nginx.conf /etc/nginx/templates/default.conf.template +ENV NGINX_ENVSUBST_FILTER=API_URL +EXPOSE 80 diff --git a/web/bun.lock b/web/bun.lock index 4e9cf62e..a3733765 100644 --- a/web/bun.lock +++ b/web/bun.lock @@ -2,35 +2,44 @@ "lockfileVersion": 1, "workspaces": { "": { - "name": "virsh-sandbox-frontend", + "name": "fluid-web", "dependencies": { "@base-ui/react": "^1.0.0", "@faker-js/faker": "^10.1.0", "@fontsource-variable/jetbrains-mono": "^5.2.8", + "@mdx-js/rollup": "^3.1.1", "@tailwindcss/vite": "^4.1.18", "@tanstack/react-form": "^1.27.6", "@tanstack/react-query": "^5.90.12", "@tanstack/react-router": "^1.143.4", "@tanstack/react-table": "^8.21.3", "@tanstack/router-devtools": "^1.143.4", + "@xterm/addon-fit": "^0.11.0", + "@xterm/xterm": "^6.0.0", "axios": "^1.13.2", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", + "framer-motion": "^12.34.0", "lucide-react": "^0.562.0", "next-themes": "^0.4.6", + "posthog-js": "^1.351.1", "radix-ui": "^1.4.3", "react": "^19.2.0", "react-dom": "^19.2.0", + "react-intersection-observer": "^10.0.2", "shadcn": "^3.6.2", + "shiki": "^3.22.0", "sonner": "^2.0.7", "tailwind-merge": "^3.4.0", "tailwindcss": "^4.1.18", "tw-animate-css": "^1.4.0", + "yaml": "^2.8.2", "zod": "^4.2.1", }, "devDependencies": { "@eslint/js": "^9.39.1", "@tanstack/router-plugin": "^1.143.4", + "@types/mdx": "^2.0.13", "@types/node": "^25.0.3", "@types/react": "^19.2.5", "@types/react-dom": "^19.2.3", @@ -42,6 +51,9 @@ "orval": "^7.17.2", "prettier": "^3.7.4", "prettier-plugin-tailwindcss": "^0.7.2", + "remark-frontmatter": "^5.0.0", + "remark-gfm": "^4.0.1", + "remark-mdx-frontmatter": "^5.2.0", "typescript": "~5.9.3", "typescript-eslint": "^8.46.4", "vite": "npm:rolldown-vite@7.2.5", @@ -52,1910 +64,11872 
@@ "vite": "npm:rolldown-vite@7.2.5", }, "packages": { - "@antfu/ni": ["@antfu/ni@25.0.0", "", { "dependencies": { "ansis": "^4.0.0", "fzf": "^0.5.2", "package-manager-detector": "^1.3.0", "tinyexec": "^1.0.1" }, "bin": { "na": "bin/na.mjs", "ni": "bin/ni.mjs", "nr": "bin/nr.mjs", "nci": "bin/nci.mjs", "nlx": "bin/nlx.mjs", "nun": "bin/nun.mjs", "nup": "bin/nup.mjs" } }, "sha512-9q/yCljni37pkMr4sPrI3G4jqdIk074+iukc5aFJl7kmDCCsiJrbZ6zKxnES1Gwg+i9RcDZwvktl23puGslmvA=="], - - "@apidevtools/json-schema-ref-parser": ["@apidevtools/json-schema-ref-parser@14.0.1", "", { "dependencies": { "@types/json-schema": "^7.0.15", "js-yaml": "^4.1.0" } }, "sha512-Oc96zvmxx1fqoSEdUmfmvvb59/KDOnUoJ7s2t7bISyAn0XEz57LCCw8k2Y4Pf3mwKaZLMciESALORLgfe2frCw=="], - - "@apidevtools/openapi-schemas": ["@apidevtools/openapi-schemas@2.1.0", "", {}, "sha512-Zc1AlqrJlX3SlpupFGpiLi2EbteyP7fXmUOGup6/DnkRgjP9bgMM/ag+n91rsv0U1Gpz0H3VILA/o3bW7Ua6BQ=="], - - "@apidevtools/swagger-methods": ["@apidevtools/swagger-methods@3.0.2", "", {}, "sha512-QAkD5kK2b1WfjDS/UQn/qQkbwF31uqRjPTrsCs5ZG9BQGAkjwvqGFjjPqAuzac/IYzpPtRzjCP1WrTuAIjMrXg=="], - - "@apidevtools/swagger-parser": ["@apidevtools/swagger-parser@12.1.0", "", { "dependencies": { "@apidevtools/json-schema-ref-parser": "14.0.1", "@apidevtools/openapi-schemas": "^2.1.0", "@apidevtools/swagger-methods": "^3.0.2", "ajv": "^8.17.1", "ajv-draft-04": "^1.0.0", "call-me-maybe": "^1.0.2" }, "peerDependencies": { "openapi-types": ">=7" } }, "sha512-e5mJoswsnAX0jG+J09xHFYQXb/bUc5S3pLpMxUuRUA2H8T2kni3yEoyz2R3Dltw5f4A6j6rPNMpWTK+iVDFlng=="], - - "@asyncapi/specs": ["@asyncapi/specs@6.10.0", "", { "dependencies": { "@types/json-schema": "^7.0.11" } }, "sha512-vB5oKLsdrLUORIZ5BXortZTlVyGWWMC1Nud/0LtgxQ3Yn2738HigAD6EVqScvpPsDUI/bcLVsYEXN4dtXQHVng=="], - - "@babel/code-frame": ["@babel/code-frame@7.27.1", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.27.1", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, 
"sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg=="], - - "@babel/compat-data": ["@babel/compat-data@7.28.5", "", {}, "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA=="], - - "@babel/core": ["@babel/core@7.28.5", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", "@babel/helper-compilation-targets": "^7.27.2", "@babel/helper-module-transforms": "^7.28.3", "@babel/helpers": "^7.28.4", "@babel/parser": "^7.28.5", "@babel/template": "^7.27.2", "@babel/traverse": "^7.28.5", "@babel/types": "^7.28.5", "@jridgewell/remapping": "^2.3.5", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw=="], - - "@babel/generator": ["@babel/generator@7.28.5", "", { "dependencies": { "@babel/parser": "^7.28.5", "@babel/types": "^7.28.5", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ=="], - - "@babel/helper-annotate-as-pure": ["@babel/helper-annotate-as-pure@7.27.3", "", { "dependencies": { "@babel/types": "^7.27.3" } }, "sha512-fXSwMQqitTGeHLBC08Eq5yXz2m37E4pJX1qAU1+2cNedz/ifv/bVXft90VeSav5nFO61EcNgwr0aJxbyPaWBPg=="], - - "@babel/helper-compilation-targets": ["@babel/helper-compilation-targets@7.27.2", "", { "dependencies": { "@babel/compat-data": "^7.27.2", "@babel/helper-validator-option": "^7.27.1", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" } }, "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ=="], - - "@babel/helper-create-class-features-plugin": ["@babel/helper-create-class-features-plugin@7.28.5", "", { "dependencies": { "@babel/helper-annotate-as-pure": "^7.27.3", 
"@babel/helper-member-expression-to-functions": "^7.28.5", "@babel/helper-optimise-call-expression": "^7.27.1", "@babel/helper-replace-supers": "^7.27.1", "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", "@babel/traverse": "^7.28.5", "semver": "^6.3.1" }, "peerDependencies": { "@babel/core": "^7.0.0" } }, "sha512-q3WC4JfdODypvxArsJQROfupPBq9+lMwjKq7C33GhbFYJsufD0yd/ziwD+hJucLeWsnFPWZjsU2DNFqBPE7jwQ=="], - - "@babel/helper-globals": ["@babel/helper-globals@7.28.0", "", {}, "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw=="], - - "@babel/helper-member-expression-to-functions": ["@babel/helper-member-expression-to-functions@7.28.5", "", { "dependencies": { "@babel/traverse": "^7.28.5", "@babel/types": "^7.28.5" } }, "sha512-cwM7SBRZcPCLgl8a7cY0soT1SptSzAlMH39vwiRpOQkJlh53r5hdHwLSCZpQdVLT39sZt+CRpNwYG4Y2v77atg=="], - - "@babel/helper-module-imports": ["@babel/helper-module-imports@7.27.1", "", { "dependencies": { "@babel/traverse": "^7.27.1", "@babel/types": "^7.27.1" } }, "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w=="], - - "@babel/helper-module-transforms": ["@babel/helper-module-transforms@7.28.3", "", { "dependencies": { "@babel/helper-module-imports": "^7.27.1", "@babel/helper-validator-identifier": "^7.27.1", "@babel/traverse": "^7.28.3" }, "peerDependencies": { "@babel/core": "^7.0.0" } }, "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw=="], - - "@babel/helper-optimise-call-expression": ["@babel/helper-optimise-call-expression@7.27.1", "", { "dependencies": { "@babel/types": "^7.27.1" } }, "sha512-URMGH08NzYFhubNSGJrpUEphGKQwMQYBySzat5cAByY1/YgIRkULnIy3tAMeszlL/so2HbeilYloUmSpd7GdVw=="], - - "@babel/helper-plugin-utils": ["@babel/helper-plugin-utils@7.27.1", "", {}, "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw=="], - - "@babel/helper-replace-supers": 
["@babel/helper-replace-supers@7.27.1", "", { "dependencies": { "@babel/helper-member-expression-to-functions": "^7.27.1", "@babel/helper-optimise-call-expression": "^7.27.1", "@babel/traverse": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0" } }, "sha512-7EHz6qDZc8RYS5ElPoShMheWvEgERonFCs7IAonWLLUTXW59DP14bCZt89/GKyreYn8g3S83m21FelHKbeDCKA=="], - - "@babel/helper-skip-transparent-expression-wrappers": ["@babel/helper-skip-transparent-expression-wrappers@7.27.1", "", { "dependencies": { "@babel/traverse": "^7.27.1", "@babel/types": "^7.27.1" } }, "sha512-Tub4ZKEXqbPjXgWLl2+3JpQAYBJ8+ikpQ2Ocj/q/r0LwE3UhENh7EUabyHjz2kCEsrRY83ew2DQdHluuiDQFzg=="], - - "@babel/helper-string-parser": ["@babel/helper-string-parser@7.27.1", "", {}, "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA=="], - - "@babel/helper-validator-identifier": ["@babel/helper-validator-identifier@7.28.5", "", {}, "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q=="], - - "@babel/helper-validator-option": ["@babel/helper-validator-option@7.27.1", "", {}, "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg=="], - - "@babel/helpers": ["@babel/helpers@7.28.4", "", { "dependencies": { "@babel/template": "^7.27.2", "@babel/types": "^7.28.4" } }, "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w=="], - - "@babel/parser": ["@babel/parser@7.28.5", "", { "dependencies": { "@babel/types": "^7.28.5" }, "bin": "./bin/babel-parser.js" }, "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ=="], - - "@babel/plugin-syntax-jsx": ["@babel/plugin-syntax-jsx@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w=="], - - 
"@babel/plugin-syntax-typescript": ["@babel/plugin-syntax-typescript@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ=="], - - "@babel/plugin-transform-modules-commonjs": ["@babel/plugin-transform-modules-commonjs@7.27.1", "", { "dependencies": { "@babel/helper-module-transforms": "^7.27.1", "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-OJguuwlTYlN0gBZFRPqwOGNWssZjfIUdS7HMYtN8c1KmwpwHFBwTeFZrg9XZa+DFTitWOW5iTAG7tyCUPsCCyw=="], - - "@babel/plugin-transform-react-jsx-self": ["@babel/plugin-transform-react-jsx-self@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw=="], - - "@babel/plugin-transform-react-jsx-source": ["@babel/plugin-transform-react-jsx-source@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw=="], - - "@babel/plugin-transform-typescript": ["@babel/plugin-transform-typescript@7.28.5", "", { "dependencies": { "@babel/helper-annotate-as-pure": "^7.27.3", "@babel/helper-create-class-features-plugin": "^7.28.5", "@babel/helper-plugin-utils": "^7.27.1", "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", "@babel/plugin-syntax-typescript": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-x2Qa+v/CuEoX7Dr31iAfr0IhInrVOWZU/2vJMJ00FOR/2nM0BcBEclpaf9sWCDc+v5e9dMrhSH8/atq/kX7+bA=="], - - "@babel/preset-typescript": ["@babel/preset-typescript@7.28.5", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1", "@babel/helper-validator-option": "^7.27.1", 
"@babel/plugin-syntax-jsx": "^7.27.1", "@babel/plugin-transform-modules-commonjs": "^7.27.1", "@babel/plugin-transform-typescript": "^7.28.5" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-+bQy5WOI2V6LJZpPVxY+yp66XdZ2yifu0Mc1aP5CQKgjn4QM5IN2i5fAZ4xKop47pr8rpVhiAeu+nDQa12C8+g=="], - - "@babel/runtime": ["@babel/runtime@7.28.4", "", {}, "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ=="], - - "@babel/template": ["@babel/template@7.27.2", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/parser": "^7.27.2", "@babel/types": "^7.27.1" } }, "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw=="], - - "@babel/traverse": ["@babel/traverse@7.28.5", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.5", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.28.5", "@babel/template": "^7.27.2", "@babel/types": "^7.28.5", "debug": "^4.3.1" } }, "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ=="], - - "@babel/types": ["@babel/types@7.28.5", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" } }, "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA=="], - - "@base-ui/react": ["@base-ui/react@1.0.0", "", { "dependencies": { "@babel/runtime": "^7.28.4", "@base-ui/utils": "0.2.3", "@floating-ui/react-dom": "^2.1.6", "@floating-ui/utils": "^0.2.10", "reselect": "^5.1.1", "tabbable": "^6.3.0", "use-sync-external-store": "^1.6.0" }, "peerDependencies": { "@types/react": "^17 || ^18 || ^19", "react": "^17 || ^18 || ^19", "react-dom": "^17 || ^18 || ^19" }, "optionalPeers": ["@types/react"] }, "sha512-4USBWz++DUSLTuIYpbYkSgy1F9ZmNG9S/lXvlUN6qMK0P0RlW+6eQmDUB4DgZ7HVvtXl4pvi4z5J2fv6Z3+9hg=="], - - "@base-ui/utils": ["@base-ui/utils@0.2.3", "", { "dependencies": { "@babel/runtime": 
"^7.28.4", "@floating-ui/utils": "^0.2.10", "reselect": "^5.1.1", "use-sync-external-store": "^1.6.0" }, "peerDependencies": { "@types/react": "^17 || ^18 || ^19", "react": "^17 || ^18 || ^19", "react-dom": "^17 || ^18 || ^19" }, "optionalPeers": ["@types/react"] }, "sha512-/CguQ2PDaOzeVOkllQR8nocJ0FFIDqsWIcURsVmm53QGo8NhFNpePjNlyPIB41luxfOqnG7PU0xicMEw3ls7XQ=="], - - "@commander-js/extra-typings": ["@commander-js/extra-typings@14.0.0", "", { "peerDependencies": { "commander": "~14.0.0" } }, "sha512-hIn0ncNaJRLkZrxBIp5AsW/eXEHNKYQBh0aPdoUqNgD+Io3NIykQqpKFyKcuasZhicGaEZJX/JBSIkZ4e5x8Dg=="], - - "@dotenvx/dotenvx": ["@dotenvx/dotenvx@1.51.2", "", { "dependencies": { "commander": "^11.1.0", "dotenv": "^17.2.1", "eciesjs": "^0.4.10", "execa": "^5.1.1", "fdir": "^6.2.0", "ignore": "^5.3.0", "object-treeify": "1.1.33", "picomatch": "^4.0.2", "which": "^4.0.0" }, "bin": { "dotenvx": "src/cli/dotenvx.js" } }, "sha512-+693mNflujDZxudSEqSNGpn92QgFhJlBn9q2mDQ9yGWyHuz3hZ8B5g3EXCwdAz4DMJAI+OFCIbfEFZS+YRdrEA=="], - - "@ecies/ciphers": ["@ecies/ciphers@0.2.5", "", { "peerDependencies": { "@noble/ciphers": "^1.0.0" } }, "sha512-GalEZH4JgOMHYYcYmVqnFirFsjZHeoGMDt9IxEnM9F7GRUUyUksJ7Ou53L83WHJq3RWKD3AcBpo0iQh0oMpf8A=="], - - "@emnapi/core": ["@emnapi/core@1.7.1", "", { "dependencies": { "@emnapi/wasi-threads": "1.1.0", "tslib": "^2.4.0" } }, "sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg=="], - - "@emnapi/runtime": ["@emnapi/runtime@1.7.1", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA=="], - - "@emnapi/wasi-threads": ["@emnapi/wasi-threads@1.1.0", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ=="], - - "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.25.12", "", { "os": "aix", "cpu": "ppc64" }, 
"sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA=="], - - "@esbuild/android-arm": ["@esbuild/android-arm@0.25.12", "", { "os": "android", "cpu": "arm" }, "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg=="], - - "@esbuild/android-arm64": ["@esbuild/android-arm64@0.25.12", "", { "os": "android", "cpu": "arm64" }, "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg=="], - - "@esbuild/android-x64": ["@esbuild/android-x64@0.25.12", "", { "os": "android", "cpu": "x64" }, "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg=="], - - "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.25.12", "", { "os": "darwin", "cpu": "arm64" }, "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg=="], - - "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.25.12", "", { "os": "darwin", "cpu": "x64" }, "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA=="], - - "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.25.12", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg=="], - - "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.25.12", "", { "os": "freebsd", "cpu": "x64" }, "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ=="], - - "@esbuild/linux-arm": ["@esbuild/linux-arm@0.25.12", "", { "os": "linux", "cpu": "arm" }, "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw=="], - - "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.25.12", "", { "os": "linux", "cpu": "arm64" }, "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ=="], - - "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.25.12", "", { "os": "linux", "cpu": "ia32" 
}, "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA=="], - - "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng=="], - - "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw=="], - - "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.25.12", "", { "os": "linux", "cpu": "ppc64" }, "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA=="], - - "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.25.12", "", { "os": "linux", "cpu": "none" }, "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w=="], - - "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.25.12", "", { "os": "linux", "cpu": "s390x" }, "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg=="], - - "@esbuild/linux-x64": ["@esbuild/linux-x64@0.25.12", "", { "os": "linux", "cpu": "x64" }, "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw=="], - - "@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.25.12", "", { "os": "none", "cpu": "arm64" }, "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg=="], - - "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.25.12", "", { "os": "none", "cpu": "x64" }, "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ=="], - - "@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.25.12", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A=="], - - "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.25.12", "", { "os": "openbsd", 
"cpu": "x64" }, "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw=="], - - "@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.25.12", "", { "os": "none", "cpu": "arm64" }, "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg=="], - - "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.25.12", "", { "os": "sunos", "cpu": "x64" }, "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w=="], - - "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.25.12", "", { "os": "win32", "cpu": "arm64" }, "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg=="], - - "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.25.12", "", { "os": "win32", "cpu": "ia32" }, "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ=="], - - "@esbuild/win32-x64": ["@esbuild/win32-x64@0.25.12", "", { "os": "win32", "cpu": "x64" }, "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA=="], - - "@eslint-community/eslint-utils": ["@eslint-community/eslint-utils@4.9.0", "", { "dependencies": { "eslint-visitor-keys": "^3.4.3" }, "peerDependencies": { "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g=="], - - "@eslint-community/regexpp": ["@eslint-community/regexpp@4.12.2", "", {}, "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew=="], - - "@eslint/config-array": ["@eslint/config-array@0.21.1", "", { "dependencies": { "@eslint/object-schema": "^2.1.7", "debug": "^4.3.1", "minimatch": "^3.1.2" } }, "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA=="], - - "@eslint/config-helpers": ["@eslint/config-helpers@0.4.2", "", { "dependencies": { "@eslint/core": "^0.17.0" } }, 
"sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw=="], - - "@eslint/core": ["@eslint/core@0.17.0", "", { "dependencies": { "@types/json-schema": "^7.0.15" } }, "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ=="], - - "@eslint/eslintrc": ["@eslint/eslintrc@3.3.3", "", { "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", "espree": "^10.0.1", "globals": "^14.0.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", "js-yaml": "^4.1.1", "minimatch": "^3.1.2", "strip-json-comments": "^3.1.1" } }, "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ=="], - - "@eslint/js": ["@eslint/js@9.39.2", "", {}, "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA=="], - - "@eslint/object-schema": ["@eslint/object-schema@2.1.7", "", {}, "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA=="], - - "@eslint/plugin-kit": ["@eslint/plugin-kit@0.4.1", "", { "dependencies": { "@eslint/core": "^0.17.0", "levn": "^0.4.1" } }, "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA=="], - - "@exodus/schemasafe": ["@exodus/schemasafe@1.3.0", "", {}, "sha512-5Aap/GaRupgNx/feGBwLLTVv8OQFfv3pq2lPRzPg9R+IOBnDgghTGW7l7EuVXOvg5cc/xSAlRW8rBrjIC3Nvqw=="], - - "@faker-js/faker": ["@faker-js/faker@10.1.0", "", {}, "sha512-C3mrr3b5dRVlKPJdfrAXS8+dq+rq8Qm5SNRazca0JKgw1HQERFmrVb0towvMmw5uu8hHKNiQasMaR/tydf3Zsg=="], - - "@floating-ui/core": ["@floating-ui/core@1.7.3", "", { "dependencies": { "@floating-ui/utils": "^0.2.10" } }, "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w=="], - - "@floating-ui/dom": ["@floating-ui/dom@1.7.4", "", { "dependencies": { "@floating-ui/core": "^1.7.3", "@floating-ui/utils": "^0.2.10" } }, 
"sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA=="], - - "@floating-ui/react-dom": ["@floating-ui/react-dom@2.1.6", "", { "dependencies": { "@floating-ui/dom": "^1.7.4" }, "peerDependencies": { "react": ">=16.8.0", "react-dom": ">=16.8.0" } }, "sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw=="], - - "@floating-ui/utils": ["@floating-ui/utils@0.2.10", "", {}, "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ=="], - - "@fontsource-variable/jetbrains-mono": ["@fontsource-variable/jetbrains-mono@5.2.8", "", {}, "sha512-WBA9elru6Jdp5df2mES55wuOO0WIrn3kpXnI4+W2ek5u3ZgLS9XS4gmIlcQhiZOWEKl95meYdvK7xI+ETLCq/Q=="], - - "@gerrit0/mini-shiki": ["@gerrit0/mini-shiki@3.20.0", "", { "dependencies": { "@shikijs/engine-oniguruma": "^3.20.0", "@shikijs/langs": "^3.20.0", "@shikijs/themes": "^3.20.0", "@shikijs/types": "^3.20.0", "@shikijs/vscode-textmate": "^10.0.2" } }, "sha512-Wa57i+bMpK6PGJZ1f2myxo3iO+K/kZikcyvH8NIqNNZhQUbDav7V9LQmWOXhf946mz5c1NZ19WMsGYiDKTryzQ=="], - - "@hono/node-server": ["@hono/node-server@1.19.7", "", { "peerDependencies": { "hono": "^4" } }, "sha512-vUcD0uauS7EU2caukW8z5lJKtoGMokxNbJtBiwHgpqxEXokaHCBkQUmCHhjFB1VUTWdqj25QoMkMKzgjq+uhrw=="], - - "@humanfs/core": ["@humanfs/core@0.19.1", "", {}, "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA=="], - - "@humanfs/node": ["@humanfs/node@0.16.7", "", { "dependencies": { "@humanfs/core": "^0.19.1", "@humanwhocodes/retry": "^0.4.0" } }, "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ=="], - - "@humanwhocodes/module-importer": ["@humanwhocodes/module-importer@1.0.1", "", {}, "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA=="], - - "@humanwhocodes/retry": ["@humanwhocodes/retry@0.4.3", "", {}, 
"sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ=="], - - "@ibm-cloud/openapi-ruleset": ["@ibm-cloud/openapi-ruleset@1.33.5", "", { "dependencies": { "@ibm-cloud/openapi-ruleset-utilities": "1.9.0", "@stoplight/spectral-formats": "^1.8.2", "@stoplight/spectral-functions": "^1.9.3", "@stoplight/spectral-rulesets": "^1.21.3", "chalk": "^4.1.2", "inflected": "^2.1.0", "jsonschema": "^1.5.0", "lodash": "^4.17.21", "loglevel": "^1.9.2", "loglevel-plugin-prefix": "0.8.4", "minimatch": "^6.2.0", "validator": "^13.15.23" } }, "sha512-oT8USsTulFAA8FiBN0lA2rJqQI2lIt+HP2pdakGQXo3EviL2vqJTgpSCRwjl6mLJL158f1BVcdQUOEFGxomK3w=="], - - "@ibm-cloud/openapi-ruleset-utilities": ["@ibm-cloud/openapi-ruleset-utilities@1.9.0", "", {}, "sha512-AoFbSarOqFBYH+1TZ9Ahkm2IWYSi5v0pBk88fpV+5b3qGJukypX8PwvCWADjuyIccKg48/F73a6hTTkBzDQ2UA=="], - - "@inquirer/ansi": ["@inquirer/ansi@1.0.2", "", {}, "sha512-S8qNSZiYzFd0wAcyG5AXCvUHC5Sr7xpZ9wZ2py9XR88jUz8wooStVx5M6dRzczbBWjic9NP7+rY0Xi7qqK/aMQ=="], - - "@inquirer/confirm": ["@inquirer/confirm@5.1.21", "", { "dependencies": { "@inquirer/core": "^10.3.2", "@inquirer/type": "^3.0.10" }, "peerDependencies": { "@types/node": ">=18" }, "optionalPeers": ["@types/node"] }, "sha512-KR8edRkIsUayMXV+o3Gv+q4jlhENF9nMYUZs9PA2HzrXeHI8M5uDag70U7RJn9yyiMZSbtF5/UexBtAVtZGSbQ=="], - - "@inquirer/core": ["@inquirer/core@10.3.2", "", { "dependencies": { "@inquirer/ansi": "^1.0.2", "@inquirer/figures": "^1.0.15", "@inquirer/type": "^3.0.10", "cli-width": "^4.1.0", "mute-stream": "^2.0.0", "signal-exit": "^4.1.0", "wrap-ansi": "^6.2.0", "yoctocolors-cjs": "^2.1.3" }, "peerDependencies": { "@types/node": ">=18" }, "optionalPeers": ["@types/node"] }, "sha512-43RTuEbfP8MbKzedNqBrlhhNKVwoK//vUFNW3Q3vZ88BLcrs4kYpGg+B2mm5p2K/HfygoCxuKwJJiv8PbGmE0A=="], - - "@inquirer/figures": ["@inquirer/figures@1.0.15", "", {}, "sha512-t2IEY+unGHOzAaVM5Xx6DEWKeXlDDcNPeDyUpsRc6CUhBfU3VQOEl+Vssh7VNp1dR8MdUJBWhuObjXCsVpjN5g=="], - - "@inquirer/type": 
["@inquirer/type@3.0.10", "", { "peerDependencies": { "@types/node": ">=18" }, "optionalPeers": ["@types/node"] }, "sha512-BvziSRxfz5Ov8ch0z/n3oijRSEcEsHnhggm4xFZe93DHcUCTlutlq9Ox4SVENAfcRD22UQq7T/atg9Wr3k09eA=="], - - "@isaacs/balanced-match": ["@isaacs/balanced-match@4.0.1", "", {}, "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ=="], - - "@isaacs/brace-expansion": ["@isaacs/brace-expansion@5.0.0", "", { "dependencies": { "@isaacs/balanced-match": "^4.0.1" } }, "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA=="], - - "@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.3.13", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA=="], - - "@jridgewell/remapping": ["@jridgewell/remapping@2.3.5", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ=="], - - "@jridgewell/resolve-uri": ["@jridgewell/resolve-uri@3.1.2", "", {}, "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw=="], - - "@jridgewell/sourcemap-codec": ["@jridgewell/sourcemap-codec@1.5.5", "", {}, "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og=="], - - "@jridgewell/trace-mapping": ["@jridgewell/trace-mapping@0.3.31", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" } }, "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw=="], - - "@jsep-plugin/assignment": ["@jsep-plugin/assignment@1.3.0", "", { "peerDependencies": { "jsep": "^0.4.0||^1.0.0" } }, 
"sha512-VVgV+CXrhbMI3aSusQyclHkenWSAm95WaiKrMxRFam3JSUiIaQjoMIw2sEs/OX4XifnqeQUN4DYbJjlA8EfktQ=="], - - "@jsep-plugin/regex": ["@jsep-plugin/regex@1.0.4", "", { "peerDependencies": { "jsep": "^0.4.0||^1.0.0" } }, "sha512-q7qL4Mgjs1vByCaTnDFcBnV9HS7GVPJX5vyVoCgZHNSC9rjwIlmbXG5sUuorR5ndfHAIlJ8pVStxvjXHbNvtUg=="], - - "@jsep-plugin/ternary": ["@jsep-plugin/ternary@1.1.4", "", { "peerDependencies": { "jsep": "^0.4.0||^1.0.0" } }, "sha512-ck5wiqIbqdMX6WRQztBL7ASDty9YLgJ3sSAK5ZpBzXeySvFGCzIvM6UiAI4hTZ22fEcYQVV/zhUbNscggW+Ukg=="], - - "@modelcontextprotocol/sdk": ["@modelcontextprotocol/sdk@1.25.1", "", { "dependencies": { "@hono/node-server": "^1.19.7", "ajv": "^8.17.1", "ajv-formats": "^3.0.1", "content-type": "^1.0.5", "cors": "^2.8.5", "cross-spawn": "^7.0.5", "eventsource": "^3.0.2", "eventsource-parser": "^3.0.0", "express": "^5.0.1", "express-rate-limit": "^7.5.0", "jose": "^6.1.1", "json-schema-typed": "^8.0.2", "pkce-challenge": "^5.0.0", "raw-body": "^3.0.0", "zod": "^3.25 || ^4.0", "zod-to-json-schema": "^3.25.0" }, "peerDependencies": { "@cfworker/json-schema": "^4.1.1" }, "optionalPeers": ["@cfworker/json-schema"] }, "sha512-yO28oVFFC7EBoiKdAn+VqRm+plcfv4v0xp6osG/VsCB0NlPZWi87ajbCZZ8f/RvOFLEu7//rSRmuZZ7lMoe3gQ=="], - - "@mswjs/interceptors": ["@mswjs/interceptors@0.40.0", "", { "dependencies": { "@open-draft/deferred-promise": "^2.2.0", "@open-draft/logger": "^0.3.0", "@open-draft/until": "^2.0.0", "is-node-process": "^1.2.0", "outvariant": "^1.4.3", "strict-event-emitter": "^0.5.1" } }, "sha512-EFd6cVbHsgLa6wa4RljGj6Wk75qoHxUSyc5asLyyPSyuhIcdS2Q3Phw6ImS1q+CkALthJRShiYfKANcQMuMqsQ=="], - - "@napi-rs/wasm-runtime": ["@napi-rs/wasm-runtime@1.1.0", "", { "dependencies": { "@emnapi/core": "^1.7.1", "@emnapi/runtime": "^1.7.1", "@tybys/wasm-util": "^0.10.1" } }, "sha512-Fq6DJW+Bb5jaWE69/qOE0D1TUN9+6uWhCeZpdnSBk14pjLcCWR7Q8n49PTSPHazM37JqrsdpEthXy2xn6jWWiA=="], - - "@noble/ciphers": ["@noble/ciphers@1.3.0", "", {}, 
"sha512-2I0gnIVPtfnMw9ee9h1dJG7tp81+8Ob3OJb3Mv37rx5L40/b0i7djjCVvGOVqc9AEIQyvyu1i6ypKdFw8R8gQw=="], - - "@noble/curves": ["@noble/curves@1.9.7", "", { "dependencies": { "@noble/hashes": "1.8.0" } }, "sha512-gbKGcRUYIjA3/zCCNaWDciTMFI0dCkvou3TL8Zmy5Nc7sJ47a0jtOeZoTaMxkuqRo9cRhjOdZJXegxYE5FN/xw=="], - - "@noble/hashes": ["@noble/hashes@1.8.0", "", {}, "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A=="], - - "@nodelib/fs.scandir": ["@nodelib/fs.scandir@2.1.5", "", { "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" } }, "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g=="], - - "@nodelib/fs.stat": ["@nodelib/fs.stat@2.0.5", "", {}, "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="], - - "@nodelib/fs.walk": ["@nodelib/fs.walk@1.2.8", "", { "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" } }, "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg=="], - - "@open-draft/deferred-promise": ["@open-draft/deferred-promise@2.2.0", "", {}, "sha512-CecwLWx3rhxVQF6V4bAgPS5t+So2sTbPgAzafKkVizyi7tlwpcFpdFqq+wqF2OwNBmqFuu6tOyouTuxgpMfzmA=="], - - "@open-draft/logger": ["@open-draft/logger@0.3.0", "", { "dependencies": { "is-node-process": "^1.2.0", "outvariant": "^1.4.0" } }, "sha512-X2g45fzhxH238HKO4xbSr7+wBS8Fvw6ixhTDuvLd5mqh6bJJCFAPwU9mPDxbcrRtfxv4u5IHCEH77BmxvXmmxQ=="], - - "@open-draft/until": ["@open-draft/until@2.1.0", "", {}, "sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg=="], - - "@orval/angular": ["@orval/angular@7.17.2", "", { "dependencies": { "@orval/core": "7.17.2" } }, "sha512-+WBILd8KJD/I8eyX2eyTmzhyY+oTvbCTV25YoF2/UzWaBIq0reIhe5i2J2vUBXFnhZDLfwJYn70v341y7NSPlQ=="], - - "@orval/axios": ["@orval/axios@7.17.2", "", { "dependencies": { "@orval/core": "7.17.2" } }, 
"sha512-9wNxnEtNSI4S22V5ta4f5jaFAfrJ5PEcQwALP6ogsBxT0lUhzHWhRGWaopA2SJLW8eCGfPVo4fF/x/2i9ecG8w=="], - - "@orval/core": ["@orval/core@7.17.2", "", { "dependencies": { "@apidevtools/swagger-parser": "^12.1.0", "@ibm-cloud/openapi-ruleset": "^1.33.1", "@stoplight/spectral-core": "^1.20.0", "acorn": "^8.15.0", "chalk": "^4.1.2", "compare-versions": "^6.1.1", "debug": "^4.4.3", "esbuild": "^0.25.11", "esutils": "2.0.3", "fs-extra": "^11.3.1", "globby": "11.1.0", "lodash.isempty": "^4.4.0", "lodash.uniq": "^4.5.0", "lodash.uniqby": "^4.7.0", "lodash.uniqwith": "^4.5.0", "micromatch": "^4.0.8", "openapi3-ts": "4.5.0", "swagger2openapi": "^7.0.8", "typedoc": "^0.28.14" } }, "sha512-7kv7JgC6Va9hE/OiYTaJEU52Uy52y8Ill26QZufgP9yeMT60h3yiycF//LSQp83P+fGzcS9PLuCS4uRPmrMtgA=="], - - "@orval/fetch": ["@orval/fetch@7.17.2", "", { "dependencies": { "@orval/core": "7.17.2", "openapi3-ts": "4.5.0" } }, "sha512-gBgk4POAacKs4c7VKCQPTkdLWvxF2s1p/EBKT1N6h2QKUxZcO7MbV141FQXFghFGFYgpuNiUTlox4BbSeCVrXw=="], - - "@orval/hono": ["@orval/hono@7.17.2", "", { "dependencies": { "@orval/core": "7.17.2", "@orval/zod": "7.17.2", "fs-extra": "^11.3.2", "lodash.uniq": "^4.5.0", "openapi3-ts": "4.5.0" } }, "sha512-YSTDYPysnD5LyugRKGSU4kZEMgK0HeLeZ7r1X/4u6U5FrRdhmjf+gjnCseBR61yKwS/8BLMogIe7qw8uOs83Hg=="], - - "@orval/mcp": ["@orval/mcp@7.17.2", "", { "dependencies": { "@orval/core": "7.17.2", "@orval/fetch": "7.17.2", "@orval/zod": "7.17.2", "openapi3-ts": "4.5.0" } }, "sha512-7aJcZMSh2ZPHdcgz0JdilVTLcaMbPeEHJgpJTvSUZembgQvBwJYfalaaROfBmRH9nSfHP11DYmsuG5kTIfd3JA=="], - - "@orval/mock": ["@orval/mock@7.17.2", "", { "dependencies": { "@orval/core": "7.17.2", "openapi3-ts": "4.5.0" } }, "sha512-otVzA/yR2lKQJRSIeVyuh+tl/uE/EJQ82oJg9Zu11S70QzxMTCg/IEe8fbTeE3APtoujhACB6aZ/qqbmK29ugw=="], - - "@orval/query": ["@orval/query@7.17.2", "", { "dependencies": { "@orval/core": "7.17.2", "@orval/fetch": "7.17.2", "chalk": "^4.1.2", "lodash.omitby": "^4.6.0" } }, 
"sha512-dbD3U6CuRBYszMQOlitO4MryLnrlZIHwjNGsree0l5WO25e9QrfEMPxFU9wVaLnP/w10ODMBsBIBNUmy5ifXxQ=="], - - "@orval/swr": ["@orval/swr@7.17.2", "", { "dependencies": { "@orval/core": "7.17.2", "@orval/fetch": "7.17.2" } }, "sha512-BWN7E41JEfr2qVFi+sNwPH3jXJgL4BiIAJTqZraZW0S89EmI4kU2yh3YveNmeopXai7mssaiiAlE8qe4wFcWnw=="], - - "@orval/zod": ["@orval/zod@7.17.2", "", { "dependencies": { "@orval/core": "7.17.2", "lodash.uniq": "^4.5.0", "openapi3-ts": "4.5.0" } }, "sha512-Zf2sAQAGZ99VzummF4/Tq5pbXe6qR+w37XmLX5bMokshT8lEuV9goT4gftW4AWGY3Azrp/Syj3fdbSBthN/KPw=="], - - "@oxc-project/runtime": ["@oxc-project/runtime@0.97.0", "", {}, "sha512-yH0zw7z+jEws4dZ4IUKoix5Lh3yhqIJWF9Dc8PWvhpo7U7O+lJrv7ZZL4BeRO0la8LBQFwcCewtLBnVV7hPe/w=="], - - "@oxc-project/types": ["@oxc-project/types@0.97.0", "", {}, "sha512-lxmZK4xFrdvU0yZiDwgVQTCvh2gHWBJCBk5ALsrtsBWhs0uDIi+FTOnXRQeQfs304imdvTdaakT/lqwQ8hkOXQ=="], - - "@radix-ui/number": ["@radix-ui/number@1.1.1", "", {}, "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g=="], - - "@radix-ui/primitive": ["@radix-ui/primitive@1.1.3", "", {}, "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg=="], - - "@radix-ui/react-accessible-icon": ["@radix-ui/react-accessible-icon@1.1.7", "", { "dependencies": { "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-XM+E4WXl0OqUJFovy6GjmxxFyx9opfCAIUku4dlKRd5YEPqt4kALOkQOp0Of6reHuUkJuiPBEc5k0o4z4lTC8A=="], - - "@radix-ui/react-accordion": ["@radix-ui/react-accordion@1.2.12", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collapsible": "1.1.12", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", 
"@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-T4nygeh9YE9dLRPhAHSeOZi7HBXo+0kYIPJXayZfvWOWA0+n3dESrZbjfDPUABkUNym6Hd+f2IR113To8D2GPA=="], - - "@radix-ui/react-alert-dialog": ["@radix-ui/react-alert-dialog@1.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dialog": "1.1.15", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-oTVLkEw5GpdRe29BqJ0LSDFWI3qu0vR1M0mUkOQWDIUnY/QIkLpgDMWuKxP94c2NAC2LGcgVhG1ImF3jkZ5wXw=="], - - "@radix-ui/react-arrow": ["@radix-ui/react-arrow@1.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w=="], - - "@radix-ui/react-aspect-ratio": ["@radix-ui/react-aspect-ratio@1.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": 
["@types/react", "@types/react-dom"] }, "sha512-Yq6lvO9HQyPwev1onK1daHCHqXVLzPhSVjmsNjCa2Zcxy2f7uJD2itDtxknv6FzAKCwD1qQkeVDmX/cev13n/g=="], - - "@radix-ui/react-avatar": ["@radix-ui/react-avatar@1.1.10", "", { "dependencies": { "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-is-hydrated": "0.1.0", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-V8piFfWapM5OmNCXTzVQY+E1rDa53zY+MQ4Y7356v4fFz6vqCyUtIz2rUD44ZEdwg78/jKmMJHj07+C/Z/rcog=="], - - "@radix-ui/react-checkbox": ["@radix-ui/react-checkbox@1.3.3", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-wBbpv+NQftHDdG86Qc0pIyXk5IR3tM8Vd0nWLKDcX8nNn4nXFOFwsKuqw2okA/1D/mpaAkmuyndrPJTYDNZtFw=="], - - "@radix-ui/react-collapsible": ["@radix-ui/react-collapsible@1.1.12", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", 
"@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Uu+mSh4agx2ib1uIGPP4/CKNULyajb3p92LsVXmH2EHVMTfZWpll88XJ0j4W0z3f8NK1eYl1+Mf/szHPmcHzyA=="], - - "@radix-ui/react-collection": ["@radix-ui/react-collection@1.1.7", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw=="], - - "@radix-ui/react-compose-refs": ["@radix-ui/react-compose-refs@1.1.2", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg=="], - - "@radix-ui/react-context": ["@radix-ui/react-context@1.1.2", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA=="], - - "@radix-ui/react-context-menu": ["@radix-ui/react-context-menu@2.2.16", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-menu": "2.1.16", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", 
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-O8morBEW+HsVG28gYDZPTrT9UUovQUlJue5YO836tiTJhuIWBm/zQHc7j388sHWtdH/xUZurK9olD2+pcqx5ww=="], - - "@radix-ui/react-dialog": ["@radix-ui/react-dialog@1.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-focus-guards": "1.1.3", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw=="], - - "@radix-ui/react-direction": ["@radix-ui/react-direction@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw=="], - - "@radix-ui/react-dismissable-layer": ["@radix-ui/react-dismissable-layer@1.1.11", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-escape-keydown": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || 
^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg=="], - - "@radix-ui/react-dropdown-menu": ["@radix-ui/react-dropdown-menu@2.1.16", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-menu": "2.1.16", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-1PLGQEynI/3OX/ftV54COn+3Sud/Mn8vALg2rWnBLnRaGtJDduNW/22XjlGgPdpcIbiQxjKtb7BkcjP00nqfJw=="], - - "@radix-ui/react-focus-guards": ["@radix-ui/react-focus-guards@1.1.3", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw=="], - - "@radix-ui/react-focus-scope": ["@radix-ui/react-focus-scope@1.1.7", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw=="], - - "@radix-ui/react-form": ["@radix-ui/react-form@0.1.8", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": 
"1.1.1", "@radix-ui/react-label": "2.1.7", "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-QM70k4Zwjttifr5a4sZFts9fn8FzHYvQ5PiB19O2HsYibaHSVt9fH9rzB0XZo/YcM+b7t/p7lYCT/F5eOeF5yQ=="], - - "@radix-ui/react-hover-card": ["@radix-ui/react-hover-card@1.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-popper": "1.2.8", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-qgTkjNT1CfKMoP0rcasmlH2r1DAiYicWsDsufxl940sT2wHNEWWv6FMWIQXWhVdmC1d/HYfbhQx60KYyAtKxjg=="], - - "@radix-ui/react-id": ["@radix-ui/react-id@1.1.1", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg=="], - - "@radix-ui/react-label": ["@radix-ui/react-label@2.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, 
"sha512-YT1GqPSL8kJn20djelMX7/cTRp/Y9w5IZHvfxQTVHrOqa2yMl7i/UfMqKRU5V7mEyKTrUVgJXhNQPVCG8PBLoQ=="], - - "@radix-ui/react-menu": ["@radix-ui/react-menu@2.1.16", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-focus-guards": "1.1.3", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.8", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.11", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-callback-ref": "1.1.1", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-72F2T+PLlphrqLcAotYPp0uJMr5SjP5SL01wfEspJbru5Zs5vQaSHb4VB3ZMJPimgHHCHG7gMOeOB9H3Hdmtxg=="], - - "@radix-ui/react-menubar": ["@radix-ui/react-menubar@1.1.16", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-menu": "2.1.16", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.11", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, 
"sha512-EB1FktTz5xRRi2Er974AUQZWg2yVBb1yjip38/lgwtCVRd3a+maUoGHN/xs9Yv8SY8QwbSEb+YrxGadVWbEutA=="], - - "@radix-ui/react-navigation-menu": ["@radix-ui/react-navigation-menu@1.2.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-YB9mTFQvCOAQMHU+C/jVl96WmuWeltyUEpRJJky51huhds5W2FQr1J8D/16sQlf0ozxkPK8uF3niQMdUwZPv5w=="], - - "@radix-ui/react-one-time-password-field": ["@radix-ui/react-one-time-password-field@0.1.8", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.11", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-is-hydrated": "0.1.0", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, 
"sha512-ycS4rbwURavDPVjCb5iS3aG4lURFDILi6sKI/WITUMZ13gMmn/xGjpLoqBAalhJaDk8I3UbCM5GzKHrnzwHbvg=="], - - "@radix-ui/react-password-toggle-field": ["@radix-ui/react-password-toggle-field@0.1.3", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-is-hydrated": "0.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-/UuCrDBWravcaMix4TdT+qlNdVwOM1Nck9kWx/vafXsdfj1ChfhOdfi3cy9SGBpWgTXwYCuboT/oYpJy3clqfw=="], - - "@radix-ui/react-popover": ["@radix-ui/react-popover@1.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-focus-guards": "1.1.3", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.8", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA=="], - - "@radix-ui/react-popper": ["@radix-ui/react-popper@1.2.8", "", { "dependencies": { "@floating-ui/react-dom": 
"^2.0.0", "@radix-ui/react-arrow": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-rect": "1.1.1", "@radix-ui/react-use-size": "1.1.1", "@radix-ui/rect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw=="], - - "@radix-ui/react-portal": ["@radix-ui/react-portal@1.1.9", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ=="], - - "@radix-ui/react-presence": ["@radix-ui/react-presence@1.1.5", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ=="], - - "@radix-ui/react-primitive": ["@radix-ui/react-primitive@2.1.3", "", { "dependencies": { "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || 
^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ=="], - - "@radix-ui/react-progress": ["@radix-ui/react-progress@1.1.7", "", { "dependencies": { "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-vPdg/tF6YC/ynuBIJlk1mm7Le0VgW6ub6J2UWnTQ7/D23KXcPI1qy+0vBkgKgd38RCMJavBXpB83HPNFMTb0Fg=="], - - "@radix-ui/react-radio-group": ["@radix-ui/react-radio-group@1.3.8", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.11", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-VBKYIYImA5zsxACdisNQ3BjCBfmbGH3kQlnFVqlWU4tXwjy7cGX8ta80BcrO+WJXIn5iBylEH3K6ZTlee//lgQ=="], - - "@radix-ui/react-roving-focus": ["@radix-ui/react-roving-focus@1.1.11", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", 
"@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA=="], - - "@radix-ui/react-scroll-area": ["@radix-ui/react-scroll-area@1.2.10", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-tAXIa1g3sM5CGpVT0uIbUx/U3Gs5N8T52IICuCtObaos1S8fzsrPXG5WObkQN3S6NVl6wKgPhAIiBGbWnvc97A=="], - - "@radix-ui/react-select": ["@radix-ui/react-select@2.2.6", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-focus-guards": "1.1.3", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.8", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", 
"@radix-ui/react-visually-hidden": "1.2.3", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ=="], - - "@radix-ui/react-separator": ["@radix-ui/react-separator@1.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-0HEb8R9E8A+jZjvmFCy/J4xhbXy3TV+9XSnGJ3KvTtjlIUy/YQ/p6UYZvi7YbeoeXdyU9+Y3scizK6hkY37baA=="], - - "@radix-ui/react-slider": ["@radix-ui/react-slider@1.3.6", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-JPYb1GuM1bxfjMRlNLE+BcmBC8onfCi60Blk7OBqi2MLTFdS+8401U4uFjnwkOr49BLmXxLC6JHkvAsx5OJvHw=="], - - "@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.3", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || 
^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A=="], - - "@radix-ui/react-switch": ["@radix-ui/react-switch@1.2.6", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-bByzr1+ep1zk4VubeEVViV592vu2lHE2BZY5OnzehZqOOgogN80+mNtCqPkhn2gklJqOpxWgPoYTSnhBCqpOXQ=="], - - "@radix-ui/react-tabs": ["@radix-ui/react-tabs@1.1.13", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.11", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A=="], - - "@radix-ui/react-toast": ["@radix-ui/react-toast@1.2.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", 
"@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-3OSz3TacUWy4WtOXV38DggwxoqJK4+eDkNMl5Z/MJZaoUPaP4/9lf81xXMe1I2ReTAptverZUpbPY4wWwWyL5g=="], - - "@radix-ui/react-toggle": ["@radix-ui/react-toggle@1.1.10", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-lS1odchhFTeZv3xwHH31YPObmJn8gOg7Lq12inrr0+BH/l3Tsq32VfjqH1oh80ARM3mlkfMic15n0kg4sD1poQ=="], - - "@radix-ui/react-toggle-group": ["@radix-ui/react-toggle-group@1.1.11", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.11", "@radix-ui/react-toggle": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-5umnS0T8JQzQT6HbPyO7Hh9dgd82NmS36DQr+X/YJ9ctFNCiiQd6IJAYYZ33LUwm8M+taCz5t2ui29fHZc4Y6Q=="], - - "@radix-ui/react-toolbar": ["@radix-ui/react-toolbar@1.1.11", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", 
"@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.11", "@radix-ui/react-separator": "1.1.7", "@radix-ui/react-toggle-group": "1.1.11" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-4ol06/1bLoFu1nwUqzdD4Y5RZ9oDdKeiHIsntug54Hcr1pgaHiPqHFEaXI1IFP/EsOfROQZ8Mig9VTIRza6Tjg=="], - - "@radix-ui/react-tooltip": ["@radix-ui/react-tooltip@1.2.8", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.8", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg=="], - - "@radix-ui/react-use-callback-ref": ["@radix-ui/react-use-callback-ref@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg=="], - - "@radix-ui/react-use-controllable-state": ["@radix-ui/react-use-controllable-state@1.2.2", "", { "dependencies": { "@radix-ui/react-use-effect-event": "0.0.2", 
"@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg=="], - - "@radix-ui/react-use-effect-event": ["@radix-ui/react-use-effect-event@0.0.2", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA=="], - - "@radix-ui/react-use-escape-keydown": ["@radix-ui/react-use-escape-keydown@1.1.1", "", { "dependencies": { "@radix-ui/react-use-callback-ref": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g=="], - - "@radix-ui/react-use-is-hydrated": ["@radix-ui/react-use-is-hydrated@0.1.0", "", { "dependencies": { "use-sync-external-store": "^1.5.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-U+UORVEq+cTnRIaostJv9AGdV3G6Y+zbVd+12e18jQ5A3c0xL03IhnHuiU4UV69wolOQp5GfR58NW/EgdQhwOA=="], - - "@radix-ui/react-use-layout-effect": ["@radix-ui/react-use-layout-effect@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ=="], - - "@radix-ui/react-use-previous": ["@radix-ui/react-use-previous@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, 
"optionalPeers": ["@types/react"] }, "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ=="], - - "@radix-ui/react-use-rect": ["@radix-ui/react-use-rect@1.1.1", "", { "dependencies": { "@radix-ui/rect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w=="], - - "@radix-ui/react-use-size": ["@radix-ui/react-use-size@1.1.1", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ=="], - - "@radix-ui/react-visually-hidden": ["@radix-ui/react-visually-hidden@1.2.3", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug=="], - - "@radix-ui/rect": ["@radix-ui/rect@1.1.1", "", {}, "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw=="], - - "@rolldown/binding-android-arm64": ["@rolldown/binding-android-arm64@1.0.0-beta.50", "", { "os": "android", "cpu": "arm64" }, "sha512-XlEkrOIHLyGT3avOgzfTFSjG+f+dZMw+/qd+Y3HLN86wlndrB/gSimrJCk4gOhr1XtRtEKfszpadI3Md4Z4/Ag=="], - - "@rolldown/binding-darwin-arm64": ["@rolldown/binding-darwin-arm64@1.0.0-beta.50", "", { "os": "darwin", "cpu": "arm64" }, "sha512-+JRqKJhoFlt5r9q+DecAGPLZ5PxeLva+wCMtAuoFMWPoZzgcYrr599KQ+Ix0jwll4B4HGP43avu9My8KtSOR+w=="], - - 
"@rolldown/binding-darwin-x64": ["@rolldown/binding-darwin-x64@1.0.0-beta.50", "", { "os": "darwin", "cpu": "x64" }, "sha512-fFXDjXnuX7/gQZQm/1FoivVtRcyAzdjSik7Eo+9iwPQ9EgtA5/nB2+jmbzaKtMGG3q+BnZbdKHCtOacmNrkIDA=="], - - "@rolldown/binding-freebsd-x64": ["@rolldown/binding-freebsd-x64@1.0.0-beta.50", "", { "os": "freebsd", "cpu": "x64" }, "sha512-F1b6vARy49tjmT/hbloplzgJS7GIvwWZqt+tAHEstCh0JIh9sa8FAMVqEmYxDviqKBaAI8iVvUREm/Kh/PD26Q=="], - - "@rolldown/binding-linux-arm-gnueabihf": ["@rolldown/binding-linux-arm-gnueabihf@1.0.0-beta.50", "", { "os": "linux", "cpu": "arm" }, "sha512-U6cR76N8T8M6lHj7EZrQ3xunLPxSvYYxA8vJsBKZiFZkT8YV4kjgCO3KwMJL0NOjQCPGKyiXO07U+KmJzdPGRw=="], - - "@rolldown/binding-linux-arm64-gnu": ["@rolldown/binding-linux-arm64-gnu@1.0.0-beta.50", "", { "os": "linux", "cpu": "arm64" }, "sha512-ONgyjofCrrE3bnh5GZb8EINSFyR/hmwTzZ7oVuyUB170lboza1VMCnb8jgE6MsyyRgHYmN8Lb59i3NKGrxrYjw=="], - - "@rolldown/binding-linux-arm64-musl": ["@rolldown/binding-linux-arm64-musl@1.0.0-beta.50", "", { "os": "linux", "cpu": "arm64" }, "sha512-L0zRdH2oDPkmB+wvuTl+dJbXCsx62SkqcEqdM+79LOcB+PxbAxxjzHU14BuZIQdXcAVDzfpMfaHWzZuwhhBTcw=="], - - "@rolldown/binding-linux-x64-gnu": ["@rolldown/binding-linux-x64-gnu@1.0.0-beta.50", "", { "os": "linux", "cpu": "x64" }, "sha512-gyoI8o/TGpQd3OzkJnh1M2kxy1Bisg8qJ5Gci0sXm9yLFzEXIFdtc4EAzepxGvrT2ri99ar5rdsmNG0zP0SbIg=="], - - "@rolldown/binding-linux-x64-musl": ["@rolldown/binding-linux-x64-musl@1.0.0-beta.50", "", { "os": "linux", "cpu": "x64" }, "sha512-zti8A7M+xFDpKlghpcCAzyOi+e5nfUl3QhU023ce5NCgUxRG5zGP2GR9LTydQ1rnIPwZUVBWd4o7NjZDaQxaXA=="], - - "@rolldown/binding-openharmony-arm64": ["@rolldown/binding-openharmony-arm64@1.0.0-beta.50", "", { "os": "none", "cpu": "arm64" }, "sha512-eZUssog7qljrrRU9Mi0eqYEPm3Ch0UwB+qlWPMKSUXHNqhm3TvDZarJQdTevGEfu3EHAXJvBIe0YFYr0TPVaMA=="], - - "@rolldown/binding-wasm32-wasi": ["@rolldown/binding-wasm32-wasi@1.0.0-beta.50", "", { "dependencies": { "@napi-rs/wasm-runtime": "^1.0.7" }, "cpu": "none" }, 
"sha512-nmCN0nIdeUnmgeDXiQ+2HU6FT162o+rxnF7WMkBm4M5Ds8qTU7Dzv2Wrf22bo4ftnlrb2hKK6FSwAJSAe2FWLg=="], - - "@rolldown/binding-win32-arm64-msvc": ["@rolldown/binding-win32-arm64-msvc@1.0.0-beta.50", "", { "os": "win32", "cpu": "arm64" }, "sha512-7kcNLi7Ua59JTTLvbe1dYb028QEPaJPJQHqkmSZ5q3tJueUeb6yjRtx8mw4uIqgWZcnQHAR3PrLN4XRJxvgIkA=="], - - "@rolldown/binding-win32-ia32-msvc": ["@rolldown/binding-win32-ia32-msvc@1.0.0-beta.50", "", { "os": "win32", "cpu": "ia32" }, "sha512-lL70VTNvSCdSZkDPPVMwWn/M2yQiYvSoXw9hTLgdIWdUfC3g72UaruezusR6ceRuwHCY1Ayu2LtKqXkBO5LIwg=="], - - "@rolldown/binding-win32-x64-msvc": ["@rolldown/binding-win32-x64-msvc@1.0.0-beta.50", "", { "os": "win32", "cpu": "x64" }, "sha512-4qU4x5DXWB4JPjyTne/wBNPqkbQU8J45bl21geERBKtEittleonioACBL1R0PsBu0Aq21SwMK5a9zdBkWSlQtQ=="], - - "@rolldown/pluginutils": ["@rolldown/pluginutils@1.0.0-beta.53", "", {}, "sha512-vENRlFU4YbrwVqNDZ7fLvy+JR1CRkyr01jhSiDpE1u6py3OMzQfztQU2jxykW3ALNxO4kSlqIDeYyD0Y9RcQeQ=="], - - "@sec-ant/readable-stream": ["@sec-ant/readable-stream@0.4.1", "", {}, "sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg=="], - - "@shikijs/engine-oniguruma": ["@shikijs/engine-oniguruma@3.20.0", "", { "dependencies": { "@shikijs/types": "3.20.0", "@shikijs/vscode-textmate": "^10.0.2" } }, "sha512-Yx3gy7xLzM0ZOjqoxciHjA7dAt5tyzJE3L4uQoM83agahy+PlW244XJSrmJRSBvGYELDhYXPacD4R/cauV5bzQ=="], - - "@shikijs/langs": ["@shikijs/langs@3.20.0", "", { "dependencies": { "@shikijs/types": "3.20.0" } }, "sha512-le+bssCxcSHrygCWuOrYJHvjus6zhQ2K7q/0mgjiffRbkhM4o1EWu2m+29l0yEsHDbWaWPNnDUTRVVBvBBeKaA=="], - - "@shikijs/themes": ["@shikijs/themes@3.20.0", "", { "dependencies": { "@shikijs/types": "3.20.0" } }, "sha512-U1NSU7Sl26Q7ErRvJUouArxfM2euWqq1xaSrbqMu2iqa+tSp0D1Yah8216sDYbdDHw4C8b75UpE65eWorm2erQ=="], - - "@shikijs/types": ["@shikijs/types@3.20.0", "", { "dependencies": { "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, 
"sha512-lhYAATn10nkZcBQ0BlzSbJA3wcmL5MXUUF8d2Zzon6saZDlToKaiRX60n2+ZaHJCmXEcZRWNzn+k9vplr8Jhsw=="], - - "@shikijs/vscode-textmate": ["@shikijs/vscode-textmate@10.0.2", "", {}, "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg=="], - - "@sindresorhus/merge-streams": ["@sindresorhus/merge-streams@4.0.0", "", {}, "sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ=="], - - "@stoplight/better-ajv-errors": ["@stoplight/better-ajv-errors@1.0.3", "", { "dependencies": { "jsonpointer": "^5.0.0", "leven": "^3.1.0" }, "peerDependencies": { "ajv": ">=8" } }, "sha512-0p9uXkuB22qGdNfy3VeEhxkU5uwvp/KrBTAbrLBURv6ilxIVwanKwjMc41lQfIVgPGcOkmLbTolfFrSsueu7zA=="], - - "@stoplight/json": ["@stoplight/json@3.21.7", "", { "dependencies": { "@stoplight/ordered-object-literal": "^1.0.3", "@stoplight/path": "^1.3.2", "@stoplight/types": "^13.6.0", "jsonc-parser": "~2.2.1", "lodash": "^4.17.21", "safe-stable-stringify": "^1.1" } }, "sha512-xcJXgKFqv/uCEgtGlPxy3tPA+4I+ZI4vAuMJ885+ThkTHFVkC+0Fm58lA9NlsyjnkpxFh4YiQWpH+KefHdbA0A=="], - - "@stoplight/json-ref-readers": ["@stoplight/json-ref-readers@1.2.2", "", { "dependencies": { "node-fetch": "^2.6.0", "tslib": "^1.14.1" } }, "sha512-nty0tHUq2f1IKuFYsLM4CXLZGHdMn+X/IwEUIpeSOXt0QjMUbL0Em57iJUDzz+2MkWG83smIigNZ3fauGjqgdQ=="], - - "@stoplight/json-ref-resolver": ["@stoplight/json-ref-resolver@3.1.6", "", { "dependencies": { "@stoplight/json": "^3.21.0", "@stoplight/path": "^1.3.2", "@stoplight/types": "^12.3.0 || ^13.0.0", "@types/urijs": "^1.19.19", "dependency-graph": "~0.11.0", "fast-memoize": "^2.5.2", "immer": "^9.0.6", "lodash": "^4.17.21", "tslib": "^2.6.0", "urijs": "^1.19.11" } }, "sha512-YNcWv3R3n3U6iQYBsFOiWSuRGE5su1tJSiX6pAPRVk7dP0L7lqCteXGzuVRQ0gMZqUl8v1P0+fAKxF6PLo9B5A=="], - - "@stoplight/ordered-object-literal": ["@stoplight/ordered-object-literal@1.0.5", "", {}, 
"sha512-COTiuCU5bgMUtbIFBuyyh2/yVVzlr5Om0v5utQDgBCuQUOPgU1DwoffkTfg4UBQOvByi5foF4w4T+H9CoRe5wg=="], - - "@stoplight/path": ["@stoplight/path@1.3.2", "", {}, "sha512-lyIc6JUlUA8Ve5ELywPC8I2Sdnh1zc1zmbYgVarhXIp9YeAB0ReeqmGEOWNtlHkbP2DAA1AL65Wfn2ncjK/jtQ=="], - - "@stoplight/spectral-core": ["@stoplight/spectral-core@1.20.0", "", { "dependencies": { "@stoplight/better-ajv-errors": "1.0.3", "@stoplight/json": "~3.21.0", "@stoplight/path": "1.3.2", "@stoplight/spectral-parsers": "^1.0.0", "@stoplight/spectral-ref-resolver": "^1.0.4", "@stoplight/spectral-runtime": "^1.1.2", "@stoplight/types": "~13.6.0", "@types/es-aggregate-error": "^1.0.2", "@types/json-schema": "^7.0.11", "ajv": "^8.17.1", "ajv-errors": "~3.0.0", "ajv-formats": "~2.1.1", "es-aggregate-error": "^1.0.7", "jsonpath-plus": "^10.3.0", "lodash": "~4.17.21", "lodash.topath": "^4.5.2", "minimatch": "3.1.2", "nimma": "0.2.3", "pony-cause": "^1.1.1", "simple-eval": "1.0.1", "tslib": "^2.8.1" } }, "sha512-5hBP81nCC1zn1hJXL/uxPNRKNcB+/pEIHgCjPRpl/w/qy9yC9ver04tw1W0l/PMiv0UeB5dYgozXVQ4j5a6QQQ=="], - - "@stoplight/spectral-formats": ["@stoplight/spectral-formats@1.8.2", "", { "dependencies": { "@stoplight/json": "^3.17.0", "@stoplight/spectral-core": "^1.19.2", "@types/json-schema": "^7.0.7", "tslib": "^2.8.1" } }, "sha512-c06HB+rOKfe7tuxg0IdKDEA5XnjL2vrn/m/OVIIxtINtBzphZrOgtRn7epQ5bQF5SWp84Ue7UJWaGgDwVngMFw=="], - - "@stoplight/spectral-functions": ["@stoplight/spectral-functions@1.10.1", "", { "dependencies": { "@stoplight/better-ajv-errors": "1.0.3", "@stoplight/json": "^3.17.1", "@stoplight/spectral-core": "^1.19.4", "@stoplight/spectral-formats": "^1.8.1", "@stoplight/spectral-runtime": "^1.1.2", "ajv": "^8.17.1", "ajv-draft-04": "~1.0.0", "ajv-errors": "~3.0.0", "ajv-formats": "~2.1.1", "lodash": "~4.17.21", "tslib": "^2.8.1" } }, "sha512-obu8ZfoHxELOapfGsCJixKZXZcffjg+lSoNuttpmUFuDzVLT3VmH8QkPXfOGOL5Pz80BR35ClNAToDkdnYIURg=="], - - "@stoplight/spectral-parsers": ["@stoplight/spectral-parsers@1.0.5", "", { 
"dependencies": { "@stoplight/json": "~3.21.0", "@stoplight/types": "^14.1.1", "@stoplight/yaml": "~4.3.0", "tslib": "^2.8.1" } }, "sha512-ANDTp2IHWGvsQDAY85/jQi9ZrF4mRrA5bciNHX+PUxPr4DwS6iv4h+FVWJMVwcEYdpyoIdyL+SRmHdJfQEPmwQ=="], - - "@stoplight/spectral-ref-resolver": ["@stoplight/spectral-ref-resolver@1.0.5", "", { "dependencies": { "@stoplight/json-ref-readers": "1.2.2", "@stoplight/json-ref-resolver": "~3.1.6", "@stoplight/spectral-runtime": "^1.1.2", "dependency-graph": "0.11.0", "tslib": "^2.8.1" } }, "sha512-gj3TieX5a9zMW29z3mBlAtDOCgN3GEc1VgZnCVlr5irmR4Qi5LuECuFItAq4pTn5Zu+sW5bqutsCH7D4PkpyAA=="], - - "@stoplight/spectral-rulesets": ["@stoplight/spectral-rulesets@1.22.0", "", { "dependencies": { "@asyncapi/specs": "^6.8.0", "@stoplight/better-ajv-errors": "1.0.3", "@stoplight/json": "^3.17.0", "@stoplight/spectral-core": "^1.19.4", "@stoplight/spectral-formats": "^1.8.1", "@stoplight/spectral-functions": "^1.9.1", "@stoplight/spectral-runtime": "^1.1.2", "@stoplight/types": "^13.6.0", "@types/json-schema": "^7.0.7", "ajv": "^8.17.1", "ajv-formats": "~2.1.1", "json-schema-traverse": "^1.0.0", "leven": "3.1.0", "lodash": "~4.17.21", "tslib": "^2.8.1" } }, "sha512-l2EY2jiKKLsvnPfGy+pXC0LeGsbJzcQP5G/AojHgf+cwN//VYxW1Wvv4WKFx/CLmLxc42mJYF2juwWofjWYNIQ=="], - - "@stoplight/spectral-runtime": ["@stoplight/spectral-runtime@1.1.4", "", { "dependencies": { "@stoplight/json": "^3.20.1", "@stoplight/path": "^1.3.2", "@stoplight/types": "^13.6.0", "abort-controller": "^3.0.0", "lodash": "^4.17.21", "node-fetch": "^2.7.0", "tslib": "^2.8.1" } }, "sha512-YHbhX3dqW0do6DhiPSgSGQzr6yQLlWybhKwWx0cqxjMwxej3TqLv3BXMfIUYFKKUqIwH4Q2mV8rrMM8qD2N0rQ=="], - - "@stoplight/types": ["@stoplight/types@13.6.0", "", { "dependencies": { "@types/json-schema": "^7.0.4", "utility-types": "^3.10.0" } }, "sha512-dzyuzvUjv3m1wmhPfq82lCVYGcXG0xUYgqnWfCq3PCVR4BKFhjdkHrnJ+jIDoMKvXb05AZP/ObQF6+NpDo29IQ=="], - - "@stoplight/yaml": ["@stoplight/yaml@4.3.0", "", { "dependencies": { 
"@stoplight/ordered-object-literal": "^1.0.5", "@stoplight/types": "^14.1.1", "@stoplight/yaml-ast-parser": "0.0.50", "tslib": "^2.2.0" } }, "sha512-JZlVFE6/dYpP9tQmV0/ADfn32L9uFarHWxfcRhReKUnljz1ZiUM5zpX+PH8h5CJs6lao3TuFqnPm9IJJCEkE2w=="], - - "@stoplight/yaml-ast-parser": ["@stoplight/yaml-ast-parser@0.0.50", "", {}, "sha512-Pb6M8TDO9DtSVla9yXSTAxmo9GVEouq5P40DWXdOie69bXogZTkgvopCq+yEvTMA0F6PEvdJmbtTV3ccIp11VQ=="], - - "@tailwindcss/node": ["@tailwindcss/node@4.1.18", "", { "dependencies": { "@jridgewell/remapping": "^2.3.4", "enhanced-resolve": "^5.18.3", "jiti": "^2.6.1", "lightningcss": "1.30.2", "magic-string": "^0.30.21", "source-map-js": "^1.2.1", "tailwindcss": "4.1.18" } }, "sha512-DoR7U1P7iYhw16qJ49fgXUlry1t4CpXeErJHnQ44JgTSKMaZUdf17cfn5mHchfJ4KRBZRFA/Coo+MUF5+gOaCQ=="], - - "@tailwindcss/oxide": ["@tailwindcss/oxide@4.1.18", "", { "optionalDependencies": { "@tailwindcss/oxide-android-arm64": "4.1.18", "@tailwindcss/oxide-darwin-arm64": "4.1.18", "@tailwindcss/oxide-darwin-x64": "4.1.18", "@tailwindcss/oxide-freebsd-x64": "4.1.18", "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.18", "@tailwindcss/oxide-linux-arm64-gnu": "4.1.18", "@tailwindcss/oxide-linux-arm64-musl": "4.1.18", "@tailwindcss/oxide-linux-x64-gnu": "4.1.18", "@tailwindcss/oxide-linux-x64-musl": "4.1.18", "@tailwindcss/oxide-wasm32-wasi": "4.1.18", "@tailwindcss/oxide-win32-arm64-msvc": "4.1.18", "@tailwindcss/oxide-win32-x64-msvc": "4.1.18" } }, "sha512-EgCR5tTS5bUSKQgzeMClT6iCY3ToqE1y+ZB0AKldj809QXk1Y+3jB0upOYZrn9aGIzPtUsP7sX4QQ4XtjBB95A=="], - - "@tailwindcss/oxide-android-arm64": ["@tailwindcss/oxide-android-arm64@4.1.18", "", { "os": "android", "cpu": "arm64" }, "sha512-dJHz7+Ugr9U/diKJA0W6N/6/cjI+ZTAoxPf9Iz9BFRF2GzEX8IvXxFIi/dZBloVJX/MZGvRuFA9rqwdiIEZQ0Q=="], - - "@tailwindcss/oxide-darwin-arm64": ["@tailwindcss/oxide-darwin-arm64@4.1.18", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Gc2q4Qhs660bhjyBSKgq6BYvwDz4G+BuyJ5H1xfhmDR3D8HnHCmT/BSkvSL0vQLy/nkMLY20PQ2OoYMO15Jd0A=="], - - 
"@tailwindcss/oxide-darwin-x64": ["@tailwindcss/oxide-darwin-x64@4.1.18", "", { "os": "darwin", "cpu": "x64" }, "sha512-FL5oxr2xQsFrc3X9o1fjHKBYBMD1QZNyc1Xzw/h5Qu4XnEBi3dZn96HcHm41c/euGV+GRiXFfh2hUCyKi/e+yw=="], - - "@tailwindcss/oxide-freebsd-x64": ["@tailwindcss/oxide-freebsd-x64@4.1.18", "", { "os": "freebsd", "cpu": "x64" }, "sha512-Fj+RHgu5bDodmV1dM9yAxlfJwkkWvLiRjbhuO2LEtwtlYlBgiAT4x/j5wQr1tC3SANAgD+0YcmWVrj8R9trVMA=="], - - "@tailwindcss/oxide-linux-arm-gnueabihf": ["@tailwindcss/oxide-linux-arm-gnueabihf@4.1.18", "", { "os": "linux", "cpu": "arm" }, "sha512-Fp+Wzk/Ws4dZn+LV2Nqx3IilnhH51YZoRaYHQsVq3RQvEl+71VGKFpkfHrLM/Li+kt5c0DJe/bHXK1eHgDmdiA=="], - - "@tailwindcss/oxide-linux-arm64-gnu": ["@tailwindcss/oxide-linux-arm64-gnu@4.1.18", "", { "os": "linux", "cpu": "arm64" }, "sha512-S0n3jboLysNbh55Vrt7pk9wgpyTTPD0fdQeh7wQfMqLPM/Hrxi+dVsLsPrycQjGKEQk85Kgbx+6+QnYNiHalnw=="], - - "@tailwindcss/oxide-linux-arm64-musl": ["@tailwindcss/oxide-linux-arm64-musl@4.1.18", "", { "os": "linux", "cpu": "arm64" }, "sha512-1px92582HkPQlaaCkdRcio71p8bc8i/ap5807tPRDK/uw953cauQBT8c5tVGkOwrHMfc2Yh6UuxaH4vtTjGvHg=="], - - "@tailwindcss/oxide-linux-x64-gnu": ["@tailwindcss/oxide-linux-x64-gnu@4.1.18", "", { "os": "linux", "cpu": "x64" }, "sha512-v3gyT0ivkfBLoZGF9LyHmts0Isc8jHZyVcbzio6Wpzifg/+5ZJpDiRiUhDLkcr7f/r38SWNe7ucxmGW3j3Kb/g=="], - - "@tailwindcss/oxide-linux-x64-musl": ["@tailwindcss/oxide-linux-x64-musl@4.1.18", "", { "os": "linux", "cpu": "x64" }, "sha512-bhJ2y2OQNlcRwwgOAGMY0xTFStt4/wyU6pvI6LSuZpRgKQwxTec0/3Scu91O8ir7qCR3AuepQKLU/kX99FouqQ=="], - - "@tailwindcss/oxide-wasm32-wasi": ["@tailwindcss/oxide-wasm32-wasi@4.1.18", "", { "dependencies": { "@emnapi/core": "^1.7.1", "@emnapi/runtime": "^1.7.1", "@emnapi/wasi-threads": "^1.1.0", "@napi-rs/wasm-runtime": "^1.1.0", "@tybys/wasm-util": "^0.10.1", "tslib": "^2.4.0" }, "cpu": "none" }, "sha512-LffYTvPjODiP6PT16oNeUQJzNVyJl1cjIebq/rWWBF+3eDst5JGEFSc5cWxyRCJ0Mxl+KyIkqRxk1XPEs9x8TA=="], - - 
"@tailwindcss/oxide-win32-arm64-msvc": ["@tailwindcss/oxide-win32-arm64-msvc@4.1.18", "", { "os": "win32", "cpu": "arm64" }, "sha512-HjSA7mr9HmC8fu6bdsZvZ+dhjyGCLdotjVOgLA2vEqxEBZaQo9YTX4kwgEvPCpRh8o4uWc4J/wEoFzhEmjvPbA=="], - - "@tailwindcss/oxide-win32-x64-msvc": ["@tailwindcss/oxide-win32-x64-msvc@4.1.18", "", { "os": "win32", "cpu": "x64" }, "sha512-bJWbyYpUlqamC8dpR7pfjA0I7vdF6t5VpUGMWRkXVE3AXgIZjYUYAK7II1GNaxR8J1SSrSrppRar8G++JekE3Q=="], - - "@tailwindcss/vite": ["@tailwindcss/vite@4.1.18", "", { "dependencies": { "@tailwindcss/node": "4.1.18", "@tailwindcss/oxide": "4.1.18", "tailwindcss": "4.1.18" }, "peerDependencies": { "vite": "^5.2.0 || ^6 || ^7" } }, "sha512-jVA+/UpKL1vRLg6Hkao5jldawNmRo7mQYrZtNHMIVpLfLhDml5nMRUo/8MwoX2vNXvnaXNNMedrMfMugAVX1nA=="], - - "@tanstack/devtools-event-client": ["@tanstack/devtools-event-client@0.4.0", "", {}, "sha512-RPfGuk2bDZgcu9bAJodvO2lnZeHuz4/71HjZ0bGb/SPg8+lyTA+RLSKQvo7fSmPSi8/vcH3aKQ8EM9ywf1olaw=="], - - "@tanstack/form-core": ["@tanstack/form-core@1.27.6", "", { "dependencies": { "@tanstack/devtools-event-client": "^0.4.0", "@tanstack/pacer-lite": "^0.1.1", "@tanstack/store": "^0.7.7" } }, "sha512-1C4PUpOcCpivddKxtAeqdeqncxnPKiPpTVDRknDExCba+6zCsAjxgL+p3qYA3hu+EFyUAdW71rU+uqYbEa7qqA=="], - - "@tanstack/history": ["@tanstack/history@1.141.0", "", {}, "sha512-LS54XNyxyTs5m/pl1lkwlg7uZM3lvsv2FIIV1rsJgnfwVCnI+n4ZGZ2CcjNT13BPu/3hPP+iHmliBSscJxW5FQ=="], - - "@tanstack/pacer-lite": ["@tanstack/pacer-lite@0.1.1", "", {}, "sha512-y/xtNPNt/YeyoVxE/JCx+T7yjEzpezmbb+toK8DDD1P4m7Kzs5YR956+7OKexG3f8aXgC3rLZl7b1V+yNUSy5w=="], - - "@tanstack/query-core": ["@tanstack/query-core@5.90.12", "", {}, "sha512-T1/8t5DhV/SisWjDnaiU2drl6ySvsHj1bHBCWNXd+/T+Hh1cf6JodyEYMd5sgwm+b/mETT4EV3H+zCVczCU5hg=="], - - "@tanstack/react-form": ["@tanstack/react-form@1.27.6", "", { "dependencies": { "@tanstack/form-core": "1.27.6", "@tanstack/react-store": "^0.8.0" }, "peerDependencies": { "react": "^17.0.0 || ^18.0.0 || ^19.0.0" } }, 
"sha512-kq/68CKbCxK6TkFnGihtQ3qdrD5GPrVjfhkcqMFH/+X9jYOZDai52864T4997lC3nSEKFbUhkkXlaIy/wCSuNQ=="], - - "@tanstack/react-query": ["@tanstack/react-query@5.90.12", "", { "dependencies": { "@tanstack/query-core": "5.90.12" }, "peerDependencies": { "react": "^18 || ^19" } }, "sha512-graRZspg7EoEaw0a8faiUASCyJrqjKPdqJ9EwuDRUF9mEYJ1YPczI9H+/agJ0mOJkPCJDk0lsz5QTrLZ/jQ2rg=="], - - "@tanstack/react-router": ["@tanstack/react-router@1.143.4", "", { "dependencies": { "@tanstack/history": "1.141.0", "@tanstack/react-store": "^0.8.0", "@tanstack/router-core": "1.143.4", "isbot": "^5.1.22", "tiny-invariant": "^1.3.3", "tiny-warning": "^1.0.3" }, "peerDependencies": { "react": ">=18.0.0 || >=19.0.0", "react-dom": ">=18.0.0 || >=19.0.0" } }, "sha512-7Tz7YwJc8RKDQga3yNY03zNc/ey+AIDA1A5ppGYqIM+UR47uGdAKc/4MSpItznqkSUi1Csrw2nVtICSkGanKdQ=="], - - "@tanstack/react-router-devtools": ["@tanstack/react-router-devtools@1.143.4", "", { "dependencies": { "@tanstack/router-devtools-core": "1.143.4" }, "peerDependencies": { "@tanstack/react-router": "^1.143.4", "@tanstack/router-core": "^1.143.4", "react": ">=18.0.0 || >=19.0.0", "react-dom": ">=18.0.0 || >=19.0.0" }, "optionalPeers": ["@tanstack/router-core"] }, "sha512-+AKGHkC2aDL93XCWDMB9/cf8+N4awGylCK0mk0kJ5BUBVSoZpNVLtZiBFCxRuvZCQtY5PbdYT4xeUA0dbgH9Eg=="], - - "@tanstack/react-store": ["@tanstack/react-store@0.8.0", "", { "dependencies": { "@tanstack/store": "0.8.0", "use-sync-external-store": "^1.6.0" }, "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-1vG9beLIuB7q69skxK9r5xiLN3ztzIPfSQSs0GfeqWGO2tGIyInZx0x1COhpx97RKaONSoAb8C3dxacWksm1ow=="], - - "@tanstack/react-table": ["@tanstack/react-table@8.21.3", "", { "dependencies": { "@tanstack/table-core": "8.21.3" }, "peerDependencies": { "react": ">=16.8", "react-dom": ">=16.8" } }, "sha512-5nNMTSETP4ykGegmVkhjcS8tTLW6Vl4axfEGQN3v0zdHYbK4UfoqfPChclTrJ4EoK9QynqAu9oUf8VEmrpZ5Ww=="], - - 
"@tanstack/router-core": ["@tanstack/router-core@1.143.4", "", { "dependencies": { "@tanstack/history": "1.141.0", "@tanstack/store": "^0.8.0", "cookie-es": "^2.0.0", "seroval": "^1.4.1", "seroval-plugins": "^1.4.0", "tiny-invariant": "^1.3.3", "tiny-warning": "^1.0.3" } }, "sha512-VlSXrYQ/oBoUUGJx6t93KfzGHeBvL6GOmKRouPbHNqKi4ueVnQ2PdRX+s9eZoDAdcVsgmS7YlTCRgIbh2sAQpA=="], - - "@tanstack/router-devtools": ["@tanstack/router-devtools@1.143.4", "", { "dependencies": { "@tanstack/react-router-devtools": "1.143.4", "clsx": "^2.1.1", "goober": "^2.1.16" }, "peerDependencies": { "@tanstack/react-router": "^1.143.4", "csstype": "^3.0.10", "react": ">=18.0.0 || >=19.0.0", "react-dom": ">=18.0.0 || >=19.0.0" }, "optionalPeers": ["csstype"] }, "sha512-FycfcOodSRjc3Gx5i1rt/+gwiin+muSt6y2ZxmcsOwzg1QpdZAeELG9qZfqR6qg38nhJu9ixhmA/M4kf82cH8g=="], - - "@tanstack/router-devtools-core": ["@tanstack/router-devtools-core@1.143.4", "", { "dependencies": { "clsx": "^2.1.1", "goober": "^2.1.16", "tiny-invariant": "^1.3.3" }, "peerDependencies": { "@tanstack/router-core": "^1.143.4", "csstype": "^3.0.10", "solid-js": ">=1.9.5" }, "optionalPeers": ["csstype"] }, "sha512-f5uatl8LIlMS4O2uIQ/oh58pF62/N1qKrBPtYvc7B1Tvf16ER8Nr1t8d4a85MiQyyA4kgiqfnYryOfW+diLjwg=="], - - "@tanstack/router-generator": ["@tanstack/router-generator@1.143.4", "", { "dependencies": { "@tanstack/router-core": "1.143.4", "@tanstack/router-utils": "1.141.0", "@tanstack/virtual-file-routes": "1.141.0", "prettier": "^3.5.0", "recast": "^0.23.11", "source-map": "^0.7.4", "tsx": "^4.19.2", "zod": "^3.24.2" } }, "sha512-QBqJCNoXJQmWkoAR6VqSuA7nBUSf3y5p8t4JpbtLGUgQ7pLu03nUSjcnLqN84BednhpZXnh/Mw3jxnpA//UWCQ=="], - - "@tanstack/router-plugin": ["@tanstack/router-plugin@1.143.4", "", { "dependencies": { "@babel/core": "^7.27.7", "@babel/plugin-syntax-jsx": "^7.27.1", "@babel/plugin-syntax-typescript": "^7.27.1", "@babel/template": "^7.27.2", "@babel/traverse": "^7.27.7", "@babel/types": "^7.27.7", "@tanstack/router-core": 
"1.143.4", "@tanstack/router-generator": "1.143.4", "@tanstack/router-utils": "1.141.0", "@tanstack/virtual-file-routes": "1.141.0", "babel-dead-code-elimination": "^1.0.10", "chokidar": "^3.6.0", "unplugin": "^2.1.2", "zod": "^3.24.2" }, "peerDependencies": { "@rsbuild/core": ">=1.0.2", "@tanstack/react-router": "^1.143.4", "vite": ">=5.0.0 || >=6.0.0 || >=7.0.0", "vite-plugin-solid": "^2.11.10", "webpack": ">=5.92.0" }, "optionalPeers": ["@rsbuild/core", "@tanstack/react-router", "vite", "vite-plugin-solid", "webpack"] }, "sha512-gjqkdAHJ8lZ1pOcK2noboyLKtbwIH59H/3/a4OQu30yNmuRnDTN75OrSBMvHvgYnXM3a0qUo9uFCphsRbS9N6g=="], - - "@tanstack/router-utils": ["@tanstack/router-utils@1.141.0", "", { "dependencies": { "@babel/core": "^7.27.4", "@babel/generator": "^7.27.5", "@babel/parser": "^7.27.5", "@babel/preset-typescript": "^7.27.1", "ansis": "^4.1.0", "diff": "^8.0.2", "pathe": "^2.0.3", "tinyglobby": "^0.2.15" } }, "sha512-/eFGKCiix1SvjxwgzrmH4pHjMiMxc+GA4nIbgEkG2RdAJqyxLcRhd7RPLG0/LZaJ7d0ad3jrtRqsHLv2152Vbw=="], - - "@tanstack/store": ["@tanstack/store@0.8.0", "", {}, "sha512-Om+BO0YfMZe//X2z0uLF2j+75nQga6TpTJgLJQBiq85aOyZNIhkCgleNcud2KQg4k4v9Y9l+Uhru3qWMPGTOzQ=="], - - "@tanstack/table-core": ["@tanstack/table-core@8.21.3", "", {}, "sha512-ldZXEhOBb8Is7xLs01fR3YEc3DERiz5silj8tnGkFZytt1abEvl/GhUmCE0PMLaMPTa3Jk4HbKmRlHmu+gCftg=="], - - "@tanstack/virtual-file-routes": ["@tanstack/virtual-file-routes@1.141.0", "", {}, "sha512-CJrWtr6L9TVzEImm9S7dQINx+xJcYP/aDkIi6gnaWtIgbZs1pnzsE0yJc2noqXZ+yAOqLx3TBGpBEs9tS0P9/A=="], - - "@ts-morph/common": ["@ts-morph/common@0.27.0", "", { "dependencies": { "fast-glob": "^3.3.3", "minimatch": "^10.0.1", "path-browserify": "^1.0.1" } }, "sha512-Wf29UqxWDpc+i61k3oIOzcUfQt79PIT9y/MWfAGlrkjg6lBC1hwDECLXPVJAhWjiGbfBCxZd65F/LIZF3+jeJQ=="], - - "@tybys/wasm-util": ["@tybys/wasm-util@0.10.1", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg=="], - - 
"@types/babel__core": ["@types/babel__core@7.20.5", "", { "dependencies": { "@babel/parser": "^7.20.7", "@babel/types": "^7.20.7", "@types/babel__generator": "*", "@types/babel__template": "*", "@types/babel__traverse": "*" } }, "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA=="], - - "@types/babel__generator": ["@types/babel__generator@7.27.0", "", { "dependencies": { "@babel/types": "^7.0.0" } }, "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg=="], - - "@types/babel__template": ["@types/babel__template@7.4.4", "", { "dependencies": { "@babel/parser": "^7.1.0", "@babel/types": "^7.0.0" } }, "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A=="], - - "@types/babel__traverse": ["@types/babel__traverse@7.28.0", "", { "dependencies": { "@babel/types": "^7.28.2" } }, "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q=="], - - "@types/es-aggregate-error": ["@types/es-aggregate-error@1.0.6", "", { "dependencies": { "@types/node": "*" } }, "sha512-qJ7LIFp06h1QE1aVxbVd+zJP2wdaugYXYfd6JxsyRMrYHaxb6itXPogW2tz+ylUJ1n1b+JF1PHyYCfYHm0dvUg=="], - - "@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="], - - "@types/hast": ["@types/hast@3.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ=="], - - "@types/json-schema": ["@types/json-schema@7.0.15", "", {}, "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA=="], - - "@types/node": ["@types/node@25.0.3", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA=="], - - "@types/react": ["@types/react@19.2.7", "", { "dependencies": 
{ "csstype": "^3.2.2" } }, "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg=="], - - "@types/react-dom": ["@types/react-dom@19.2.3", "", { "peerDependencies": { "@types/react": "^19.2.0" } }, "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ=="], - - "@types/statuses": ["@types/statuses@2.0.6", "", {}, "sha512-xMAgYwceFhRA2zY+XbEA7mxYbA093wdiW8Vu6gZPGWy9cmOyU9XesH1tNcEWsKFd5Vzrqx5T3D38PWx1FIIXkA=="], - - "@types/unist": ["@types/unist@3.0.3", "", {}, "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q=="], - - "@types/urijs": ["@types/urijs@1.19.26", "", {}, "sha512-wkXrVzX5yoqLnndOwFsieJA7oKM8cNkOKJtf/3vVGSUFkWDKZvFHpIl9Pvqb/T9UsawBBFMTTD8xu7sK5MWuvg=="], - - "@typescript-eslint/eslint-plugin": ["@typescript-eslint/eslint-plugin@8.50.1", "", { "dependencies": { "@eslint-community/regexpp": "^4.10.0", "@typescript-eslint/scope-manager": "8.50.1", "@typescript-eslint/type-utils": "8.50.1", "@typescript-eslint/utils": "8.50.1", "@typescript-eslint/visitor-keys": "8.50.1", "ignore": "^7.0.0", "natural-compare": "^1.4.0", "ts-api-utils": "^2.1.0" }, "peerDependencies": { "@typescript-eslint/parser": "^8.50.1", "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-PKhLGDq3JAg0Jk/aK890knnqduuI/Qj+udH7wCf0217IGi4gt+acgCyPVe79qoT+qKUvHMDQkwJeKW9fwl8Cyw=="], - - "@typescript-eslint/parser": ["@typescript-eslint/parser@8.50.1", "", { "dependencies": { "@typescript-eslint/scope-manager": "8.50.1", "@typescript-eslint/types": "8.50.1", "@typescript-eslint/typescript-estree": "8.50.1", "@typescript-eslint/visitor-keys": "8.50.1", "debug": "^4.3.4" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-hM5faZwg7aVNa819m/5r7D0h0c9yC4DUlWAOvHAtISdFTc8xB86VmX5Xqabrama3wIPJ/q9RbGS1worb6JfnMg=="], - - "@typescript-eslint/project-service": ["@typescript-eslint/project-service@8.50.1", "", 
{ "dependencies": { "@typescript-eslint/tsconfig-utils": "^8.50.1", "@typescript-eslint/types": "^8.50.1", "debug": "^4.3.4" }, "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-E1ur1MCVf+YiP89+o4Les/oBAVzmSbeRB0MQLfSlYtbWU17HPxZ6Bhs5iYmKZRALvEuBoXIZMOIRRc/P++Ortg=="], - - "@typescript-eslint/scope-manager": ["@typescript-eslint/scope-manager@8.50.1", "", { "dependencies": { "@typescript-eslint/types": "8.50.1", "@typescript-eslint/visitor-keys": "8.50.1" } }, "sha512-mfRx06Myt3T4vuoHaKi8ZWNTPdzKPNBhiblze5N50//TSHOAQQevl/aolqA/BcqqbJ88GUnLqjjcBc8EWdBcVw=="], - - "@typescript-eslint/tsconfig-utils": ["@typescript-eslint/tsconfig-utils@8.50.1", "", { "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, "sha512-ooHmotT/lCWLXi55G4mvaUF60aJa012QzvLK0Y+Mp4WdSt17QhMhWOaBWeGTFVkb2gDgBe19Cxy1elPXylslDw=="], - - "@typescript-eslint/type-utils": ["@typescript-eslint/type-utils@8.50.1", "", { "dependencies": { "@typescript-eslint/types": "8.50.1", "@typescript-eslint/typescript-estree": "8.50.1", "@typescript-eslint/utils": "8.50.1", "debug": "^4.3.4", "ts-api-utils": "^2.1.0" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-7J3bf022QZE42tYMO6SL+6lTPKFk/WphhRPe9Tw/el+cEwzLz1Jjz2PX3GtGQVxooLDKeMVmMt7fWpYRdG5Etg=="], - - "@typescript-eslint/types": ["@typescript-eslint/types@8.50.1", "", {}, "sha512-v5lFIS2feTkNyMhd7AucE/9j/4V9v5iIbpVRncjk/K0sQ6Sb+Np9fgYS/63n6nwqahHQvbmujeBL7mp07Q9mlA=="], - - "@typescript-eslint/typescript-estree": ["@typescript-eslint/typescript-estree@8.50.1", "", { "dependencies": { "@typescript-eslint/project-service": "8.50.1", "@typescript-eslint/tsconfig-utils": "8.50.1", "@typescript-eslint/types": "8.50.1", "@typescript-eslint/visitor-keys": "8.50.1", "debug": "^4.3.4", "minimatch": "^9.0.4", "semver": "^7.6.0", "tinyglobby": "^0.2.15", "ts-api-utils": "^2.1.0" }, "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, 
"sha512-woHPdW+0gj53aM+cxchymJCrh0cyS7BTIdcDxWUNsclr9VDkOSbqC13juHzxOmQ22dDkMZEpZB+3X1WpUvzgVQ=="], - - "@typescript-eslint/utils": ["@typescript-eslint/utils@8.50.1", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", "@typescript-eslint/scope-manager": "8.50.1", "@typescript-eslint/types": "8.50.1", "@typescript-eslint/typescript-estree": "8.50.1" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-lCLp8H1T9T7gPbEuJSnHwnSuO9mDf8mfK/Nion5mZmiEaQD9sWf9W4dfeFqRyqRjF06/kBuTmAqcs9sewM2NbQ=="], - - "@typescript-eslint/visitor-keys": ["@typescript-eslint/visitor-keys@8.50.1", "", { "dependencies": { "@typescript-eslint/types": "8.50.1", "eslint-visitor-keys": "^4.2.1" } }, "sha512-IrDKrw7pCRUR94zeuCSUWQ+w8JEf5ZX5jl/e6AHGSLi1/zIr0lgutfn/7JpfCey+urpgQEdrZVYzCaVVKiTwhQ=="], - - "@vitejs/plugin-react": ["@vitejs/plugin-react@5.1.2", "", { "dependencies": { "@babel/core": "^7.28.5", "@babel/plugin-transform-react-jsx-self": "^7.27.1", "@babel/plugin-transform-react-jsx-source": "^7.27.1", "@rolldown/pluginutils": "1.0.0-beta.53", "@types/babel__core": "^7.20.5", "react-refresh": "^0.18.0" }, "peerDependencies": { "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" } }, "sha512-EcA07pHJouywpzsoTUqNh5NwGayl2PPVEJKUSinGGSxFGYn+shYbqMGBg6FXDqgXum9Ou/ecb+411ssw8HImJQ=="], - - "abort-controller": ["abort-controller@3.0.0", "", { "dependencies": { "event-target-shim": "^5.0.0" } }, "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg=="], - - "accepts": ["accepts@2.0.0", "", { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng=="], - - "acorn": ["acorn@8.15.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg=="], - - "acorn-jsx": ["acorn-jsx@5.3.2", "", { "peerDependencies": { 
"acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ=="], - - "agent-base": ["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], - - "ajv": ["ajv@6.12.6", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g=="], - - "ajv-draft-04": ["ajv-draft-04@1.0.0", "", { "peerDependencies": { "ajv": "^8.5.0" }, "optionalPeers": ["ajv"] }, "sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw=="], - - "ajv-errors": ["ajv-errors@3.0.0", "", { "peerDependencies": { "ajv": "^8.0.1" } }, "sha512-V3wD15YHfHz6y0KdhYFjyy9vWtEVALT9UrxfN3zqlI6dMioHnJrqOYfyPKol3oqrnCM9uwkcdCwkJ0WUcbLMTQ=="], - - "ajv-formats": ["ajv-formats@3.0.1", "", { "dependencies": { "ajv": "^8.0.0" } }, "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ=="], - - "ansi-colors": ["ansi-colors@4.1.3", "", {}, "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw=="], - - "ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], - - "ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], - - "ansis": ["ansis@4.2.0", "", {}, "sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig=="], - - "anymatch": ["anymatch@3.1.3", "", { "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" } }, 
"sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw=="], - - "argparse": ["argparse@2.0.1", "", {}, "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="], - - "aria-hidden": ["aria-hidden@1.2.6", "", { "dependencies": { "tslib": "^2.0.0" } }, "sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA=="], - - "array-buffer-byte-length": ["array-buffer-byte-length@1.0.2", "", { "dependencies": { "call-bound": "^1.0.3", "is-array-buffer": "^3.0.5" } }, "sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw=="], - - "array-union": ["array-union@2.1.0", "", {}, "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw=="], - - "arraybuffer.prototype.slice": ["arraybuffer.prototype.slice@1.0.4", "", { "dependencies": { "array-buffer-byte-length": "^1.0.1", "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-abstract": "^1.23.5", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "is-array-buffer": "^3.0.4" } }, "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ=="], - - "ast-types": ["ast-types@0.16.1", "", { "dependencies": { "tslib": "^2.0.1" } }, "sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg=="], - - "astring": ["astring@1.9.0", "", { "bin": { "astring": "bin/astring" } }, "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg=="], - - "async-function": ["async-function@1.0.0", "", {}, "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA=="], - - "asynckit": ["asynckit@0.4.0", "", {}, "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="], - - "available-typed-arrays": ["available-typed-arrays@1.0.7", "", { "dependencies": { 
"possible-typed-array-names": "^1.0.0" } }, "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ=="], - - "axios": ["axios@1.13.2", "", { "dependencies": { "follow-redirects": "^1.15.6", "form-data": "^4.0.4", "proxy-from-env": "^1.1.0" } }, "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA=="], - - "babel-dead-code-elimination": ["babel-dead-code-elimination@1.0.11", "", { "dependencies": { "@babel/core": "^7.23.7", "@babel/parser": "^7.23.6", "@babel/traverse": "^7.23.7", "@babel/types": "^7.23.6" } }, "sha512-mwq3W3e/pKSI6TG8lXMiDWvEi1VXYlSBlJlB3l+I0bAb5u1RNUl88udos85eOPNK3m5EXK9uO7d2g08pesTySQ=="], - - "balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="], - - "baseline-browser-mapping": ["baseline-browser-mapping@2.9.11", "", { "bin": { "baseline-browser-mapping": "dist/cli.js" } }, "sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ=="], - - "binary-extensions": ["binary-extensions@2.3.0", "", {}, "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw=="], - - "body-parser": ["body-parser@2.2.1", "", { "dependencies": { "bytes": "^3.1.2", "content-type": "^1.0.5", "debug": "^4.4.3", "http-errors": "^2.0.0", "iconv-lite": "^0.7.0", "on-finished": "^2.4.1", "qs": "^6.14.0", "raw-body": "^3.0.1", "type-is": "^2.0.1" } }, "sha512-nfDwkulwiZYQIGwxdy0RUmowMhKcFVcYXUU7m4QlKYim1rUtg83xm2yjZ40QjDuc291AJjjeSc9b++AWHSgSHw=="], - - "brace-expansion": ["brace-expansion@1.1.12", "", { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg=="], - - "braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, 
"sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="], - - "browserslist": ["browserslist@4.28.1", "", { "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", "electron-to-chromium": "^1.5.263", "node-releases": "^2.0.27", "update-browserslist-db": "^1.2.0" }, "bin": { "browserslist": "cli.js" } }, "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA=="], - - "bundle-name": ["bundle-name@4.1.0", "", { "dependencies": { "run-applescript": "^7.0.0" } }, "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q=="], - - "bytes": ["bytes@3.1.2", "", {}, "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="], - - "call-bind": ["call-bind@1.0.8", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.0", "es-define-property": "^1.0.0", "get-intrinsic": "^1.2.4", "set-function-length": "^1.2.2" } }, "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww=="], - - "call-bind-apply-helpers": ["call-bind-apply-helpers@1.0.2", "", { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ=="], - - "call-bound": ["call-bound@1.0.4", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "get-intrinsic": "^1.3.0" } }, "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg=="], - - "call-me-maybe": ["call-me-maybe@1.0.2", "", {}, "sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ=="], - - "callsites": ["callsites@3.1.0", "", {}, "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ=="], - - "caniuse-lite": ["caniuse-lite@1.0.30001761", "", {}, 
"sha512-JF9ptu1vP2coz98+5051jZ4PwQgd2ni8A+gYSN7EA7dPKIMf0pDlSUxhdmVOaV3/fYK5uWBkgSXJaRLr4+3A6g=="], - - "chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], - - "chokidar": ["chokidar@3.6.0", "", { "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", "glob-parent": "~5.1.2", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", "readdirp": "~3.6.0" }, "optionalDependencies": { "fsevents": "~2.3.2" } }, "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw=="], - - "class-variance-authority": ["class-variance-authority@0.7.1", "", { "dependencies": { "clsx": "^2.1.1" } }, "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg=="], - - "cli-cursor": ["cli-cursor@5.0.0", "", { "dependencies": { "restore-cursor": "^5.0.0" } }, "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw=="], - - "cli-spinners": ["cli-spinners@2.9.2", "", {}, "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg=="], - - "cli-width": ["cli-width@4.1.0", "", {}, "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ=="], - - "cliui": ["cliui@8.0.1", "", { "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" } }, "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ=="], - - "clsx": ["clsx@2.1.1", "", {}, "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA=="], - - "code-block-writer": ["code-block-writer@13.0.3", "", {}, "sha512-Oofo0pq3IKnsFtuHqSF7TqBfr71aeyZDVJ0HpmqB7FBM2qEigL0iPONSCZSO9pE9dZTAxANe5XHG9Uy0YMv8cg=="], - - "color-convert": ["color-convert@2.0.1", "", { "dependencies": { 
"color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], - - "color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], - - "combined-stream": ["combined-stream@1.0.8", "", { "dependencies": { "delayed-stream": "~1.0.0" } }, "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg=="], - - "commander": ["commander@14.0.2", "", {}, "sha512-TywoWNNRbhoD0BXs1P3ZEScW8W5iKrnbithIl0YH+uCmBd0QpPOA8yc82DS3BIE5Ma6FnBVUsJ7wVUDz4dvOWQ=="], - - "compare-versions": ["compare-versions@6.1.1", "", {}, "sha512-4hm4VPpIecmlg59CHXnRDnqGplJFrbLG4aFEl5vl6cK1u76ws3LLvX7ikFnTDl5vo39sjWD6AaDPYodJp/NNHg=="], - - "concat-map": ["concat-map@0.0.1", "", {}, "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="], - - "content-disposition": ["content-disposition@1.0.1", "", {}, "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q=="], - - "content-type": ["content-type@1.0.5", "", {}, "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA=="], - - "convert-source-map": ["convert-source-map@2.0.0", "", {}, "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg=="], - - "cookie": ["cookie@1.1.1", "", {}, "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ=="], - - "cookie-es": ["cookie-es@2.0.0", "", {}, "sha512-RAj4E421UYRgqokKUmotqAwuplYw15qtdXfY+hGzgCJ/MBjCVZcSoHK/kH9kocfjRjcDME7IiDWR/1WX1TM2Pg=="], - - "cookie-signature": ["cookie-signature@1.2.2", "", {}, "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg=="], - - "cors": ["cors@2.8.5", "", { "dependencies": { "object-assign": "^4", "vary": "^1" } }, 
"sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g=="], - - "cosmiconfig": ["cosmiconfig@9.0.0", "", { "dependencies": { "env-paths": "^2.2.1", "import-fresh": "^3.3.0", "js-yaml": "^4.1.0", "parse-json": "^5.2.0" }, "peerDependencies": { "typescript": ">=4.9.5" }, "optionalPeers": ["typescript"] }, "sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg=="], - - "cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="], - - "cssesc": ["cssesc@3.0.0", "", { "bin": { "cssesc": "bin/cssesc" } }, "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg=="], - - "csstype": ["csstype@3.2.3", "", {}, "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ=="], - - "data-uri-to-buffer": ["data-uri-to-buffer@4.0.1", "", {}, "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A=="], - - "data-view-buffer": ["data-view-buffer@1.0.2", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-data-view": "^1.0.2" } }, "sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ=="], - - "data-view-byte-length": ["data-view-byte-length@1.0.2", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-data-view": "^1.0.2" } }, "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ=="], - - "data-view-byte-offset": ["data-view-byte-offset@1.0.1", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "is-data-view": "^1.0.1" } }, "sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ=="], - - "debug": ["debug@4.4.3", "", { "dependencies": { "ms": 
"^2.1.3" } }, "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA=="], - - "dedent": ["dedent@1.7.1", "", { "peerDependencies": { "babel-plugin-macros": "^3.1.0" }, "optionalPeers": ["babel-plugin-macros"] }, "sha512-9JmrhGZpOlEgOLdQgSm0zxFaYoQon408V1v49aqTWuXENVlnCuY9JBZcXZiCsZQWDjTm5Qf/nIvAy77mXDAjEg=="], - - "deep-is": ["deep-is@0.1.4", "", {}, "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="], - - "deepmerge": ["deepmerge@4.3.1", "", {}, "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A=="], - - "default-browser": ["default-browser@5.4.0", "", { "dependencies": { "bundle-name": "^4.1.0", "default-browser-id": "^5.0.0" } }, "sha512-XDuvSq38Hr1MdN47EDvYtx3U0MTqpCEn+F6ft8z2vYDzMrvQhVp0ui9oQdqW3MvK3vqUETglt1tVGgjLuJ5izg=="], - - "default-browser-id": ["default-browser-id@5.0.1", "", {}, "sha512-x1VCxdX4t+8wVfd1so/9w+vQ4vx7lKd2Qp5tDRutErwmR85OgmfX7RlLRMWafRMY7hbEiXIbudNrjOAPa/hL8Q=="], - - "define-data-property": ["define-data-property@1.1.4", "", { "dependencies": { "es-define-property": "^1.0.0", "es-errors": "^1.3.0", "gopd": "^1.0.1" } }, "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A=="], - - "define-lazy-prop": ["define-lazy-prop@3.0.0", "", {}, "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg=="], - - "define-properties": ["define-properties@1.2.1", "", { "dependencies": { "define-data-property": "^1.0.1", "has-property-descriptors": "^1.0.0", "object-keys": "^1.1.1" } }, "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg=="], - - "delayed-stream": ["delayed-stream@1.0.0", "", {}, "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ=="], - - "depd": ["depd@2.0.0", "", {}, 
"sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="], - - "dependency-graph": ["dependency-graph@0.11.0", "", {}, "sha512-JeMq7fEshyepOWDfcfHK06N3MhyPhz++vtqWhMT5O9A3K42rdsEDpfdVqjaqaAhsw6a+ZqeDvQVtD0hFHQWrzg=="], - - "detect-libc": ["detect-libc@2.1.2", "", {}, "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ=="], - - "detect-node-es": ["detect-node-es@1.1.0", "", {}, "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ=="], - - "diff": ["diff@8.0.2", "", {}, "sha512-sSuxWU5j5SR9QQji/o2qMvqRNYRDOcBTgsJ/DeCf4iSN4gW+gNMXM7wFIP+fdXZxoNiAnHUTGjCr+TSWXdRDKg=="], - - "dir-glob": ["dir-glob@3.0.1", "", { "dependencies": { "path-type": "^4.0.0" } }, "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA=="], - - "dotenv": ["dotenv@17.2.3", "", {}, "sha512-JVUnt+DUIzu87TABbhPmNfVdBDt18BLOWjMUFJMSi/Qqg7NTYtabbvSNJGOJ7afbRuv9D/lngizHtP7QyLQ+9w=="], - - "dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="], - - "eciesjs": ["eciesjs@0.4.16", "", { "dependencies": { "@ecies/ciphers": "^0.2.4", "@noble/ciphers": "^1.3.0", "@noble/curves": "^1.9.7", "@noble/hashes": "^1.8.0" } }, "sha512-dS5cbA9rA2VR4Ybuvhg6jvdmp46ubLn3E+px8cG/35aEDNclrqoCjg6mt0HYZ/M+OoESS3jSkCrqk1kWAEhWAw=="], - - "ee-first": ["ee-first@1.1.1", "", {}, "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="], - - "electron-to-chromium": ["electron-to-chromium@1.5.267", "", {}, "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw=="], - - "emoji-regex": ["emoji-regex@10.6.0", "", {}, 
"sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A=="], - - "encodeurl": ["encodeurl@2.0.0", "", {}, "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg=="], - - "enhanced-resolve": ["enhanced-resolve@5.18.4", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" } }, "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q=="], - - "enquirer": ["enquirer@2.4.1", "", { "dependencies": { "ansi-colors": "^4.1.1", "strip-ansi": "^6.0.1" } }, "sha512-rRqJg/6gd538VHvR3PSrdRBb/1Vy2YfzHqzvbhGIQpDRKIa4FgV/54b5Q1xYSxOOwKvjXweS26E0Q+nAMwp2pQ=="], - - "entities": ["entities@4.5.0", "", {}, "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw=="], - - "env-paths": ["env-paths@2.2.1", "", {}, "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A=="], - - "error-ex": ["error-ex@1.3.4", "", { "dependencies": { "is-arrayish": "^0.2.1" } }, "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ=="], - - "es-abstract": ["es-abstract@1.24.1", "", { "dependencies": { "array-buffer-byte-length": "^1.0.2", "arraybuffer.prototype.slice": "^1.0.4", "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", "call-bound": "^1.0.4", "data-view-buffer": "^1.0.2", "data-view-byte-length": "^1.0.2", "data-view-byte-offset": "^1.0.1", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "es-set-tostringtag": "^2.1.0", "es-to-primitive": "^1.3.0", "function.prototype.name": "^1.1.8", "get-intrinsic": "^1.3.0", "get-proto": "^1.0.1", "get-symbol-description": "^1.1.0", "globalthis": "^1.0.4", "gopd": "^1.2.0", "has-property-descriptors": "^1.0.2", "has-proto": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "internal-slot": "^1.1.0", "is-array-buffer": "^3.0.5", "is-callable": "^1.2.7", "is-data-view": 
"^1.0.2", "is-negative-zero": "^2.0.3", "is-regex": "^1.2.1", "is-set": "^2.0.3", "is-shared-array-buffer": "^1.0.4", "is-string": "^1.1.1", "is-typed-array": "^1.1.15", "is-weakref": "^1.1.1", "math-intrinsics": "^1.1.0", "object-inspect": "^1.13.4", "object-keys": "^1.1.1", "object.assign": "^4.1.7", "own-keys": "^1.0.1", "regexp.prototype.flags": "^1.5.4", "safe-array-concat": "^1.1.3", "safe-push-apply": "^1.0.0", "safe-regex-test": "^1.1.0", "set-proto": "^1.0.0", "stop-iteration-iterator": "^1.1.0", "string.prototype.trim": "^1.2.10", "string.prototype.trimend": "^1.0.9", "string.prototype.trimstart": "^1.0.8", "typed-array-buffer": "^1.0.3", "typed-array-byte-length": "^1.0.3", "typed-array-byte-offset": "^1.0.4", "typed-array-length": "^1.0.7", "unbox-primitive": "^1.1.0", "which-typed-array": "^1.1.19" } }, "sha512-zHXBLhP+QehSSbsS9Pt23Gg964240DPd6QCf8WpkqEXxQ7fhdZzYsocOr5u7apWonsS5EjZDmTF+/slGMyasvw=="], - - "es-aggregate-error": ["es-aggregate-error@1.0.14", "", { "dependencies": { "define-data-property": "^1.1.4", "define-properties": "^1.2.1", "es-abstract": "^1.24.0", "es-errors": "^1.3.0", "function-bind": "^1.1.2", "globalthis": "^1.0.4", "has-property-descriptors": "^1.0.2", "set-function-name": "^2.0.2" } }, "sha512-3YxX6rVb07B5TV11AV5wsL7nQCHXNwoHPsQC8S4AmBiqYhyNCJ5BRKXkXyDJvs8QzXN20NgRtxe3dEEQD9NLHA=="], - - "es-define-property": ["es-define-property@1.0.1", "", {}, "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g=="], - - "es-errors": ["es-errors@1.3.0", "", {}, "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="], - - "es-object-atoms": ["es-object-atoms@1.1.1", "", { "dependencies": { "es-errors": "^1.3.0" } }, "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA=="], - - "es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", 
"has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="], - - "es-to-primitive": ["es-to-primitive@1.3.0", "", { "dependencies": { "is-callable": "^1.2.7", "is-date-object": "^1.0.5", "is-symbol": "^1.0.4" } }, "sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g=="], - - "es6-promise": ["es6-promise@3.3.1", "", {}, "sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg=="], - - "esbuild": ["esbuild@0.25.12", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.25.12", "@esbuild/android-arm": "0.25.12", "@esbuild/android-arm64": "0.25.12", "@esbuild/android-x64": "0.25.12", "@esbuild/darwin-arm64": "0.25.12", "@esbuild/darwin-x64": "0.25.12", "@esbuild/freebsd-arm64": "0.25.12", "@esbuild/freebsd-x64": "0.25.12", "@esbuild/linux-arm": "0.25.12", "@esbuild/linux-arm64": "0.25.12", "@esbuild/linux-ia32": "0.25.12", "@esbuild/linux-loong64": "0.25.12", "@esbuild/linux-mips64el": "0.25.12", "@esbuild/linux-ppc64": "0.25.12", "@esbuild/linux-riscv64": "0.25.12", "@esbuild/linux-s390x": "0.25.12", "@esbuild/linux-x64": "0.25.12", "@esbuild/netbsd-arm64": "0.25.12", "@esbuild/netbsd-x64": "0.25.12", "@esbuild/openbsd-arm64": "0.25.12", "@esbuild/openbsd-x64": "0.25.12", "@esbuild/openharmony-arm64": "0.25.12", "@esbuild/sunos-x64": "0.25.12", "@esbuild/win32-arm64": "0.25.12", "@esbuild/win32-ia32": "0.25.12", "@esbuild/win32-x64": "0.25.12" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg=="], - - "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], - - "escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="], - - "escape-string-regexp": 
["escape-string-regexp@4.0.0", "", {}, "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA=="], - - "eslint": ["eslint@9.39.2", "", { "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", "@eslint/config-array": "^0.21.1", "@eslint/config-helpers": "^0.4.2", "@eslint/core": "^0.17.0", "@eslint/eslintrc": "^3.3.1", "@eslint/js": "9.39.2", "@eslint/plugin-kit": "^0.4.1", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": "^1.0.1", "@humanwhocodes/retry": "^0.4.2", "@types/estree": "^1.0.6", "ajv": "^6.12.4", "chalk": "^4.0.0", "cross-spawn": "^7.0.6", "debug": "^4.3.2", "escape-string-regexp": "^4.0.0", "eslint-scope": "^8.4.0", "eslint-visitor-keys": "^4.2.1", "espree": "^10.4.0", "esquery": "^1.5.0", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", "file-entry-cache": "^8.0.0", "find-up": "^5.0.0", "glob-parent": "^6.0.2", "ignore": "^5.2.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", "json-stable-stringify-without-jsonify": "^1.0.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.2", "natural-compare": "^1.4.0", "optionator": "^0.9.3" }, "peerDependencies": { "jiti": "*" }, "optionalPeers": ["jiti"], "bin": { "eslint": "bin/eslint.js" } }, "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw=="], - - "eslint-plugin-react-hooks": ["eslint-plugin-react-hooks@7.0.1", "", { "dependencies": { "@babel/core": "^7.24.4", "@babel/parser": "^7.24.4", "hermes-parser": "^0.25.1", "zod": "^3.25.0 || ^4.0.0", "zod-validation-error": "^3.5.0 || ^4.0.0" }, "peerDependencies": { "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" } }, "sha512-O0d0m04evaNzEPoSW+59Mezf8Qt0InfgGIBJnpC0h3NH/WjUAR7BIKUfysC6todmtiZ/A0oUVS8Gce0WhBrHsA=="], - - "eslint-plugin-react-refresh": ["eslint-plugin-react-refresh@0.4.26", "", { "peerDependencies": { "eslint": ">=8.40" } }, 
"sha512-1RETEylht2O6FM/MvgnyvT+8K21wLqDNg4qD51Zj3guhjt433XbnnkVttHMyaVyAFD03QSV4LPS5iE3VQmO7XQ=="], - - "eslint-scope": ["eslint-scope@8.4.0", "", { "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" } }, "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg=="], - - "eslint-visitor-keys": ["eslint-visitor-keys@4.2.1", "", {}, "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ=="], - - "espree": ["espree@10.4.0", "", { "dependencies": { "acorn": "^8.15.0", "acorn-jsx": "^5.3.2", "eslint-visitor-keys": "^4.2.1" } }, "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ=="], - - "esprima": ["esprima@4.0.1", "", { "bin": { "esparse": "./bin/esparse.js", "esvalidate": "./bin/esvalidate.js" } }, "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A=="], - - "esquery": ["esquery@1.6.0", "", { "dependencies": { "estraverse": "^5.1.0" } }, "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg=="], - - "esrecurse": ["esrecurse@4.3.0", "", { "dependencies": { "estraverse": "^5.2.0" } }, "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag=="], - - "estraverse": ["estraverse@5.3.0", "", {}, "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA=="], - - "esutils": ["esutils@2.0.3", "", {}, "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g=="], - - "etag": ["etag@1.8.1", "", {}, "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="], - - "event-target-shim": ["event-target-shim@5.0.1", "", {}, "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ=="], - - "eventsource": ["eventsource@3.0.7", "", { "dependencies": { "eventsource-parser": "^3.0.1" } }, 
"sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA=="], - - "eventsource-parser": ["eventsource-parser@3.0.6", "", {}, "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg=="], - - "execa": ["execa@5.1.1", "", { "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^6.0.0", "human-signals": "^2.1.0", "is-stream": "^2.0.0", "merge-stream": "^2.0.0", "npm-run-path": "^4.0.1", "onetime": "^5.1.2", "signal-exit": "^3.0.3", "strip-final-newline": "^2.0.0" } }, "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg=="], - - "express": ["express@5.2.1", "", { "dependencies": { "accepts": "^2.0.0", "body-parser": "^2.2.1", "content-disposition": "^1.0.0", "content-type": "^1.0.5", "cookie": "^0.7.1", "cookie-signature": "^1.2.1", "debug": "^4.4.0", "depd": "^2.0.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "finalhandler": "^2.1.0", "fresh": "^2.0.0", "http-errors": "^2.0.0", "merge-descriptors": "^2.0.0", "mime-types": "^3.0.0", "on-finished": "^2.4.1", "once": "^1.4.0", "parseurl": "^1.3.3", "proxy-addr": "^2.0.7", "qs": "^6.14.0", "range-parser": "^1.2.1", "router": "^2.2.0", "send": "^1.1.0", "serve-static": "^2.2.0", "statuses": "^2.0.1", "type-is": "^2.0.1", "vary": "^1.1.2" } }, "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw=="], - - "express-rate-limit": ["express-rate-limit@7.5.1", "", { "peerDependencies": { "express": ">= 4.11" } }, "sha512-7iN8iPMDzOMHPUYllBEsQdWVB6fPDMPqwjBaFrgr4Jgr/+okjvzAy+UHlYYL/Vs0OsOrMkwS6PJDkFlJwoxUnw=="], - - "fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="], - - "fast-glob": ["fast-glob@3.3.3", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": 
"^4.0.8" } }, "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg=="], - - "fast-json-stable-stringify": ["fast-json-stable-stringify@2.1.0", "", {}, "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="], - - "fast-levenshtein": ["fast-levenshtein@2.0.6", "", {}, "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw=="], - - "fast-memoize": ["fast-memoize@2.5.2", "", {}, "sha512-Ue0LwpDYErFbmNnZSF0UH6eImUwDmogUO1jyE+JbN2gsQz/jICm1Ve7t9QT0rNSsfJt+Hs4/S3GnsDVjL4HVrw=="], - - "fast-safe-stringify": ["fast-safe-stringify@2.1.1", "", {}, "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA=="], - - "fast-uri": ["fast-uri@3.1.0", "", {}, "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA=="], - - "fastq": ["fastq@1.20.1", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw=="], - - "fdir": ["fdir@6.5.0", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg=="], - - "fetch-blob": ["fetch-blob@3.2.0", "", { "dependencies": { "node-domexception": "^1.0.0", "web-streams-polyfill": "^3.0.3" } }, "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ=="], - - "figures": ["figures@6.1.0", "", { "dependencies": { "is-unicode-supported": "^2.0.0" } }, "sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg=="], - - "file-entry-cache": ["file-entry-cache@8.0.0", "", { "dependencies": { "flat-cache": "^4.0.0" } }, "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ=="], - - "fill-range": ["fill-range@7.1.1", "", { "dependencies": { 
"to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="], - - "finalhandler": ["finalhandler@2.1.1", "", { "dependencies": { "debug": "^4.4.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "on-finished": "^2.4.1", "parseurl": "^1.3.3", "statuses": "^2.0.1" } }, "sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA=="], - - "find-up": ["find-up@5.0.0", "", { "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" } }, "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng=="], - - "flat-cache": ["flat-cache@4.0.1", "", { "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" } }, "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw=="], - - "flatted": ["flatted@3.3.3", "", {}, "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg=="], - - "follow-redirects": ["follow-redirects@1.15.11", "", {}, "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ=="], - - "for-each": ["for-each@0.3.5", "", { "dependencies": { "is-callable": "^1.2.7" } }, "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg=="], - - "form-data": ["form-data@4.0.5", "", { "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "es-set-tostringtag": "^2.1.0", "hasown": "^2.0.2", "mime-types": "^2.1.12" } }, "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w=="], - - "formdata-polyfill": ["formdata-polyfill@4.0.10", "", { "dependencies": { "fetch-blob": "^3.1.2" } }, "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g=="], - - "forwarded": ["forwarded@0.2.0", "", {}, "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow=="], - - 
"fresh": ["fresh@2.0.0", "", {}, "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="], - - "fs-extra": ["fs-extra@11.3.3", "", { "dependencies": { "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", "universalify": "^2.0.0" } }, "sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg=="], - - "fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], - - "function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="], - - "function.prototype.name": ["function.prototype.name@1.1.8", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "functions-have-names": "^1.2.3", "hasown": "^2.0.2", "is-callable": "^1.2.7" } }, "sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q=="], - - "functions-have-names": ["functions-have-names@1.2.3", "", {}, "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ=="], - - "fuzzysort": ["fuzzysort@3.1.0", "", {}, "sha512-sR9BNCjBg6LNgwvxlBd0sBABvQitkLzoVY9MYYROQVX/FvfJ4Mai9LsGhDgd8qYdds0bY77VzYd5iuB+v5rwQQ=="], - - "fzf": ["fzf@0.5.2", "", {}, "sha512-Tt4kuxLXFKHy8KT40zwsUPUkg1CrsgY25FxA2U/j/0WgEDCk3ddc/zLTCCcbSHX9FcKtLuVaDGtGE/STWC+j3Q=="], - - "generator-function": ["generator-function@2.0.1", "", {}, "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g=="], - - "gensync": ["gensync@1.0.0-beta.2", "", {}, "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg=="], - - "get-caller-file": ["get-caller-file@2.0.5", "", {}, "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="], - - "get-east-asian-width": 
["get-east-asian-width@1.4.0", "", {}, "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q=="], - - "get-intrinsic": ["get-intrinsic@1.3.0", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" } }, "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ=="], - - "get-nonce": ["get-nonce@1.0.1", "", {}, "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q=="], - - "get-own-enumerable-keys": ["get-own-enumerable-keys@1.0.0", "", {}, "sha512-PKsK2FSrQCyxcGHsGrLDcK0lx+0Ke+6e8KFFozA9/fIQLhQzPaRvJFdcz7+Axg3jUH/Mq+NI4xa5u/UT2tQskA=="], - - "get-proto": ["get-proto@1.0.1", "", { "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" } }, "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g=="], - - "get-stream": ["get-stream@6.0.1", "", {}, "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg=="], - - "get-symbol-description": ["get-symbol-description@1.1.0", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6" } }, "sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg=="], - - "get-tsconfig": ["get-tsconfig@4.13.0", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ=="], - - "glob-parent": ["glob-parent@6.0.2", "", { "dependencies": { "is-glob": "^4.0.3" } }, "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A=="], - - "globals": ["globals@16.5.0", "", {}, 
"sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ=="], - - "globalthis": ["globalthis@1.0.4", "", { "dependencies": { "define-properties": "^1.2.1", "gopd": "^1.0.1" } }, "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ=="], - - "globby": ["globby@11.1.0", "", { "dependencies": { "array-union": "^2.1.0", "dir-glob": "^3.0.1", "fast-glob": "^3.2.9", "ignore": "^5.2.0", "merge2": "^1.4.1", "slash": "^3.0.0" } }, "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g=="], - - "goober": ["goober@2.1.18", "", { "peerDependencies": { "csstype": "^3.0.10" } }, "sha512-2vFqsaDVIT9Gz7N6kAL++pLpp41l3PfDuusHcjnGLfR6+huZkl6ziX+zgVC3ZxpqWhzH6pyDdGrCeDhMIvwaxw=="], - - "gopd": ["gopd@1.2.0", "", {}, "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg=="], - - "graceful-fs": ["graceful-fs@4.2.11", "", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="], - - "graphql": ["graphql@16.12.0", "", {}, "sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ=="], - - "has-bigints": ["has-bigints@1.1.0", "", {}, "sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg=="], - - "has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="], - - "has-property-descriptors": ["has-property-descriptors@1.0.2", "", { "dependencies": { "es-define-property": "^1.0.0" } }, "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg=="], - - "has-proto": ["has-proto@1.2.0", "", { "dependencies": { "dunder-proto": "^1.0.0" } }, "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ=="], - - "has-symbols": ["has-symbols@1.1.0", "", {}, 
"sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="], - - "has-tostringtag": ["has-tostringtag@1.0.2", "", { "dependencies": { "has-symbols": "^1.0.3" } }, "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw=="], - - "hasown": ["hasown@2.0.2", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="], - - "headers-polyfill": ["headers-polyfill@4.0.3", "", {}, "sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ=="], - - "hermes-estree": ["hermes-estree@0.25.1", "", {}, "sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw=="], - - "hermes-parser": ["hermes-parser@0.25.1", "", { "dependencies": { "hermes-estree": "0.25.1" } }, "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA=="], - - "hono": ["hono@4.11.1", "", {}, "sha512-KsFcH0xxHes0J4zaQgWbYwmz3UPOOskdqZmItstUG93+Wk1ePBLkLGwbP9zlmh1BFUiL8Qp+Xfu9P7feJWpGNg=="], - - "http-errors": ["http-errors@2.0.1", "", { "dependencies": { "depd": "~2.0.0", "inherits": "~2.0.4", "setprototypeof": "~1.2.0", "statuses": "~2.0.2", "toidentifier": "~1.0.1" } }, "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ=="], - - "http2-client": ["http2-client@1.3.5", "", {}, "sha512-EC2utToWl4RKfs5zd36Mxq7nzHHBuomZboI0yYL6Y0RmBgT7Sgkq4rQ0ezFTYoIsSs7Tm9SJe+o2FcAg6GBhGA=="], - - "https-proxy-agent": ["https-proxy-agent@7.0.6", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw=="], - - "human-signals": ["human-signals@2.1.0", "", {}, "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw=="], - - "iconv-lite": ["iconv-lite@0.7.1", "", { 
"dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw=="], - - "ignore": ["ignore@5.3.2", "", {}, "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g=="], - - "immer": ["immer@9.0.21", "", {}, "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA=="], - - "import-fresh": ["import-fresh@3.3.1", "", { "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" } }, "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ=="], - - "imurmurhash": ["imurmurhash@0.1.4", "", {}, "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA=="], - - "inflected": ["inflected@2.1.0", "", {}, "sha512-hAEKNxvHf2Iq3H60oMBHkB4wl5jn3TPF3+fXek/sRwAB5gP9xWs4r7aweSF95f99HFoz69pnZTcu8f0SIHV18w=="], - - "inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="], - - "internal-slot": ["internal-slot@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "hasown": "^2.0.2", "side-channel": "^1.1.0" } }, "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw=="], - - "ipaddr.js": ["ipaddr.js@1.9.1", "", {}, "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g=="], - - "is-array-buffer": ["is-array-buffer@3.0.5", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "get-intrinsic": "^1.2.6" } }, "sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A=="], - - "is-arrayish": ["is-arrayish@0.2.1", "", {}, "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg=="], - - "is-async-function": ["is-async-function@2.1.1", "", { "dependencies": { "async-function": "^1.0.0", 
"call-bound": "^1.0.3", "get-proto": "^1.0.1", "has-tostringtag": "^1.0.2", "safe-regex-test": "^1.1.0" } }, "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ=="], - - "is-bigint": ["is-bigint@1.1.0", "", { "dependencies": { "has-bigints": "^1.0.2" } }, "sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ=="], - - "is-binary-path": ["is-binary-path@2.1.0", "", { "dependencies": { "binary-extensions": "^2.0.0" } }, "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw=="], - - "is-boolean-object": ["is-boolean-object@1.2.2", "", { "dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A=="], - - "is-callable": ["is-callable@1.2.7", "", {}, "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA=="], - - "is-data-view": ["is-data-view@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "get-intrinsic": "^1.2.6", "is-typed-array": "^1.1.13" } }, "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw=="], - - "is-date-object": ["is-date-object@1.1.0", "", { "dependencies": { "call-bound": "^1.0.2", "has-tostringtag": "^1.0.2" } }, "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg=="], - - "is-docker": ["is-docker@3.0.0", "", { "bin": { "is-docker": "cli.js" } }, "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ=="], - - "is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="], - - "is-finalizationregistry": ["is-finalizationregistry@1.1.1", "", { "dependencies": { "call-bound": "^1.0.3" } }, 
"sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg=="], - - "is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], - - "is-generator-function": ["is-generator-function@1.1.2", "", { "dependencies": { "call-bound": "^1.0.4", "generator-function": "^2.0.0", "get-proto": "^1.0.1", "has-tostringtag": "^1.0.2", "safe-regex-test": "^1.1.0" } }, "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA=="], - - "is-glob": ["is-glob@4.0.3", "", { "dependencies": { "is-extglob": "^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="], - - "is-in-ssh": ["is-in-ssh@1.0.0", "", {}, "sha512-jYa6Q9rH90kR1vKB6NM7qqd1mge3Fx4Dhw5TVlK1MUBqhEOuCagrEHMevNuCcbECmXZ0ThXkRm+Ymr51HwEPAw=="], - - "is-inside-container": ["is-inside-container@1.0.0", "", { "dependencies": { "is-docker": "^3.0.0" }, "bin": { "is-inside-container": "cli.js" } }, "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA=="], - - "is-interactive": ["is-interactive@2.0.0", "", {}, "sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ=="], - - "is-map": ["is-map@2.0.3", "", {}, "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw=="], - - "is-negative-zero": ["is-negative-zero@2.0.3", "", {}, "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw=="], - - "is-node-process": ["is-node-process@1.2.0", "", {}, "sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw=="], - - "is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="], - - "is-number-object": ["is-number-object@1.1.1", "", { 
"dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw=="], - - "is-obj": ["is-obj@3.0.0", "", {}, "sha512-IlsXEHOjtKhpN8r/tRFj2nDyTmHvcfNeu/nrRIcXE17ROeatXchkojffa1SpdqW4cr/Fj6QkEf/Gn4zf6KKvEQ=="], - - "is-plain-obj": ["is-plain-obj@4.1.0", "", {}, "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg=="], - - "is-promise": ["is-promise@4.0.0", "", {}, "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ=="], - - "is-regex": ["is-regex@1.2.1", "", { "dependencies": { "call-bound": "^1.0.2", "gopd": "^1.2.0", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g=="], - - "is-regexp": ["is-regexp@3.1.0", "", {}, "sha512-rbku49cWloU5bSMI+zaRaXdQHXnthP6DZ/vLnfdSKyL4zUzuWnomtOEiZZOd+ioQ+avFo/qau3KPTc7Fjy1uPA=="], - - "is-set": ["is-set@2.0.3", "", {}, "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg=="], - - "is-shared-array-buffer": ["is-shared-array-buffer@1.0.4", "", { "dependencies": { "call-bound": "^1.0.3" } }, "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A=="], - - "is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="], - - "is-string": ["is-string@1.1.1", "", { "dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA=="], - - "is-symbol": ["is-symbol@1.1.1", "", { "dependencies": { "call-bound": "^1.0.2", "has-symbols": "^1.1.0", "safe-regex-test": "^1.1.0" } }, "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w=="], - - "is-typed-array": 
["is-typed-array@1.1.15", "", { "dependencies": { "which-typed-array": "^1.1.16" } }, "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ=="], - - "is-unicode-supported": ["is-unicode-supported@2.1.0", "", {}, "sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ=="], - - "is-weakmap": ["is-weakmap@2.0.2", "", {}, "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w=="], - - "is-weakref": ["is-weakref@1.1.1", "", { "dependencies": { "call-bound": "^1.0.3" } }, "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew=="], - - "is-weakset": ["is-weakset@2.0.4", "", { "dependencies": { "call-bound": "^1.0.3", "get-intrinsic": "^1.2.6" } }, "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ=="], - - "is-wsl": ["is-wsl@3.1.0", "", { "dependencies": { "is-inside-container": "^1.0.0" } }, "sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw=="], - - "isarray": ["isarray@2.0.5", "", {}, "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw=="], - - "isbot": ["isbot@5.1.32", "", {}, "sha512-VNfjM73zz2IBZmdShMfAUg10prm6t7HFUQmNAEOAVS4YH92ZrZcvkMcGX6cIgBJAzWDzPent/EeAtYEHNPNPBQ=="], - - "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], - - "jiti": ["jiti@2.6.1", "", { "bin": { "jiti": "lib/jiti-cli.mjs" } }, "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ=="], - - "jose": ["jose@6.1.3", "", {}, "sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ=="], - - "js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="], - - "js-yaml": 
["js-yaml@4.1.1", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA=="], - - "jsep": ["jsep@1.4.0", "", {}, "sha512-B7qPcEVE3NVkmSJbaYxvv4cHkVW7DQsZz13pUMrfS8z8Q/BuShN+gcTXrUlPiGqM2/t/EEaI030bpxMqY8gMlw=="], - - "jsesc": ["jsesc@3.1.0", "", { "bin": { "jsesc": "bin/jsesc" } }, "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA=="], - - "json-buffer": ["json-buffer@3.0.1", "", {}, "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ=="], - - "json-parse-even-better-errors": ["json-parse-even-better-errors@2.3.1", "", {}, "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w=="], - - "json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="], - - "json-schema-typed": ["json-schema-typed@8.0.2", "", {}, "sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA=="], - - "json-stable-stringify-without-jsonify": ["json-stable-stringify-without-jsonify@1.0.1", "", {}, "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw=="], - - "json5": ["json5@2.2.3", "", { "bin": { "json5": "lib/cli.js" } }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="], - - "jsonc-parser": ["jsonc-parser@2.2.1", "", {}, "sha512-o6/yDBYccGvTz1+QFevz6l6OBZ2+fMVu2JZ9CIhzsYRX4mjaK5IyX9eldUdCmga16zlgQxyrj5pt9kzuj2C02w=="], - - "jsonfile": ["jsonfile@6.2.0", "", { "dependencies": { "universalify": "^2.0.0" }, "optionalDependencies": { "graceful-fs": "^4.1.6" } }, "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg=="], - - "jsonpath-plus": ["jsonpath-plus@10.3.0", "", { 
"dependencies": { "@jsep-plugin/assignment": "^1.3.0", "@jsep-plugin/regex": "^1.0.4", "jsep": "^1.4.0" }, "bin": { "jsonpath": "bin/jsonpath-cli.js", "jsonpath-plus": "bin/jsonpath-cli.js" } }, "sha512-8TNmfeTCk2Le33A3vRRwtuworG/L5RrgMvdjhKZxvyShO+mBu2fP50OWUjRLNtvw344DdDarFh9buFAZs5ujeA=="], - - "jsonpointer": ["jsonpointer@5.0.1", "", {}, "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ=="], - - "jsonschema": ["jsonschema@1.5.0", "", {}, "sha512-K+A9hhqbn0f3pJX17Q/7H6yQfD/5OXgdrR5UE12gMXCiN9D5Xq2o5mddV2QEcX/bjla99ASsAAQUyMCCRWAEhw=="], - - "keyv": ["keyv@4.5.4", "", { "dependencies": { "json-buffer": "3.0.1" } }, "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw=="], - - "kleur": ["kleur@4.1.5", "", {}, "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ=="], - - "leven": ["leven@3.1.0", "", {}, "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A=="], - - "levn": ["levn@0.4.1", "", { "dependencies": { "prelude-ls": "^1.2.1", "type-check": "~0.4.0" } }, "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ=="], - - "lightningcss": ["lightningcss@1.30.2", "", { "dependencies": { "detect-libc": "^2.0.3" }, "optionalDependencies": { "lightningcss-android-arm64": "1.30.2", "lightningcss-darwin-arm64": "1.30.2", "lightningcss-darwin-x64": "1.30.2", "lightningcss-freebsd-x64": "1.30.2", "lightningcss-linux-arm-gnueabihf": "1.30.2", "lightningcss-linux-arm64-gnu": "1.30.2", "lightningcss-linux-arm64-musl": "1.30.2", "lightningcss-linux-x64-gnu": "1.30.2", "lightningcss-linux-x64-musl": "1.30.2", "lightningcss-win32-arm64-msvc": "1.30.2", "lightningcss-win32-x64-msvc": "1.30.2" } }, "sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ=="], - - "lightningcss-android-arm64": ["lightningcss-android-arm64@1.30.2", "", { 
"os": "android", "cpu": "arm64" }, "sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A=="], - - "lightningcss-darwin-arm64": ["lightningcss-darwin-arm64@1.30.2", "", { "os": "darwin", "cpu": "arm64" }, "sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA=="], - - "lightningcss-darwin-x64": ["lightningcss-darwin-x64@1.30.2", "", { "os": "darwin", "cpu": "x64" }, "sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ=="], - - "lightningcss-freebsd-x64": ["lightningcss-freebsd-x64@1.30.2", "", { "os": "freebsd", "cpu": "x64" }, "sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA=="], - - "lightningcss-linux-arm-gnueabihf": ["lightningcss-linux-arm-gnueabihf@1.30.2", "", { "os": "linux", "cpu": "arm" }, "sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA=="], - - "lightningcss-linux-arm64-gnu": ["lightningcss-linux-arm64-gnu@1.30.2", "", { "os": "linux", "cpu": "arm64" }, "sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A=="], - - "lightningcss-linux-arm64-musl": ["lightningcss-linux-arm64-musl@1.30.2", "", { "os": "linux", "cpu": "arm64" }, "sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA=="], - - "lightningcss-linux-x64-gnu": ["lightningcss-linux-x64-gnu@1.30.2", "", { "os": "linux", "cpu": "x64" }, "sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w=="], - - "lightningcss-linux-x64-musl": ["lightningcss-linux-x64-musl@1.30.2", "", { "os": "linux", "cpu": "x64" }, "sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA=="], - - "lightningcss-win32-arm64-msvc": ["lightningcss-win32-arm64-msvc@1.30.2", "", { "os": "win32", "cpu": "arm64" }, 
"sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ=="], - - "lightningcss-win32-x64-msvc": ["lightningcss-win32-x64-msvc@1.30.2", "", { "os": "win32", "cpu": "x64" }, "sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw=="], - - "lines-and-columns": ["lines-and-columns@1.2.4", "", {}, "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg=="], - - "linkify-it": ["linkify-it@5.0.0", "", { "dependencies": { "uc.micro": "^2.0.0" } }, "sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ=="], - - "locate-path": ["locate-path@6.0.0", "", { "dependencies": { "p-locate": "^5.0.0" } }, "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw=="], - - "lodash": ["lodash@4.17.21", "", {}, "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="], - - "lodash.isempty": ["lodash.isempty@4.4.0", "", {}, "sha512-oKMuF3xEeqDltrGMfDxAPGIVMSSRv8tbRSODbrs4KGsRRLEhrW8N8Rd4DRgB2+621hY8A8XwwrTVhXWpxFvMzg=="], - - "lodash.merge": ["lodash.merge@4.6.2", "", {}, "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ=="], - - "lodash.omitby": ["lodash.omitby@4.6.0", "", {}, "sha512-5OrRcIVR75M288p4nbI2WLAf3ndw2GD9fyNv3Bc15+WCxJDdZ4lYndSxGd7hnG6PVjiJTeJE2dHEGhIuKGicIQ=="], - - "lodash.topath": ["lodash.topath@4.5.2", "", {}, "sha512-1/W4dM+35DwvE/iEd1M9ekewOSTlpFekhw9mhAtrwjVqUr83/ilQiyAvmg4tVX7Unkcfl1KC+i9WdaT4B6aQcg=="], - - "lodash.uniq": ["lodash.uniq@4.5.0", "", {}, "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ=="], - - "lodash.uniqby": ["lodash.uniqby@4.7.0", "", {}, "sha512-e/zcLx6CSbmaEgFHCA7BnoQKyCtKMxnuWrJygbwPs/AIn+IMKl66L8/s+wBUn5LRw2pZx3bUHibiV1b6aTWIww=="], - - "lodash.uniqwith": ["lodash.uniqwith@4.5.0", "", {}, 
"sha512-7lYL8bLopMoy4CTICbxygAUq6CdRJ36vFc80DucPueUee+d5NBRxz3FdT9Pes/HEx5mPoT9jwnsEJWz1N7uq7Q=="], - - "log-symbols": ["log-symbols@6.0.0", "", { "dependencies": { "chalk": "^5.3.0", "is-unicode-supported": "^1.3.0" } }, "sha512-i24m8rpwhmPIS4zscNzK6MSEhk0DUWa/8iYQWxhffV8jkI4Phvs3F+quL5xvS0gdQR0FyTCMMH33Y78dDTzzIw=="], - - "loglevel": ["loglevel@1.9.2", "", {}, "sha512-HgMmCqIJSAKqo68l0rS2AanEWfkxaZ5wNiEFb5ggm08lDs9Xl2KxBlX3PTcaD2chBM1gXAYf491/M2Rv8Jwayg=="], - - "loglevel-plugin-prefix": ["loglevel-plugin-prefix@0.8.4", "", {}, "sha512-WpG9CcFAOjz/FtNht+QJeGpvVl/cdR6P0z6OcXSkr8wFJOsV2GRj2j10JLfjuA4aYkcKCNIEqRGCyTife9R8/g=="], - - "lru-cache": ["lru-cache@5.1.1", "", { "dependencies": { "yallist": "^3.0.2" } }, "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w=="], - - "lucide-react": ["lucide-react@0.562.0", "", { "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw=="], - - "lunr": ["lunr@2.3.9", "", {}, "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow=="], - - "magic-string": ["magic-string@0.30.21", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.5" } }, "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ=="], - - "markdown-it": ["markdown-it@14.1.0", "", { "dependencies": { "argparse": "^2.0.1", "entities": "^4.4.0", "linkify-it": "^5.0.0", "mdurl": "^2.0.0", "punycode.js": "^2.3.1", "uc.micro": "^2.1.0" }, "bin": { "markdown-it": "bin/markdown-it.mjs" } }, "sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg=="], - - "math-intrinsics": ["math-intrinsics@1.1.0", "", {}, "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g=="], - - "mdurl": ["mdurl@2.0.0", "", {}, 
"sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w=="], - - "media-typer": ["media-typer@1.1.0", "", {}, "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw=="], - - "merge-descriptors": ["merge-descriptors@2.0.0", "", {}, "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g=="], - - "merge-stream": ["merge-stream@2.0.0", "", {}, "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w=="], - - "merge2": ["merge2@1.4.1", "", {}, "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="], - - "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], - - "mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], - - "mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], - - "mimic-fn": ["mimic-fn@2.1.0", "", {}, "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg=="], - - "mimic-function": ["mimic-function@5.0.1", "", {}, "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA=="], - - "minimatch": ["minimatch@3.1.2", "", { "dependencies": { "brace-expansion": "^1.1.7" } }, "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw=="], - - "minimist": ["minimist@1.2.8", "", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], - - "ms": ["ms@2.1.3", "", {}, 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], - - "msw": ["msw@2.12.4", "", { "dependencies": { "@inquirer/confirm": "^5.0.0", "@mswjs/interceptors": "^0.40.0", "@open-draft/deferred-promise": "^2.2.0", "@types/statuses": "^2.0.6", "cookie": "^1.0.2", "graphql": "^16.12.0", "headers-polyfill": "^4.0.2", "is-node-process": "^1.2.0", "outvariant": "^1.4.3", "path-to-regexp": "^6.3.0", "picocolors": "^1.1.1", "rettime": "^0.7.0", "statuses": "^2.0.2", "strict-event-emitter": "^0.5.1", "tough-cookie": "^6.0.0", "type-fest": "^5.2.0", "until-async": "^3.0.2", "yargs": "^17.7.2" }, "peerDependencies": { "typescript": ">= 4.8.x" }, "optionalPeers": ["typescript"], "bin": { "msw": "cli/index.js" } }, "sha512-rHNiVfTyKhzc0EjoXUBVGteNKBevdjOlVC6GlIRXpy+/3LHEIGRovnB5WPjcvmNODVQ1TNFnoa7wsGbd0V3epg=="], - - "mute-stream": ["mute-stream@2.0.0", "", {}, "sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA=="], - - "nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="], - - "natural-compare": ["natural-compare@1.4.0", "", {}, "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw=="], - - "negotiator": ["negotiator@1.0.0", "", {}, "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg=="], - - "next-themes": ["next-themes@0.4.6", "", { "peerDependencies": { "react": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc", "react-dom": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc" } }, "sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA=="], - - "nimma": ["nimma@0.2.3", "", { "dependencies": { "@jsep-plugin/regex": "^1.0.1", "@jsep-plugin/ternary": "^1.0.2", "astring": "^1.8.1", "jsep": "^1.2.0" }, "optionalDependencies": { "jsonpath-plus": "^6.0.1 || ^10.1.0", 
"lodash.topath": "^4.5.2" } }, "sha512-1ZOI8J+1PKKGceo/5CT5GfQOG6H8I2BencSK06YarZ2wXwH37BSSUWldqJmMJYA5JfqDqffxDXynt6f11AyKcA=="], - - "node-domexception": ["node-domexception@1.0.0", "", {}, "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ=="], - - "node-fetch": ["node-fetch@3.3.2", "", { "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", "formdata-polyfill": "^4.0.10" } }, "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA=="], - - "node-fetch-h2": ["node-fetch-h2@2.3.0", "", { "dependencies": { "http2-client": "^1.2.5" } }, "sha512-ofRW94Ab0T4AOh5Fk8t0h8OBWrmjb0SSB20xh1H8YnPV9EJ+f5AMoYSUQ2zgJ4Iq2HAK0I2l5/Nequ8YzFS3Hg=="], - - "node-readfiles": ["node-readfiles@0.2.0", "", { "dependencies": { "es6-promise": "^3.2.1" } }, "sha512-SU00ZarexNlE4Rjdm83vglt5Y9yiQ+XI1XpflWlb7q7UTN1JUItm69xMeiQCTxtTfnzt+83T8Cx+vI2ED++VDA=="], - - "node-releases": ["node-releases@2.0.27", "", {}, "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA=="], - - "normalize-path": ["normalize-path@3.0.0", "", {}, "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA=="], - - "npm-run-path": ["npm-run-path@4.0.1", "", { "dependencies": { "path-key": "^3.0.0" } }, "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw=="], - - "oas-kit-common": ["oas-kit-common@1.0.8", "", { "dependencies": { "fast-safe-stringify": "^2.0.7" } }, "sha512-pJTS2+T0oGIwgjGpw7sIRU8RQMcUoKCDWFLdBqKB2BNmGpbBMH2sdqAaOXUg8OzonZHU0L7vfJu1mJFEiYDWOQ=="], - - "oas-linter": ["oas-linter@3.2.2", "", { "dependencies": { "@exodus/schemasafe": "^1.0.0-rc.2", "should": "^13.2.1", "yaml": "^1.10.0" } }, "sha512-KEGjPDVoU5K6swgo9hJVA/qYGlwfbFx+Kg2QB/kd7rzV5N8N5Mg6PlsoCMohVnQmo+pzJap/F610qTodKzecGQ=="], - - "oas-resolver": ["oas-resolver@2.5.6", "", { "dependencies": { "node-fetch-h2": "^2.3.0", 
"oas-kit-common": "^1.0.8", "reftools": "^1.1.9", "yaml": "^1.10.0", "yargs": "^17.0.1" }, "bin": { "resolve": "resolve.js" } }, "sha512-Yx5PWQNZomfEhPPOphFbZKi9W93CocQj18NlD2Pa4GWZzdZpSJvYwoiuurRI7m3SpcChrnO08hkuQDL3FGsVFQ=="], - - "oas-schema-walker": ["oas-schema-walker@1.1.5", "", {}, "sha512-2yucenq1a9YPmeNExoUa9Qwrt9RFkjqaMAA1X+U7sbb0AqBeTIdMHky9SQQ6iN94bO5NW0W4TRYXerG+BdAvAQ=="], - - "oas-validator": ["oas-validator@5.0.8", "", { "dependencies": { "call-me-maybe": "^1.0.1", "oas-kit-common": "^1.0.8", "oas-linter": "^3.2.2", "oas-resolver": "^2.5.6", "oas-schema-walker": "^1.1.5", "reftools": "^1.1.9", "should": "^13.2.1", "yaml": "^1.10.0" } }, "sha512-cu20/HE5N5HKqVygs3dt94eYJfBi0TsZvPVXDhbXQHiEityDN+RROTleefoKRKKJ9dFAF2JBkDHgvWj0sjKGmw=="], - - "object-assign": ["object-assign@4.1.1", "", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="], - - "object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="], - - "object-keys": ["object-keys@1.1.1", "", {}, "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA=="], - - "object-treeify": ["object-treeify@1.1.33", "", {}, "sha512-EFVjAYfzWqWsBMRHPMAXLCDIJnpMhdWAqR7xG6M6a2cs6PMFpl/+Z20w9zDW4vkxOFfddegBKq9Rehd0bxWE7A=="], - - "object.assign": ["object.assign@4.1.7", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.3", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0", "has-symbols": "^1.1.0", "object-keys": "^1.1.1" } }, "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw=="], - - "on-finished": ["on-finished@2.4.1", "", { "dependencies": { "ee-first": "1.1.1" } }, "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg=="], - - "once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, 
"sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="], - - "onetime": ["onetime@5.1.2", "", { "dependencies": { "mimic-fn": "^2.1.0" } }, "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg=="], - - "open": ["open@11.0.0", "", { "dependencies": { "default-browser": "^5.4.0", "define-lazy-prop": "^3.0.0", "is-in-ssh": "^1.0.0", "is-inside-container": "^1.0.0", "powershell-utils": "^0.1.0", "wsl-utils": "^0.3.0" } }, "sha512-smsWv2LzFjP03xmvFoJ331ss6h+jixfA4UUV/Bsiyuu4YJPfN+FIQGOIiv4w9/+MoHkfkJ22UIaQWRVFRfH6Vw=="], - - "openapi-types": ["openapi-types@12.1.3", "", {}, "sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw=="], - - "openapi3-ts": ["openapi3-ts@4.5.0", "", { "dependencies": { "yaml": "^2.8.0" } }, "sha512-jaL+HgTq2Gj5jRcfdutgRGLosCy/hT8sQf6VOy+P+g36cZOjI1iukdPnijC+4CmeRzg/jEllJUboEic2FhxhtQ=="], - - "optionator": ["optionator@0.9.4", "", { "dependencies": { "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", "levn": "^0.4.1", "prelude-ls": "^1.2.1", "type-check": "^0.4.0", "word-wrap": "^1.2.5" } }, "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g=="], - - "ora": ["ora@8.2.0", "", { "dependencies": { "chalk": "^5.3.0", "cli-cursor": "^5.0.0", "cli-spinners": "^2.9.2", "is-interactive": "^2.0.0", "is-unicode-supported": "^2.0.0", "log-symbols": "^6.0.0", "stdin-discarder": "^0.2.2", "string-width": "^7.2.0", "strip-ansi": "^7.1.0" } }, "sha512-weP+BZ8MVNnlCm8c0Qdc1WSWq4Qn7I+9CJGm7Qali6g44e/PUzbjNqJX5NJ9ljlNMosfJvg1fKEGILklK9cwnw=="], - - "orval": ["orval@7.17.2", "", { "dependencies": { "@apidevtools/swagger-parser": "^12.1.0", "@commander-js/extra-typings": "^14.0.0", "@orval/angular": "7.17.2", "@orval/axios": "7.17.2", "@orval/core": "7.17.2", "@orval/fetch": "7.17.2", "@orval/hono": "7.17.2", "@orval/mcp": "7.17.2", "@orval/mock": "7.17.2", "@orval/query": "7.17.2", "@orval/swr": 
"7.17.2", "@orval/zod": "7.17.2", "chalk": "^4.1.2", "chokidar": "^4.0.3", "commander": "^14.0.1", "enquirer": "^2.4.1", "execa": "^5.1.1", "find-up": "5.0.0", "fs-extra": "^11.3.2", "jiti": "^2.6.1", "js-yaml": "4.1.1", "lodash.uniq": "^4.5.0", "openapi3-ts": "4.5.0", "string-argv": "^0.3.2", "tsconfck": "^2.1.2", "typedoc": "^0.28.14", "typedoc-plugin-coverage": "^4.0.2", "typedoc-plugin-markdown": "^4.9.0" }, "bin": "./dist/bin/orval.js" }, "sha512-6+drCVVWNukdX+ytFPOC2UJ51gv0kGAPch7cTQYxO7VsGwdZ1DiZhYozdGehwu6QeVvAszPhPlh4uLq53k0x+w=="], - - "outvariant": ["outvariant@1.4.3", "", {}, "sha512-+Sl2UErvtsoajRDKCE5/dBz4DIvHXQQnAxtQTF04OJxY0+DyZXSo5P5Bb7XYWOh81syohlYL24hbDwxedPUJCA=="], - - "own-keys": ["own-keys@1.0.1", "", { "dependencies": { "get-intrinsic": "^1.2.6", "object-keys": "^1.1.1", "safe-push-apply": "^1.0.0" } }, "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg=="], - - "p-limit": ["p-limit@3.1.0", "", { "dependencies": { "yocto-queue": "^0.1.0" } }, "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ=="], - - "p-locate": ["p-locate@5.0.0", "", { "dependencies": { "p-limit": "^3.0.2" } }, "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw=="], - - "package-manager-detector": ["package-manager-detector@1.6.0", "", {}, "sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA=="], - - "parent-module": ["parent-module@1.0.1", "", { "dependencies": { "callsites": "^3.0.0" } }, "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g=="], - - "parse-json": ["parse-json@5.2.0", "", { "dependencies": { "@babel/code-frame": "^7.0.0", "error-ex": "^1.3.1", "json-parse-even-better-errors": "^2.3.0", "lines-and-columns": "^1.1.6" } }, "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg=="], - - "parse-ms": 
["parse-ms@4.0.0", "", {}, "sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw=="], - - "parseurl": ["parseurl@1.3.3", "", {}, "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="], - - "path-browserify": ["path-browserify@1.0.1", "", {}, "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g=="], - - "path-exists": ["path-exists@4.0.0", "", {}, "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="], - - "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="], - - "path-to-regexp": ["path-to-regexp@6.3.0", "", {}, "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ=="], - - "path-type": ["path-type@4.0.0", "", {}, "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw=="], - - "pathe": ["pathe@2.0.3", "", {}, "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w=="], - - "picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="], - - "picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="], - - "pkce-challenge": ["pkce-challenge@5.0.1", "", {}, "sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ=="], - - "pony-cause": ["pony-cause@1.1.1", "", {}, "sha512-PxkIc/2ZpLiEzQXu5YRDOUgBlfGYBY8156HY5ZcRAwwonMk5W/MrJP2LLkG/hF7GEQzaHo2aS7ho6ZLCOvf+6g=="], - - "possible-typed-array-names": ["possible-typed-array-names@1.1.0", "", {}, "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg=="], - - "postcss": ["postcss@8.5.6", "", { "dependencies": { "nanoid": 
"^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg=="], - - "postcss-selector-parser": ["postcss-selector-parser@7.1.1", "", { "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" } }, "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg=="], - - "powershell-utils": ["powershell-utils@0.1.0", "", {}, "sha512-dM0jVuXJPsDN6DvRpea484tCUaMiXWjuCn++HGTqUWzGDjv5tZkEZldAJ/UMlqRYGFrD/etByo4/xOuC/snX2A=="], - - "prelude-ls": ["prelude-ls@1.2.1", "", {}, "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g=="], - - "prettier": ["prettier@3.7.4", "", { "bin": { "prettier": "bin/prettier.cjs" } }, "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA=="], - - "prettier-plugin-tailwindcss": ["prettier-plugin-tailwindcss@0.7.2", "", { "peerDependencies": { "@ianvs/prettier-plugin-sort-imports": "*", "@prettier/plugin-hermes": "*", "@prettier/plugin-oxc": "*", "@prettier/plugin-pug": "*", "@shopify/prettier-plugin-liquid": "*", "@trivago/prettier-plugin-sort-imports": "*", "@zackad/prettier-plugin-twig": "*", "prettier": "^3.0", "prettier-plugin-astro": "*", "prettier-plugin-css-order": "*", "prettier-plugin-jsdoc": "*", "prettier-plugin-marko": "*", "prettier-plugin-multiline-arrays": "*", "prettier-plugin-organize-attributes": "*", "prettier-plugin-organize-imports": "*", "prettier-plugin-sort-imports": "*", "prettier-plugin-svelte": "*" }, "optionalPeers": ["@ianvs/prettier-plugin-sort-imports", "@prettier/plugin-hermes", "@prettier/plugin-oxc", "@prettier/plugin-pug", "@shopify/prettier-plugin-liquid", "@trivago/prettier-plugin-sort-imports", "@zackad/prettier-plugin-twig", "prettier-plugin-astro", "prettier-plugin-css-order", "prettier-plugin-jsdoc", "prettier-plugin-marko", "prettier-plugin-multiline-arrays", 
"prettier-plugin-organize-attributes", "prettier-plugin-organize-imports", "prettier-plugin-sort-imports", "prettier-plugin-svelte"] }, "sha512-LkphyK3Fw+q2HdMOoiEHWf93fNtYJwfamoKPl7UwtjFQdei/iIBoX11G6j706FzN3ymX9mPVi97qIY8328vdnA=="], - - "pretty-ms": ["pretty-ms@9.3.0", "", { "dependencies": { "parse-ms": "^4.0.0" } }, "sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ=="], - - "prompts": ["prompts@2.4.2", "", { "dependencies": { "kleur": "^3.0.3", "sisteransi": "^1.0.5" } }, "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q=="], - - "proxy-addr": ["proxy-addr@2.0.7", "", { "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg=="], - - "proxy-from-env": ["proxy-from-env@1.1.0", "", {}, "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg=="], - - "punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="], - - "punycode.js": ["punycode.js@2.3.1", "", {}, "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA=="], - - "qs": ["qs@6.14.0", "", { "dependencies": { "side-channel": "^1.1.0" } }, "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w=="], - - "queue-microtask": ["queue-microtask@1.2.3", "", {}, "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="], - - "radix-ui": ["radix-ui@1.4.3", "", { "dependencies": { "@radix-ui/primitive": "1.1.3", "@radix-ui/react-accessible-icon": "1.1.7", "@radix-ui/react-accordion": "1.2.12", "@radix-ui/react-alert-dialog": "1.1.15", "@radix-ui/react-arrow": "1.1.7", "@radix-ui/react-aspect-ratio": "1.1.7", "@radix-ui/react-avatar": "1.1.10", "@radix-ui/react-checkbox": "1.3.3", 
"@radix-ui/react-collapsible": "1.1.12", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-context-menu": "2.2.16", "@radix-ui/react-dialog": "1.1.15", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.11", "@radix-ui/react-dropdown-menu": "2.1.16", "@radix-ui/react-focus-guards": "1.1.3", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-form": "0.1.8", "@radix-ui/react-hover-card": "1.1.15", "@radix-ui/react-label": "2.1.7", "@radix-ui/react-menu": "2.1.16", "@radix-ui/react-menubar": "1.1.16", "@radix-ui/react-navigation-menu": "1.2.14", "@radix-ui/react-one-time-password-field": "0.1.8", "@radix-ui/react-password-toggle-field": "0.1.3", "@radix-ui/react-popover": "1.1.15", "@radix-ui/react-popper": "1.2.8", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.5", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-progress": "1.1.7", "@radix-ui/react-radio-group": "1.3.8", "@radix-ui/react-roving-focus": "1.1.11", "@radix-ui/react-scroll-area": "1.2.10", "@radix-ui/react-select": "2.2.6", "@radix-ui/react-separator": "1.1.7", "@radix-ui/react-slider": "1.3.6", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-switch": "1.2.6", "@radix-ui/react-tabs": "1.1.13", "@radix-ui/react-toast": "1.2.15", "@radix-ui/react-toggle": "1.1.10", "@radix-ui/react-toggle-group": "1.1.11", "@radix-ui/react-toolbar": "1.1.11", "@radix-ui/react-tooltip": "1.2.8", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-escape-keydown": "1.1.1", "@radix-ui/react-use-is-hydrated": "0.1.0", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-size": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", 
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-aWizCQiyeAenIdUbqEpXgRA1ya65P13NKn/W8rWkcN0OPkRDxdBVLWnIEDsS2RpwCK2nobI7oMUSmexzTDyAmA=="], - - "range-parser": ["range-parser@1.2.1", "", {}, "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="], - - "raw-body": ["raw-body@3.0.2", "", { "dependencies": { "bytes": "~3.1.2", "http-errors": "~2.0.1", "iconv-lite": "~0.7.0", "unpipe": "~1.0.0" } }, "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA=="], - - "react": ["react@19.2.3", "", {}, "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA=="], - - "react-dom": ["react-dom@19.2.3", "", { "dependencies": { "scheduler": "^0.27.0" }, "peerDependencies": { "react": "^19.2.3" } }, "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg=="], - - "react-refresh": ["react-refresh@0.18.0", "", {}, "sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw=="], - - "react-remove-scroll": ["react-remove-scroll@2.7.2", "", { "dependencies": { "react-remove-scroll-bar": "^2.3.7", "react-style-singleton": "^2.2.3", "tslib": "^2.1.0", "use-callback-ref": "^1.3.3", "use-sidecar": "^1.1.3" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Iqb9NjCCTt6Hf+vOdNIZGdTiH1QSqr27H/Ek9sv/a97gfueI/5h1s3yRi1nngzMUaOOToin5dI1dXKdXiF+u0Q=="], - - "react-remove-scroll-bar": ["react-remove-scroll-bar@2.3.8", "", { "dependencies": { "react-style-singleton": "^2.2.2", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" }, "optionalPeers": ["@types/react"] }, 
"sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q=="], - - "react-style-singleton": ["react-style-singleton@2.2.3", "", { "dependencies": { "get-nonce": "^1.0.0", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ=="], - - "readdirp": ["readdirp@3.6.0", "", { "dependencies": { "picomatch": "^2.2.1" } }, "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA=="], - - "recast": ["recast@0.23.11", "", { "dependencies": { "ast-types": "^0.16.1", "esprima": "~4.0.0", "source-map": "~0.6.1", "tiny-invariant": "^1.3.3", "tslib": "^2.0.1" } }, "sha512-YTUo+Flmw4ZXiWfQKGcwwc11KnoRAYgzAE2E7mXKCjSviTKShtxBsN6YUUBB2gtaBzKzeKunxhUwNHQuRryhWA=="], - - "reflect.getprototypeof": ["reflect.getprototypeof@1.0.10", "", { "dependencies": { "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-abstract": "^1.23.9", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0", "get-intrinsic": "^1.2.7", "get-proto": "^1.0.1", "which-builtin-type": "^1.2.1" } }, "sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw=="], - - "reftools": ["reftools@1.1.9", "", {}, "sha512-OVede/NQE13xBQ+ob5CKd5KyeJYU2YInb1bmV4nRoOfquZPkAkxuOXicSe1PvqIuZZ4kD13sPKBbR7UFDmli6w=="], - - "regexp.prototype.flags": ["regexp.prototype.flags@1.5.4", "", { "dependencies": { "call-bind": "^1.0.8", "define-properties": "^1.2.1", "es-errors": "^1.3.0", "get-proto": "^1.0.1", "gopd": "^1.2.0", "set-function-name": "^2.0.2" } }, "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA=="], - - "require-directory": ["require-directory@2.1.1", "", {}, "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="], - - 
"require-from-string": ["require-from-string@2.0.2", "", {}, "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw=="], - - "reselect": ["reselect@5.1.1", "", {}, "sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w=="], - - "resolve-from": ["resolve-from@4.0.0", "", {}, "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g=="], - - "resolve-pkg-maps": ["resolve-pkg-maps@1.0.0", "", {}, "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw=="], - - "restore-cursor": ["restore-cursor@5.1.0", "", { "dependencies": { "onetime": "^7.0.0", "signal-exit": "^4.1.0" } }, "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA=="], - - "rettime": ["rettime@0.7.0", "", {}, "sha512-LPRKoHnLKd/r3dVxcwO7vhCW+orkOGj9ViueosEBK6ie89CijnfRlhaDhHq/3Hxu4CkWQtxwlBG0mzTQY6uQjw=="], - - "reusify": ["reusify@1.1.0", "", {}, "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw=="], - - "rolldown": ["rolldown@1.0.0-beta.50", "", { "dependencies": { "@oxc-project/types": "=0.97.0", "@rolldown/pluginutils": "1.0.0-beta.50" }, "optionalDependencies": { "@rolldown/binding-android-arm64": "1.0.0-beta.50", "@rolldown/binding-darwin-arm64": "1.0.0-beta.50", "@rolldown/binding-darwin-x64": "1.0.0-beta.50", "@rolldown/binding-freebsd-x64": "1.0.0-beta.50", "@rolldown/binding-linux-arm-gnueabihf": "1.0.0-beta.50", "@rolldown/binding-linux-arm64-gnu": "1.0.0-beta.50", "@rolldown/binding-linux-arm64-musl": "1.0.0-beta.50", "@rolldown/binding-linux-x64-gnu": "1.0.0-beta.50", "@rolldown/binding-linux-x64-musl": "1.0.0-beta.50", "@rolldown/binding-openharmony-arm64": "1.0.0-beta.50", "@rolldown/binding-wasm32-wasi": "1.0.0-beta.50", "@rolldown/binding-win32-arm64-msvc": "1.0.0-beta.50", "@rolldown/binding-win32-ia32-msvc": "1.0.0-beta.50", 
"@rolldown/binding-win32-x64-msvc": "1.0.0-beta.50" }, "bin": { "rolldown": "bin/cli.mjs" } }, "sha512-JFULvCNl/anKn99eKjOSEubi0lLmNqQDAjyEMME2T4CwezUDL0i6t1O9xZsu2OMehPnV2caNefWpGF+8TnzB6A=="], - - "router": ["router@2.2.0", "", { "dependencies": { "debug": "^4.4.0", "depd": "^2.0.0", "is-promise": "^4.0.0", "parseurl": "^1.3.3", "path-to-regexp": "^8.0.0" } }, "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ=="], - - "run-applescript": ["run-applescript@7.1.0", "", {}, "sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q=="], - - "run-parallel": ["run-parallel@1.2.0", "", { "dependencies": { "queue-microtask": "^1.2.2" } }, "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA=="], - - "safe-array-concat": ["safe-array-concat@1.1.3", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.2", "get-intrinsic": "^1.2.6", "has-symbols": "^1.1.0", "isarray": "^2.0.5" } }, "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q=="], - - "safe-push-apply": ["safe-push-apply@1.0.0", "", { "dependencies": { "es-errors": "^1.3.0", "isarray": "^2.0.5" } }, "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA=="], - - "safe-regex-test": ["safe-regex-test@1.1.0", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "is-regex": "^1.2.1" } }, "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw=="], - - "safe-stable-stringify": ["safe-stable-stringify@1.1.1", "", {}, "sha512-ERq4hUjKDbJfE4+XtZLFPCDi8Vb1JqaxAPTxWFLBx8XcAlf9Bda/ZJdVezs/NAfsMQScyIlUMx+Yeu7P7rx5jw=="], - - "safer-buffer": ["safer-buffer@2.1.2", "", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="], - - "scheduler": ["scheduler@0.27.0", "", {}, 
"sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q=="], - - "semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], - - "send": ["send@1.2.1", "", { "dependencies": { "debug": "^4.4.3", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "fresh": "^2.0.0", "http-errors": "^2.0.1", "mime-types": "^3.0.2", "ms": "^2.1.3", "on-finished": "^2.4.1", "range-parser": "^1.2.1", "statuses": "^2.0.2" } }, "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ=="], - - "seroval": ["seroval@1.4.1", "", {}, "sha512-9GOc+8T6LN4aByLN75uRvMbrwY5RDBW6lSlknsY4LEa9ZmWcxKcRe1G/Q3HZXjltxMHTrStnvrwAICxZrhldtg=="], - - "seroval-plugins": ["seroval-plugins@1.4.0", "", { "peerDependencies": { "seroval": "^1.0" } }, "sha512-zir1aWzoiax6pbBVjoYVd0O1QQXgIL3eVGBMsBsNmM8Ukq90yGaWlfx0AB9dTS8GPqrOrbXn79vmItCUP9U3BQ=="], - - "serve-static": ["serve-static@2.2.1", "", { "dependencies": { "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "parseurl": "^1.3.3", "send": "^1.2.0" } }, "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw=="], - - "set-function-length": ["set-function-length@1.2.2", "", { "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", "function-bind": "^1.1.2", "get-intrinsic": "^1.2.4", "gopd": "^1.0.1", "has-property-descriptors": "^1.0.2" } }, "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg=="], - - "set-function-name": ["set-function-name@2.0.2", "", { "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", "functions-have-names": "^1.2.3", "has-property-descriptors": "^1.0.2" } }, "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ=="], - - "set-proto": ["set-proto@1.0.0", "", { "dependencies": { 
"dunder-proto": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.0.0" } }, "sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw=="], - - "setprototypeof": ["setprototypeof@1.2.0", "", {}, "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="], - - "shadcn": ["shadcn@3.6.2", "", { "dependencies": { "@antfu/ni": "^25.0.0", "@babel/core": "^7.28.0", "@babel/parser": "^7.28.0", "@babel/plugin-transform-typescript": "^7.28.0", "@babel/preset-typescript": "^7.27.1", "@dotenvx/dotenvx": "^1.48.4", "@modelcontextprotocol/sdk": "^1.17.2", "browserslist": "^4.26.2", "commander": "^14.0.0", "cosmiconfig": "^9.0.0", "dedent": "^1.6.0", "deepmerge": "^4.3.1", "diff": "^8.0.2", "execa": "^9.6.0", "fast-glob": "^3.3.3", "fs-extra": "^11.3.1", "fuzzysort": "^3.1.0", "https-proxy-agent": "^7.0.6", "kleur": "^4.1.5", "msw": "^2.10.4", "node-fetch": "^3.3.2", "open": "^11.0.0", "ora": "^8.2.0", "postcss": "^8.5.6", "postcss-selector-parser": "^7.1.0", "prompts": "^2.4.2", "recast": "^0.23.11", "stringify-object": "^5.0.0", "ts-morph": "^26.0.0", "tsconfig-paths": "^4.2.0", "zod": "^3.24.1", "zod-to-json-schema": "^3.24.6" }, "bin": { "shadcn": "dist/index.js" } }, "sha512-2g48/7UsXTSWMFU9GYww85AN5iVTkErbeycrcleI55R+atqW8HE1M/YDFyQ+0T3Bwsd4e8vycPu9gmwODunDpw=="], - - "shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="], - - "shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="], - - "should": ["should@13.2.3", "", { "dependencies": { "should-equal": "^2.0.0", "should-format": "^3.0.3", "should-type": "^1.4.0", "should-type-adaptors": "^1.0.1", "should-util": "^1.0.0" } }, 
"sha512-ggLesLtu2xp+ZxI+ysJTmNjh2U0TsC+rQ/pfED9bUZZ4DKefP27D+7YJVVTvKsmjLpIi9jAa7itwDGkDDmt1GQ=="], - - "should-equal": ["should-equal@2.0.0", "", { "dependencies": { "should-type": "^1.4.0" } }, "sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA=="], - - "should-format": ["should-format@3.0.3", "", { "dependencies": { "should-type": "^1.3.0", "should-type-adaptors": "^1.0.1" } }, "sha512-hZ58adtulAk0gKtua7QxevgUaXTTXxIi8t41L3zo9AHvjXO1/7sdLECuHeIN2SRtYXpNkmhoUP2pdeWgricQ+Q=="], - - "should-type": ["should-type@1.4.0", "", {}, "sha512-MdAsTu3n25yDbIe1NeN69G4n6mUnJGtSJHygX3+oN0ZbO3DTiATnf7XnYJdGT42JCXurTb1JI0qOBR65shvhPQ=="], - - "should-type-adaptors": ["should-type-adaptors@1.1.0", "", { "dependencies": { "should-type": "^1.3.0", "should-util": "^1.0.0" } }, "sha512-JA4hdoLnN+kebEp2Vs8eBe9g7uy0zbRo+RMcU0EsNy+R+k049Ki+N5tT5Jagst2g7EAja+euFuoXFCa8vIklfA=="], - - "should-util": ["should-util@1.0.1", "", {}, "sha512-oXF8tfxx5cDk8r2kYqlkUJzZpDBqVY/II2WhvU0n9Y3XYvAYRmeaf1PvvIvTgPnv4KJ+ES5M0PyDq5Jp+Ygy2g=="], - - "side-channel": ["side-channel@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3", "side-channel-list": "^1.0.0", "side-channel-map": "^1.0.1", "side-channel-weakmap": "^1.0.2" } }, "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw=="], - - "side-channel-list": ["side-channel-list@1.0.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3" } }, "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA=="], - - "side-channel-map": ["side-channel-map@1.0.1", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3" } }, "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA=="], - - "side-channel-weakmap": ["side-channel-weakmap@1.0.2", "", { "dependencies": { "call-bound": 
"^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3", "side-channel-map": "^1.0.1" } }, "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A=="], - - "signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], - - "simple-eval": ["simple-eval@1.0.1", "", { "dependencies": { "jsep": "^1.3.6" } }, "sha512-LH7FpTAkeD+y5xQC4fzS+tFtaNlvt3Ib1zKzvhjv/Y+cioV4zIuw4IZr2yhRLu67CWL7FR9/6KXKnjRoZTvGGQ=="], - - "sisteransi": ["sisteransi@1.0.5", "", {}, "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg=="], - - "slash": ["slash@3.0.0", "", {}, "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q=="], - - "solid-js": ["solid-js@1.9.10", "", { "dependencies": { "csstype": "^3.1.0", "seroval": "~1.3.0", "seroval-plugins": "~1.3.0" } }, "sha512-Coz956cos/EPDlhs6+jsdTxKuJDPT7B5SVIWgABwROyxjY7Xbr8wkzD68Et+NxnV7DLJ3nJdAC2r9InuV/4Jew=="], - - "sonner": ["sonner@2.0.7", "", { "peerDependencies": { "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc" } }, "sha512-W6ZN4p58k8aDKA4XPcx2hpIQXBRAgyiWVkYhT7CvK6D3iAu7xjvVyhQHg2/iaKJZ1XVJ4r7XuwGL+WGEK37i9w=="], - - "source-map": ["source-map@0.7.6", "", {}, "sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ=="], - - "source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="], - - "statuses": ["statuses@2.0.2", "", {}, "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw=="], - - "stdin-discarder": ["stdin-discarder@0.2.2", "", {}, "sha512-UhDfHmA92YAlNnCfhmq0VeNL5bDbiZGg7sZ2IvPsXubGkiNa9EC+tUTsjBRsYUAz87btI6/1wf4XoVvQ3uRnmQ=="], - - "stop-iteration-iterator": ["stop-iteration-iterator@1.1.0", 
"", { "dependencies": { "es-errors": "^1.3.0", "internal-slot": "^1.1.0" } }, "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ=="], - - "strict-event-emitter": ["strict-event-emitter@0.5.1", "", {}, "sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ=="], - - "string-argv": ["string-argv@0.3.2", "", {}, "sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q=="], - - "string-width": ["string-width@7.2.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="], - - "string.prototype.trim": ["string.prototype.trim@1.2.10", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.2", "define-data-property": "^1.1.4", "define-properties": "^1.2.1", "es-abstract": "^1.23.5", "es-object-atoms": "^1.0.0", "has-property-descriptors": "^1.0.2" } }, "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA=="], - - "string.prototype.trimend": ["string.prototype.trimend@1.0.9", "", { "dependencies": { "call-bind": "^1.0.8", "call-bound": "^1.0.2", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" } }, "sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ=="], - - "string.prototype.trimstart": ["string.prototype.trimstart@1.0.8", "", { "dependencies": { "call-bind": "^1.0.7", "define-properties": "^1.2.1", "es-object-atoms": "^1.0.0" } }, "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg=="], - - "stringify-object": ["stringify-object@5.0.0", "", { "dependencies": { "get-own-enumerable-keys": "^1.0.0", "is-obj": "^3.0.0", "is-regexp": "^3.1.0" } }, "sha512-zaJYxz2FtcMb4f+g60KsRNFOpVMUyuJgA51Zi5Z1DOTC3S59+OQiVOzE9GZt0x72uBGWKsQIuBKeF9iusmKFsg=="], - - 
"strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], - - "strip-bom": ["strip-bom@3.0.0", "", {}, "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA=="], - - "strip-final-newline": ["strip-final-newline@2.0.0", "", {}, "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA=="], - - "strip-json-comments": ["strip-json-comments@3.1.1", "", {}, "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig=="], - - "supports-color": ["supports-color@7.2.0", "", { "dependencies": { "has-flag": "^4.0.0" } }, "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw=="], - - "swagger2openapi": ["swagger2openapi@7.0.8", "", { "dependencies": { "call-me-maybe": "^1.0.1", "node-fetch": "^2.6.1", "node-fetch-h2": "^2.3.0", "node-readfiles": "^0.2.0", "oas-kit-common": "^1.0.8", "oas-resolver": "^2.5.6", "oas-schema-walker": "^1.1.5", "oas-validator": "^5.0.8", "reftools": "^1.1.9", "yaml": "^1.10.0", "yargs": "^17.0.1" }, "bin": { "swagger2openapi": "swagger2openapi.js", "oas-validate": "oas-validate.js", "boast": "boast.js" } }, "sha512-upi/0ZGkYgEcLeGieoz8gT74oWHA0E7JivX7aN9mAf+Tc7BQoRBvnIGHoPDw+f9TXTW4s6kGYCZJtauP6OYp7g=="], - - "tabbable": ["tabbable@6.3.0", "", {}, "sha512-EIHvdY5bPLuWForiR/AN2Bxngzpuwn1is4asboytXtpTgsArc+WmSJKVLlhdh71u7jFcryDqB2A8lQvj78MkyQ=="], - - "tagged-tag": ["tagged-tag@1.0.0", "", {}, "sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng=="], - - "tailwind-merge": ["tailwind-merge@3.4.0", "", {}, "sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g=="], - - "tailwindcss": ["tailwindcss@4.1.18", "", {}, 
"sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw=="], - - "tapable": ["tapable@2.3.0", "", {}, "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg=="], - - "tiny-invariant": ["tiny-invariant@1.3.3", "", {}, "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg=="], - - "tiny-warning": ["tiny-warning@1.0.3", "", {}, "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA=="], - - "tinyexec": ["tinyexec@1.0.2", "", {}, "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg=="], - - "tinyglobby": ["tinyglobby@0.2.15", "", { "dependencies": { "fdir": "^6.5.0", "picomatch": "^4.0.3" } }, "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ=="], - - "tldts": ["tldts@7.0.19", "", { "dependencies": { "tldts-core": "^7.0.19" }, "bin": { "tldts": "bin/cli.js" } }, "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA=="], - - "tldts-core": ["tldts-core@7.0.19", "", {}, "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A=="], - - "to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="], - - "toidentifier": ["toidentifier@1.0.1", "", {}, "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA=="], - - "tough-cookie": ["tough-cookie@6.0.0", "", { "dependencies": { "tldts": "^7.0.5" } }, "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w=="], - - "tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="], - - "ts-api-utils": ["ts-api-utils@2.1.0", "", { 
"peerDependencies": { "typescript": ">=4.8.4" } }, "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ=="], - - "ts-morph": ["ts-morph@26.0.0", "", { "dependencies": { "@ts-morph/common": "~0.27.0", "code-block-writer": "^13.0.3" } }, "sha512-ztMO++owQnz8c/gIENcM9XfCEzgoGphTv+nKpYNM1bgsdOVC/jRZuEBf6N+mLLDNg68Kl+GgUZfOySaRiG1/Ug=="], - - "tsconfck": ["tsconfck@2.1.2", "", { "peerDependencies": { "typescript": "^4.3.5 || ^5.0.0" }, "optionalPeers": ["typescript"], "bin": { "tsconfck": "bin/tsconfck.js" } }, "sha512-ghqN1b0puy3MhhviwO2kGF8SeMDNhEbnKxjK7h6+fvY9JAxqvXi8y5NAHSQv687OVboS2uZIByzGd45/YxrRHg=="], - - "tsconfig-paths": ["tsconfig-paths@4.2.0", "", { "dependencies": { "json5": "^2.2.2", "minimist": "^1.2.6", "strip-bom": "^3.0.0" } }, "sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg=="], - - "tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], - - "tsx": ["tsx@4.21.0", "", { "dependencies": { "esbuild": "~0.27.0", "get-tsconfig": "^4.7.5" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "bin": { "tsx": "dist/cli.mjs" } }, "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw=="], - - "tw-animate-css": ["tw-animate-css@1.4.0", "", {}, "sha512-7bziOlRqH0hJx80h/3mbicLW7o8qLsH5+RaLR2t+OHM3D0JlWGODQKQ4cxbK7WlvmUxpcj6Kgu6EKqjrGFe3QQ=="], - - "type-check": ["type-check@0.4.0", "", { "dependencies": { "prelude-ls": "^1.2.1" } }, "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew=="], - - "type-fest": ["type-fest@5.3.1", "", { "dependencies": { "tagged-tag": "^1.0.0" } }, "sha512-VCn+LMHbd4t6sF3wfU/+HKT63C9OoyrSIf4b+vtWHpt2U7/4InZG467YDNMFMR70DdHjAdpPWmw2lzRdg0Xqqg=="], - - "type-is": ["type-is@2.0.1", "", { "dependencies": { "content-type": "^1.0.5", "media-typer": "^1.1.0", "mime-types": "^3.0.0" } }, 
"sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw=="], - - "typed-array-buffer": ["typed-array-buffer@1.0.3", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-typed-array": "^1.1.14" } }, "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw=="], - - "typed-array-byte-length": ["typed-array-byte-length@1.0.3", "", { "dependencies": { "call-bind": "^1.0.8", "for-each": "^0.3.3", "gopd": "^1.2.0", "has-proto": "^1.2.0", "is-typed-array": "^1.1.14" } }, "sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg=="], - - "typed-array-byte-offset": ["typed-array-byte-offset@1.0.4", "", { "dependencies": { "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", "for-each": "^0.3.3", "gopd": "^1.2.0", "has-proto": "^1.2.0", "is-typed-array": "^1.1.15", "reflect.getprototypeof": "^1.0.9" } }, "sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ=="], - - "typed-array-length": ["typed-array-length@1.0.7", "", { "dependencies": { "call-bind": "^1.0.7", "for-each": "^0.3.3", "gopd": "^1.0.1", "is-typed-array": "^1.1.13", "possible-typed-array-names": "^1.0.0", "reflect.getprototypeof": "^1.0.6" } }, "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg=="], - - "typedoc": ["typedoc@0.28.15", "", { "dependencies": { "@gerrit0/mini-shiki": "^3.17.0", "lunr": "^2.3.9", "markdown-it": "^14.1.0", "minimatch": "^9.0.5", "yaml": "^2.8.1" }, "peerDependencies": { "typescript": "5.0.x || 5.1.x || 5.2.x || 5.3.x || 5.4.x || 5.5.x || 5.6.x || 5.7.x || 5.8.x || 5.9.x" }, "bin": { "typedoc": "bin/typedoc" } }, "sha512-mw2/2vTL7MlT+BVo43lOsufkkd2CJO4zeOSuWQQsiXoV2VuEn7f6IZp2jsUDPmBMABpgR0R5jlcJ2OGEFYmkyg=="], - - "typedoc-plugin-coverage": ["typedoc-plugin-coverage@4.0.2", "", { "peerDependencies": { "typedoc": "0.28.x" } }, 
"sha512-mfn0e7NCqB8x2PfvhXrtmd7KWlsNf1+B2N9y8gR/jexXBLrXl/0e+b2HdG5HaTXGi7i0t2pyQY2VRmq7gtdEHQ=="], - - "typedoc-plugin-markdown": ["typedoc-plugin-markdown@4.9.0", "", { "peerDependencies": { "typedoc": "0.28.x" } }, "sha512-9Uu4WR9L7ZBgAl60N/h+jqmPxxvnC9nQAlnnO/OujtG2ubjnKTVUFY1XDhcMY+pCqlX3N2HsQM2QTYZIU9tJuw=="], - - "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], - - "typescript-eslint": ["typescript-eslint@8.50.1", "", { "dependencies": { "@typescript-eslint/eslint-plugin": "8.50.1", "@typescript-eslint/parser": "8.50.1", "@typescript-eslint/typescript-estree": "8.50.1", "@typescript-eslint/utils": "8.50.1" }, "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" } }, "sha512-ytTHO+SoYSbhAH9CrYnMhiLx8To6PSSvqnvXyPUgPETCvB6eBKmTI9w6XMPS3HsBRGkwTVBX+urA8dYQx6bHfQ=="], - - "uc.micro": ["uc.micro@2.1.0", "", {}, "sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A=="], - - "unbox-primitive": ["unbox-primitive@1.1.0", "", { "dependencies": { "call-bound": "^1.0.3", "has-bigints": "^1.0.2", "has-symbols": "^1.1.0", "which-boxed-primitive": "^1.1.1" } }, "sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw=="], - - "undici-types": ["undici-types@7.16.0", "", {}, "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw=="], - - "unicorn-magic": ["unicorn-magic@0.3.0", "", {}, "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA=="], - - "universalify": ["universalify@2.0.1", "", {}, "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw=="], - - "unpipe": ["unpipe@1.0.0", "", {}, "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ=="], - - 
"unplugin": ["unplugin@2.3.11", "", { "dependencies": { "@jridgewell/remapping": "^2.3.5", "acorn": "^8.15.0", "picomatch": "^4.0.3", "webpack-virtual-modules": "^0.6.2" } }, "sha512-5uKD0nqiYVzlmCRs01Fhs2BdkEgBS3SAVP6ndrBsuK42iC2+JHyxM05Rm9G8+5mkmRtzMZGY8Ct5+mliZxU/Ww=="], - - "until-async": ["until-async@3.0.2", "", {}, "sha512-IiSk4HlzAMqTUseHHe3VhIGyuFmN90zMTpD3Z3y8jeQbzLIq500MVM7Jq2vUAnTKAFPJrqwkzr6PoTcPhGcOiw=="], - - "update-browserslist-db": ["update-browserslist-db@1.2.3", "", { "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" }, "peerDependencies": { "browserslist": ">= 4.21.0" }, "bin": { "update-browserslist-db": "cli.js" } }, "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w=="], - - "uri-js": ["uri-js@4.4.1", "", { "dependencies": { "punycode": "^2.1.0" } }, "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg=="], - - "urijs": ["urijs@1.19.11", "", {}, "sha512-HXgFDgDommxn5/bIv0cnQZsPhHDA90NPHD6+c/v21U5+Sx5hoP8+dP9IZXBU1gIfvdRfhG8cel9QNPeionfcCQ=="], - - "use-callback-ref": ["use-callback-ref@1.3.3", "", { "dependencies": { "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg=="], - - "use-sidecar": ["use-sidecar@1.1.3", "", { "dependencies": { "detect-node-es": "^1.1.0", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ=="], - - "use-sync-external-store": ["use-sync-external-store@1.6.0", "", { "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, 
"sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w=="], - - "util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="], - - "utility-types": ["utility-types@3.11.0", "", {}, "sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw=="], - - "validator": ["validator@13.15.26", "", {}, "sha512-spH26xU080ydGggxRyR1Yhcbgx+j3y5jbNXk/8L+iRvdIEQ4uTRH2Sgf2dokud6Q4oAtsbNvJ1Ft+9xmm6IZcA=="], - - "vary": ["vary@1.1.2", "", {}, "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg=="], - - "vite": ["rolldown-vite@7.2.5", "", { "dependencies": { "@oxc-project/runtime": "0.97.0", "fdir": "^6.5.0", "lightningcss": "^1.30.2", "picomatch": "^4.0.3", "postcss": "^8.5.6", "rolldown": "1.0.0-beta.50", "tinyglobby": "^0.2.15" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "peerDependencies": { "@types/node": "^20.19.0 || >=22.12.0", "esbuild": "^0.25.0", "jiti": ">=1.21.0", "less": "^4.0.0", "sass": "^1.70.0", "sass-embedded": "^1.70.0", "stylus": ">=0.54.8", "sugarss": "^5.0.0", "terser": "^5.16.0", "tsx": "^4.8.1", "yaml": "^2.4.2" }, "optionalPeers": ["@types/node", "esbuild", "jiti", "less", "sass", "sass-embedded", "stylus", "sugarss", "terser", "tsx", "yaml"], "bin": { "vite": "bin/vite.js" } }, "sha512-u09tdk/huMiN8xwoiBbig197jKdCamQTtOruSalOzbqGje3jdHiV0njQlAW0YvzoahkirFePNQ4RYlfnRQpXZA=="], - - "web-streams-polyfill": ["web-streams-polyfill@3.3.3", "", {}, "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw=="], - - "webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="], - - "webpack-virtual-modules": ["webpack-virtual-modules@0.6.2", "", {}, 
"sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ=="], - - "whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="], - - "which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="], - - "which-boxed-primitive": ["which-boxed-primitive@1.1.1", "", { "dependencies": { "is-bigint": "^1.1.0", "is-boolean-object": "^1.2.1", "is-number-object": "^1.1.1", "is-string": "^1.1.1", "is-symbol": "^1.1.1" } }, "sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA=="], - - "which-builtin-type": ["which-builtin-type@1.2.1", "", { "dependencies": { "call-bound": "^1.0.2", "function.prototype.name": "^1.1.6", "has-tostringtag": "^1.0.2", "is-async-function": "^2.0.0", "is-date-object": "^1.1.0", "is-finalizationregistry": "^1.1.0", "is-generator-function": "^1.0.10", "is-regex": "^1.2.1", "is-weakref": "^1.0.2", "isarray": "^2.0.5", "which-boxed-primitive": "^1.1.0", "which-collection": "^1.0.2", "which-typed-array": "^1.1.16" } }, "sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q=="], - - "which-collection": ["which-collection@1.0.2", "", { "dependencies": { "is-map": "^2.0.3", "is-set": "^2.0.3", "is-weakmap": "^2.0.2", "is-weakset": "^2.0.3" } }, "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw=="], - - "which-typed-array": ["which-typed-array@1.1.19", "", { "dependencies": { "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", "call-bound": "^1.0.4", "for-each": "^0.3.5", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-tostringtag": "^1.0.2" } }, 
"sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw=="], - - "word-wrap": ["word-wrap@1.2.5", "", {}, "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA=="], - - "wrap-ansi": ["wrap-ansi@6.2.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA=="], - - "wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="], - - "wsl-utils": ["wsl-utils@0.3.0", "", { "dependencies": { "is-wsl": "^3.1.0", "powershell-utils": "^0.1.0" } }, "sha512-3sFIGLiaDP7rTO4xh3g+b3AzhYDIUGGywE/WsmqzJWDxus5aJXVnPTNC/6L+r2WzrwXqVOdD262OaO+cEyPMSQ=="], - - "y18n": ["y18n@5.0.8", "", {}, "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA=="], - - "yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="], - - "yaml": ["yaml@2.8.2", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A=="], - - "yargs": ["yargs@17.7.2", "", { "dependencies": { "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", "string-width": "^4.2.3", "y18n": "^5.0.5", "yargs-parser": "^21.1.1" } }, "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w=="], - - "yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="], - - "yocto-queue": ["yocto-queue@0.1.0", "", {}, "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q=="], - - "yoctocolors": ["yoctocolors@2.1.2", "", {}, 
"sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug=="], - - "yoctocolors-cjs": ["yoctocolors-cjs@2.1.3", "", {}, "sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw=="], - - "zod": ["zod@4.2.1", "", {}, "sha512-0wZ1IRqGGhMP76gLqz8EyfBXKk0J2qo2+H3fi4mcUP/KtTocoX08nmIAHl1Z2kJIZbZee8KOpBCSNPRgauucjw=="], - - "zod-to-json-schema": ["zod-to-json-schema@3.25.0", "", { "peerDependencies": { "zod": "^3.25 || ^4" } }, "sha512-HvWtU2UG41LALjajJrML6uQejQhNJx+JBO9IflpSja4R03iNWfKXrj6W2h7ljuLyc1nKS+9yDyL/9tD1U/yBnQ=="], - - "zod-validation-error": ["zod-validation-error@4.0.2", "", { "peerDependencies": { "zod": "^3.25.0 || ^4.0.0" } }, "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ=="], - - "@apidevtools/swagger-parser/ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="], - - "@dotenvx/dotenvx/commander": ["commander@11.1.0", "", {}, "sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ=="], - - "@dotenvx/dotenvx/which": ["which@4.0.0", "", { "dependencies": { "isexe": "^3.1.1" }, "bin": { "node-which": "bin/which.js" } }, "sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg=="], - - "@eslint-community/eslint-utils/eslint-visitor-keys": ["eslint-visitor-keys@3.4.3", "", {}, "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag=="], - - "@eslint/eslintrc/globals": ["globals@14.0.0", "", {}, "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ=="], - - "@ibm-cloud/openapi-ruleset/minimatch": ["minimatch@6.2.0", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, 
"sha512-sauLxniAmvnhhRjFwPNnJKaPFYyddAgbYdeUpHULtCT/GhzdCx/MDNy+Y40lBxTQUrMzDE8e0S43Z5uqfO0REg=="], - - "@inquirer/core/signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], - - "@modelcontextprotocol/sdk/ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="], - - "@stoplight/better-ajv-errors/ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="], - - "@stoplight/json-ref-readers/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="], - - "@stoplight/json-ref-readers/tslib": ["tslib@1.14.1", "", {}, "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="], - - "@stoplight/spectral-core/ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="], - - "@stoplight/spectral-core/ajv-formats": ["ajv-formats@2.1.1", "", { "dependencies": { "ajv": "^8.0.0" } }, "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA=="], - - "@stoplight/spectral-functions/ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", 
"require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="], - - "@stoplight/spectral-functions/ajv-formats": ["ajv-formats@2.1.1", "", { "dependencies": { "ajv": "^8.0.0" } }, "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA=="], - - "@stoplight/spectral-parsers/@stoplight/types": ["@stoplight/types@14.1.1", "", { "dependencies": { "@types/json-schema": "^7.0.4", "utility-types": "^3.10.0" } }, "sha512-/kjtr+0t0tjKr+heVfviO9FrU/uGLc+QNX3fHJc19xsCNYqU7lVhaXxDmEID9BZTjG+/r9pK9xP/xU02XGg65g=="], - - "@stoplight/spectral-rulesets/ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="], - - "@stoplight/spectral-rulesets/ajv-formats": ["ajv-formats@2.1.1", "", { "dependencies": { "ajv": "^8.0.0" } }, "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA=="], - - "@stoplight/spectral-rulesets/json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], - - "@stoplight/spectral-runtime/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="], - - "@stoplight/yaml/@stoplight/types": ["@stoplight/types@14.1.1", "", { "dependencies": { "@types/json-schema": "^7.0.4", "utility-types": "^3.10.0" } }, "sha512-/kjtr+0t0tjKr+heVfviO9FrU/uGLc+QNX3fHJc19xsCNYqU7lVhaXxDmEID9BZTjG+/r9pK9xP/xU02XGg65g=="], - - "@tailwindcss/oxide-wasm32-wasi/@emnapi/core": ["@emnapi/core@1.7.1", "", { "dependencies": { 
"@emnapi/wasi-threads": "1.1.0", "tslib": "^2.4.0" }, "bundled": true }, "sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg=="], - - "@tailwindcss/oxide-wasm32-wasi/@emnapi/runtime": ["@emnapi/runtime@1.7.1", "", { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, "sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA=="], - - "@tailwindcss/oxide-wasm32-wasi/@emnapi/wasi-threads": ["@emnapi/wasi-threads@1.1.0", "", { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, "sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ=="], - - "@tailwindcss/oxide-wasm32-wasi/@napi-rs/wasm-runtime": ["@napi-rs/wasm-runtime@1.1.0", "", { "dependencies": { "@emnapi/core": "^1.7.1", "@emnapi/runtime": "^1.7.1", "@tybys/wasm-util": "^0.10.1" }, "bundled": true }, "sha512-Fq6DJW+Bb5jaWE69/qOE0D1TUN9+6uWhCeZpdnSBk14pjLcCWR7Q8n49PTSPHazM37JqrsdpEthXy2xn6jWWiA=="], - - "@tailwindcss/oxide-wasm32-wasi/@tybys/wasm-util": ["@tybys/wasm-util@0.10.1", "", { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg=="], - - "@tailwindcss/oxide-wasm32-wasi/tslib": ["tslib@2.8.1", "", { "bundled": true }, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], - - "@tanstack/form-core/@tanstack/store": ["@tanstack/store@0.7.7", "", {}, "sha512-xa6pTan1bcaqYDS9BDpSiS63qa6EoDkPN9RsRaxHuDdVDNntzq3xNwR5YKTU/V3SkSyC9T4YVOPh2zRQN0nhIQ=="], - - "@tanstack/router-generator/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], - - "@tanstack/router-plugin/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], - - "@ts-morph/common/minimatch": ["minimatch@10.1.1", "", { 
"dependencies": { "@isaacs/brace-expansion": "^5.0.0" } }, "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ=="], - - "@typescript-eslint/eslint-plugin/ignore": ["ignore@7.0.5", "", {}, "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg=="], - - "@typescript-eslint/typescript-estree/minimatch": ["minimatch@9.0.5", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow=="], - - "@typescript-eslint/typescript-estree/semver": ["semver@7.7.3", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q=="], - - "accepts/mime-types": ["mime-types@3.0.2", "", { "dependencies": { "mime-db": "^1.54.0" } }, "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A=="], - - "ajv-errors/ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="], - - "ajv-formats/ajv": ["ajv@8.17.1", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2" } }, "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g=="], - - "anymatch/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], - - "chokidar/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], - - "cliui/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", 
"is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], - - "cliui/wrap-ansi": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="], - - "express/cookie": ["cookie@0.7.2", "", {}, "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w=="], - - "express/mime-types": ["mime-types@3.0.2", "", { "dependencies": { "mime-db": "^1.54.0" } }, "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A=="], - - "fast-glob/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], - - "log-symbols/chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], - - "log-symbols/is-unicode-supported": ["is-unicode-supported@1.3.0", "", {}, "sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ=="], - - "micromatch/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], - - "oas-linter/yaml": ["yaml@1.10.2", "", {}, "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg=="], - - "oas-resolver/yaml": ["yaml@1.10.2", "", {}, "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg=="], - - "oas-validator/yaml": ["yaml@1.10.2", "", {}, "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg=="], - - "ora/chalk": ["chalk@5.6.2", "", {}, 
"sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], - - "ora/strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="], - - "orval/chokidar": ["chokidar@4.0.3", "", { "dependencies": { "readdirp": "^4.0.1" } }, "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA=="], - - "prompts/kleur": ["kleur@3.0.3", "", {}, "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w=="], - - "readdirp/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], - - "recast/source-map": ["source-map@0.6.1", "", {}, "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="], - - "restore-cursor/onetime": ["onetime@7.0.0", "", { "dependencies": { "mimic-function": "^5.0.0" } }, "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ=="], - - "restore-cursor/signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], - - "rolldown/@rolldown/pluginutils": ["@rolldown/pluginutils@1.0.0-beta.50", "", {}, "sha512-5e76wQiQVeL1ICOZVUg4LSOVYg9jyhGCin+icYozhsUzM+fHE7kddi1bdiE0jwVqTfkjba3jUFbEkoC9WkdvyA=="], - - "router/path-to-regexp": ["path-to-regexp@8.3.0", "", {}, "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA=="], - - "send/mime-types": ["mime-types@3.0.2", "", { "dependencies": { "mime-db": "^1.54.0" } }, "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A=="], - - "shadcn/execa": ["execa@9.6.1", "", { "dependencies": { "@sindresorhus/merge-streams": "^4.0.0", "cross-spawn": "^7.0.6", "figures": "^6.1.0", 
"get-stream": "^9.0.0", "human-signals": "^8.0.1", "is-plain-obj": "^4.1.0", "is-stream": "^4.0.1", "npm-run-path": "^6.0.0", "pretty-ms": "^9.2.0", "signal-exit": "^4.1.0", "strip-final-newline": "^4.0.0", "yoctocolors": "^2.1.1" } }, "sha512-9Be3ZoN4LmYR90tUoVu2te2BsbzHfhJyfEiAVfz7N5/zv+jduIfLrV2xdQXOHbaD6KgpGdO9PRPM1Y4Q9QkPkA=="], - - "shadcn/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], - - "solid-js/seroval": ["seroval@1.3.2", "", {}, "sha512-RbcPH1n5cfwKrru7v7+zrZvjLurgHhGyso3HTyGtRivGWgYjbOmGuivCQaORNELjNONoK35nj28EoWul9sb1zQ=="], - - "solid-js/seroval-plugins": ["seroval-plugins@1.3.3", "", { "peerDependencies": { "seroval": "^1.0" } }, "sha512-16OL3NnUBw8JG1jBLUoZJsLnQq0n5Ua6aHalhJK4fMQkz1lqR7Osz1sA30trBtd9VUDc2NgkuRCn8+/pBwqZ+w=="], - - "string-width/strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="], - - "swagger2openapi/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="], - - "swagger2openapi/yaml": ["yaml@1.10.2", "", {}, "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg=="], - - "tsx/esbuild": ["esbuild@0.27.2", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.2", "@esbuild/android-arm": "0.27.2", "@esbuild/android-arm64": "0.27.2", "@esbuild/android-x64": "0.27.2", "@esbuild/darwin-arm64": "0.27.2", "@esbuild/darwin-x64": "0.27.2", "@esbuild/freebsd-arm64": "0.27.2", "@esbuild/freebsd-x64": "0.27.2", "@esbuild/linux-arm": "0.27.2", "@esbuild/linux-arm64": "0.27.2", "@esbuild/linux-ia32": "0.27.2", "@esbuild/linux-loong64": "0.27.2", "@esbuild/linux-mips64el": "0.27.2", 
"@esbuild/linux-ppc64": "0.27.2", "@esbuild/linux-riscv64": "0.27.2", "@esbuild/linux-s390x": "0.27.2", "@esbuild/linux-x64": "0.27.2", "@esbuild/netbsd-arm64": "0.27.2", "@esbuild/netbsd-x64": "0.27.2", "@esbuild/openbsd-arm64": "0.27.2", "@esbuild/openbsd-x64": "0.27.2", "@esbuild/openharmony-arm64": "0.27.2", "@esbuild/sunos-x64": "0.27.2", "@esbuild/win32-arm64": "0.27.2", "@esbuild/win32-ia32": "0.27.2", "@esbuild/win32-x64": "0.27.2" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw=="], - - "type-is/mime-types": ["mime-types@3.0.2", "", { "dependencies": { "mime-db": "^1.54.0" } }, "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A=="], - - "typedoc/minimatch": ["minimatch@9.0.5", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow=="], - - "wrap-ansi/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], - - "yargs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], - - "@apidevtools/swagger-parser/ajv/json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], - - "@dotenvx/dotenvx/which/isexe": ["isexe@3.1.1", "", {}, "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ=="], - - "@ibm-cloud/openapi-ruleset/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { 
"balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], - - "@modelcontextprotocol/sdk/ajv/json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], - - "@stoplight/better-ajv-errors/ajv/json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], - - "@stoplight/spectral-core/ajv/json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], - - "@stoplight/spectral-functions/ajv/json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], - - "@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], - - "accepts/mime-types/mime-db": ["mime-db@1.54.0", "", {}, "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ=="], - - "ajv-errors/ajv/json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], - - "ajv-formats/ajv/json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], - - "cliui/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], - - "express/mime-types/mime-db": ["mime-db@1.54.0", "", {}, 
"sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ=="], - - "ora/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], - - "orval/chokidar/readdirp": ["readdirp@4.1.2", "", {}, "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg=="], - - "send/mime-types/mime-db": ["mime-db@1.54.0", "", {}, "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ=="], - - "shadcn/execa/get-stream": ["get-stream@9.0.1", "", { "dependencies": { "@sec-ant/readable-stream": "^0.4.1", "is-stream": "^4.0.1" } }, "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA=="], - - "shadcn/execa/human-signals": ["human-signals@8.0.1", "", {}, "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ=="], - - "shadcn/execa/is-stream": ["is-stream@4.0.1", "", {}, "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A=="], - - "shadcn/execa/npm-run-path": ["npm-run-path@6.0.0", "", { "dependencies": { "path-key": "^4.0.0", "unicorn-magic": "^0.3.0" } }, "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA=="], - - "shadcn/execa/signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], - - "shadcn/execa/strip-final-newline": ["strip-final-newline@4.0.0", "", {}, "sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw=="], - - "string-width/strip-ansi/ansi-regex": ["ansi-regex@6.2.2", "", {}, "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg=="], - - "tsx/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.2", "", { "os": "aix", "cpu": "ppc64" }, 
"sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw=="], - - "tsx/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.2", "", { "os": "android", "cpu": "arm" }, "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA=="], - - "tsx/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.2", "", { "os": "android", "cpu": "arm64" }, "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA=="], - - "tsx/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.27.2", "", { "os": "android", "cpu": "x64" }, "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A=="], - - "tsx/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.2", "", { "os": "darwin", "cpu": "arm64" }, "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg=="], - - "tsx/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.2", "", { "os": "darwin", "cpu": "x64" }, "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA=="], - - "tsx/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.2", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g=="], - - "tsx/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.2", "", { "os": "freebsd", "cpu": "x64" }, "sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA=="], - - "tsx/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.2", "", { "os": "linux", "cpu": "arm" }, "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw=="], - - "tsx/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.2", "", { "os": "linux", "cpu": "arm64" }, 
"sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw=="], - - "tsx/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.2", "", { "os": "linux", "cpu": "ia32" }, "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w=="], - - "tsx/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg=="], - - "tsx/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw=="], - - "tsx/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.2", "", { "os": "linux", "cpu": "ppc64" }, "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ=="], - - "tsx/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.2", "", { "os": "linux", "cpu": "none" }, "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA=="], - - "tsx/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.2", "", { "os": "linux", "cpu": "s390x" }, "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w=="], - - "tsx/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.2", "", { "os": "linux", "cpu": "x64" }, "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA=="], - - "tsx/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw=="], - - "tsx/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.2", "", { "os": "none", "cpu": "x64" }, "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA=="], - 
- "tsx/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.2", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA=="], - - "tsx/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.2", "", { "os": "openbsd", "cpu": "x64" }, "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg=="], - - "tsx/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.2", "", { "os": "none", "cpu": "arm64" }, "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag=="], - - "tsx/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.2", "", { "os": "sunos", "cpu": "x64" }, "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg=="], - - "tsx/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.2", "", { "os": "win32", "cpu": "arm64" }, "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg=="], - - "tsx/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.2", "", { "os": "win32", "cpu": "ia32" }, "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ=="], - - "tsx/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.2", "", { "os": "win32", "cpu": "x64" }, "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ=="], - - "type-is/mime-types/mime-db": ["mime-db@1.54.0", "", {}, "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ=="], - - "typedoc/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], - - "wrap-ansi/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, 
"sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], - - "yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], - - "shadcn/execa/npm-run-path/path-key": ["path-key@4.0.0", "", {}, "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ=="], - } + "@antfu/ni": [ + "@antfu/ni@25.0.0", + "", + { + "dependencies": { + "ansis": "^4.0.0", + "fzf": "^0.5.2", + "package-manager-detector": "^1.3.0", + "tinyexec": "^1.0.1", + }, + "bin": { + "na": "bin/na.mjs", + "ni": "bin/ni.mjs", + "nr": "bin/nr.mjs", + "nci": "bin/nci.mjs", + "nlx": "bin/nlx.mjs", + "nun": "bin/nun.mjs", + "nup": "bin/nup.mjs", + }, + }, + "sha512-9q/yCljni37pkMr4sPrI3G4jqdIk074+iukc5aFJl7kmDCCsiJrbZ6zKxnES1Gwg+i9RcDZwvktl23puGslmvA==", + ], + + "@apidevtools/json-schema-ref-parser": [ + "@apidevtools/json-schema-ref-parser@14.0.1", + "", + { "dependencies": { "@types/json-schema": "^7.0.15", "js-yaml": "^4.1.0" } }, + "sha512-Oc96zvmxx1fqoSEdUmfmvvb59/KDOnUoJ7s2t7bISyAn0XEz57LCCw8k2Y4Pf3mwKaZLMciESALORLgfe2frCw==", + ], + + "@apidevtools/openapi-schemas": [ + "@apidevtools/openapi-schemas@2.1.0", + "", + {}, + "sha512-Zc1AlqrJlX3SlpupFGpiLi2EbteyP7fXmUOGup6/DnkRgjP9bgMM/ag+n91rsv0U1Gpz0H3VILA/o3bW7Ua6BQ==", + ], + + "@apidevtools/swagger-methods": [ + "@apidevtools/swagger-methods@3.0.2", + "", + {}, + "sha512-QAkD5kK2b1WfjDS/UQn/qQkbwF31uqRjPTrsCs5ZG9BQGAkjwvqGFjjPqAuzac/IYzpPtRzjCP1WrTuAIjMrXg==", + ], + + "@apidevtools/swagger-parser": [ + "@apidevtools/swagger-parser@12.1.0", + "", + { + "dependencies": { + "@apidevtools/json-schema-ref-parser": "14.0.1", + "@apidevtools/openapi-schemas": "^2.1.0", + "@apidevtools/swagger-methods": "^3.0.2", + "ajv": "^8.17.1", + "ajv-draft-04": "^1.0.0", + "call-me-maybe": "^1.0.2", + }, + "peerDependencies": { "openapi-types": ">=7" }, + }, + 
"sha512-e5mJoswsnAX0jG+J09xHFYQXb/bUc5S3pLpMxUuRUA2H8T2kni3yEoyz2R3Dltw5f4A6j6rPNMpWTK+iVDFlng==", + ], + + "@asyncapi/specs": [ + "@asyncapi/specs@6.10.0", + "", + { "dependencies": { "@types/json-schema": "^7.0.11" } }, + "sha512-vB5oKLsdrLUORIZ5BXortZTlVyGWWMC1Nud/0LtgxQ3Yn2738HigAD6EVqScvpPsDUI/bcLVsYEXN4dtXQHVng==", + ], + + "@babel/code-frame": [ + "@babel/code-frame@7.27.1", + "", + { + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1", + }, + }, + "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + ], + + "@babel/compat-data": [ + "@babel/compat-data@7.28.5", + "", + {}, + "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==", + ], + + "@babel/core": [ + "@babel/core@7.28.5", + "", + { + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.28.3", + "@babel/helpers": "^7.28.4", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1", + }, + }, + "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==", + ], + + "@babel/generator": [ + "@babel/generator@7.28.5", + "", + { + "dependencies": { + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2", + }, + }, + "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", + ], + + "@babel/helper-annotate-as-pure": [ + "@babel/helper-annotate-as-pure@7.27.3", + "", + { "dependencies": { "@babel/types": "^7.27.3" } }, + 
"sha512-fXSwMQqitTGeHLBC08Eq5yXz2m37E4pJX1qAU1+2cNedz/ifv/bVXft90VeSav5nFO61EcNgwr0aJxbyPaWBPg==", + ], + + "@babel/helper-compilation-targets": [ + "@babel/helper-compilation-targets@7.27.2", + "", + { + "dependencies": { + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1", + }, + }, + "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + ], + + "@babel/helper-create-class-features-plugin": [ + "@babel/helper-create-class-features-plugin@7.28.5", + "", + { + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.3", + "@babel/helper-member-expression-to-functions": "^7.28.5", + "@babel/helper-optimise-call-expression": "^7.27.1", + "@babel/helper-replace-supers": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/traverse": "^7.28.5", + "semver": "^6.3.1", + }, + "peerDependencies": { "@babel/core": "^7.0.0" }, + }, + "sha512-q3WC4JfdODypvxArsJQROfupPBq9+lMwjKq7C33GhbFYJsufD0yd/ziwD+hJucLeWsnFPWZjsU2DNFqBPE7jwQ==", + ], + + "@babel/helper-globals": [ + "@babel/helper-globals@7.28.0", + "", + {}, + "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + ], + + "@babel/helper-member-expression-to-functions": [ + "@babel/helper-member-expression-to-functions@7.28.5", + "", + { "dependencies": { "@babel/traverse": "^7.28.5", "@babel/types": "^7.28.5" } }, + "sha512-cwM7SBRZcPCLgl8a7cY0soT1SptSzAlMH39vwiRpOQkJlh53r5hdHwLSCZpQdVLT39sZt+CRpNwYG4Y2v77atg==", + ], + + "@babel/helper-module-imports": [ + "@babel/helper-module-imports@7.27.1", + "", + { "dependencies": { "@babel/traverse": "^7.27.1", "@babel/types": "^7.27.1" } }, + "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + ], + + "@babel/helper-module-transforms": [ + "@babel/helper-module-transforms@7.28.3", + "", + { + 
"dependencies": { + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.28.3", + }, + "peerDependencies": { "@babel/core": "^7.0.0" }, + }, + "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==", + ], + + "@babel/helper-optimise-call-expression": [ + "@babel/helper-optimise-call-expression@7.27.1", + "", + { "dependencies": { "@babel/types": "^7.27.1" } }, + "sha512-URMGH08NzYFhubNSGJrpUEphGKQwMQYBySzat5cAByY1/YgIRkULnIy3tAMeszlL/so2HbeilYloUmSpd7GdVw==", + ], + + "@babel/helper-plugin-utils": [ + "@babel/helper-plugin-utils@7.27.1", + "", + {}, + "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + ], + + "@babel/helper-replace-supers": [ + "@babel/helper-replace-supers@7.27.1", + "", + { + "dependencies": { + "@babel/helper-member-expression-to-functions": "^7.27.1", + "@babel/helper-optimise-call-expression": "^7.27.1", + "@babel/traverse": "^7.27.1", + }, + "peerDependencies": { "@babel/core": "^7.0.0" }, + }, + "sha512-7EHz6qDZc8RYS5ElPoShMheWvEgERonFCs7IAonWLLUTXW59DP14bCZt89/GKyreYn8g3S83m21FelHKbeDCKA==", + ], + + "@babel/helper-skip-transparent-expression-wrappers": [ + "@babel/helper-skip-transparent-expression-wrappers@7.27.1", + "", + { "dependencies": { "@babel/traverse": "^7.27.1", "@babel/types": "^7.27.1" } }, + "sha512-Tub4ZKEXqbPjXgWLl2+3JpQAYBJ8+ikpQ2Ocj/q/r0LwE3UhENh7EUabyHjz2kCEsrRY83ew2DQdHluuiDQFzg==", + ], + + "@babel/helper-string-parser": [ + "@babel/helper-string-parser@7.27.1", + "", + {}, + "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + ], + + "@babel/helper-validator-identifier": [ + "@babel/helper-validator-identifier@7.28.5", + "", + {}, + "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + ], + + "@babel/helper-validator-option": [ + "@babel/helper-validator-option@7.27.1", 
+ "", + {}, + "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + ], + + "@babel/helpers": [ + "@babel/helpers@7.28.4", + "", + { "dependencies": { "@babel/template": "^7.27.2", "@babel/types": "^7.28.4" } }, + "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==", + ], + + "@babel/parser": [ + "@babel/parser@7.28.5", + "", + { "dependencies": { "@babel/types": "^7.28.5" }, "bin": "./bin/babel-parser.js" }, + "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", + ], + + "@babel/plugin-syntax-jsx": [ + "@babel/plugin-syntax-jsx@7.27.1", + "", + { + "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, + "peerDependencies": { "@babel/core": "^7.0.0-0" }, + }, + "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + ], + + "@babel/plugin-syntax-typescript": [ + "@babel/plugin-syntax-typescript@7.27.1", + "", + { + "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, + "peerDependencies": { "@babel/core": "^7.0.0-0" }, + }, + "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + ], + + "@babel/plugin-transform-modules-commonjs": [ + "@babel/plugin-transform-modules-commonjs@7.27.1", + "", + { + "dependencies": { + "@babel/helper-module-transforms": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + }, + "peerDependencies": { "@babel/core": "^7.0.0-0" }, + }, + "sha512-OJguuwlTYlN0gBZFRPqwOGNWssZjfIUdS7HMYtN8c1KmwpwHFBwTeFZrg9XZa+DFTitWOW5iTAG7tyCUPsCCyw==", + ], + + "@babel/plugin-transform-react-jsx-self": [ + "@babel/plugin-transform-react-jsx-self@7.27.1", + "", + { + "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, + "peerDependencies": { "@babel/core": "^7.0.0-0" }, + }, + "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + ], + + 
"@babel/plugin-transform-react-jsx-source": [ + "@babel/plugin-transform-react-jsx-source@7.27.1", + "", + { + "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, + "peerDependencies": { "@babel/core": "^7.0.0-0" }, + }, + "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + ], + + "@babel/plugin-transform-typescript": [ + "@babel/plugin-transform-typescript@7.28.5", + "", + { + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.3", + "@babel/helper-create-class-features-plugin": "^7.28.5", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/plugin-syntax-typescript": "^7.27.1", + }, + "peerDependencies": { "@babel/core": "^7.0.0-0" }, + }, + "sha512-x2Qa+v/CuEoX7Dr31iAfr0IhInrVOWZU/2vJMJ00FOR/2nM0BcBEclpaf9sWCDc+v5e9dMrhSH8/atq/kX7+bA==", + ], + + "@babel/preset-typescript": [ + "@babel/preset-typescript@7.28.5", + "", + { + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-option": "^7.27.1", + "@babel/plugin-syntax-jsx": "^7.27.1", + "@babel/plugin-transform-modules-commonjs": "^7.27.1", + "@babel/plugin-transform-typescript": "^7.28.5", + }, + "peerDependencies": { "@babel/core": "^7.0.0-0" }, + }, + "sha512-+bQy5WOI2V6LJZpPVxY+yp66XdZ2yifu0Mc1aP5CQKgjn4QM5IN2i5fAZ4xKop47pr8rpVhiAeu+nDQa12C8+g==", + ], + + "@babel/runtime": [ + "@babel/runtime@7.28.4", + "", + {}, + "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==", + ], + + "@babel/template": [ + "@babel/template@7.27.2", + "", + { + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1", + }, + }, + "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + ], + + "@babel/traverse": [ + "@babel/traverse@7.28.5", + "", + { + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": 
"^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1", + }, + }, + "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", + ], + + "@babel/types": [ + "@babel/types@7.28.5", + "", + { + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5", + }, + }, + "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", + ], + + "@base-ui/react": [ + "@base-ui/react@1.0.0", + "", + { + "dependencies": { + "@babel/runtime": "^7.28.4", + "@base-ui/utils": "0.2.3", + "@floating-ui/react-dom": "^2.1.6", + "@floating-ui/utils": "^0.2.10", + "reselect": "^5.1.1", + "tabbable": "^6.3.0", + "use-sync-external-store": "^1.6.0", + }, + "peerDependencies": { + "@types/react": "^17 || ^18 || ^19", + "react": "^17 || ^18 || ^19", + "react-dom": "^17 || ^18 || ^19", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-4USBWz++DUSLTuIYpbYkSgy1F9ZmNG9S/lXvlUN6qMK0P0RlW+6eQmDUB4DgZ7HVvtXl4pvi4z5J2fv6Z3+9hg==", + ], + + "@base-ui/utils": [ + "@base-ui/utils@0.2.3", + "", + { + "dependencies": { + "@babel/runtime": "^7.28.4", + "@floating-ui/utils": "^0.2.10", + "reselect": "^5.1.1", + "use-sync-external-store": "^1.6.0", + }, + "peerDependencies": { + "@types/react": "^17 || ^18 || ^19", + "react": "^17 || ^18 || ^19", + "react-dom": "^17 || ^18 || ^19", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-/CguQ2PDaOzeVOkllQR8nocJ0FFIDqsWIcURsVmm53QGo8NhFNpePjNlyPIB41luxfOqnG7PU0xicMEw3ls7XQ==", + ], + + "@commander-js/extra-typings": [ + "@commander-js/extra-typings@14.0.0", + "", + { "peerDependencies": { "commander": "~14.0.0" } }, + "sha512-hIn0ncNaJRLkZrxBIp5AsW/eXEHNKYQBh0aPdoUqNgD+Io3NIykQqpKFyKcuasZhicGaEZJX/JBSIkZ4e5x8Dg==", + ], + + "@dotenvx/dotenvx": [ + "@dotenvx/dotenvx@1.51.2", + "", + { + "dependencies": { + "commander": 
"^11.1.0", + "dotenv": "^17.2.1", + "eciesjs": "^0.4.10", + "execa": "^5.1.1", + "fdir": "^6.2.0", + "ignore": "^5.3.0", + "object-treeify": "1.1.33", + "picomatch": "^4.0.2", + "which": "^4.0.0", + }, + "bin": { "dotenvx": "src/cli/dotenvx.js" }, + }, + "sha512-+693mNflujDZxudSEqSNGpn92QgFhJlBn9q2mDQ9yGWyHuz3hZ8B5g3EXCwdAz4DMJAI+OFCIbfEFZS+YRdrEA==", + ], + + "@ecies/ciphers": [ + "@ecies/ciphers@0.2.5", + "", + { "peerDependencies": { "@noble/ciphers": "^1.0.0" } }, + "sha512-GalEZH4JgOMHYYcYmVqnFirFsjZHeoGMDt9IxEnM9F7GRUUyUksJ7Ou53L83WHJq3RWKD3AcBpo0iQh0oMpf8A==", + ], + + "@emnapi/core": [ + "@emnapi/core@1.7.1", + "", + { "dependencies": { "@emnapi/wasi-threads": "1.1.0", "tslib": "^2.4.0" } }, + "sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==", + ], + + "@emnapi/runtime": [ + "@emnapi/runtime@1.7.1", + "", + { "dependencies": { "tslib": "^2.4.0" } }, + "sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==", + ], + + "@emnapi/wasi-threads": [ + "@emnapi/wasi-threads@1.1.0", + "", + { "dependencies": { "tslib": "^2.4.0" } }, + "sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==", + ], + + "@esbuild/aix-ppc64": [ + "@esbuild/aix-ppc64@0.25.12", + "", + { "os": "aix", "cpu": "ppc64" }, + "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==", + ], + + "@esbuild/android-arm": [ + "@esbuild/android-arm@0.25.12", + "", + { "os": "android", "cpu": "arm" }, + "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==", + ], + + "@esbuild/android-arm64": [ + "@esbuild/android-arm64@0.25.12", + "", + { "os": "android", "cpu": "arm64" }, + "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==", + ], + + "@esbuild/android-x64": [ + "@esbuild/android-x64@0.25.12", + "", + { "os": "android", "cpu": "x64" }, + 
"sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==", + ], + + "@esbuild/darwin-arm64": [ + "@esbuild/darwin-arm64@0.25.12", + "", + { "os": "darwin", "cpu": "arm64" }, + "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==", + ], + + "@esbuild/darwin-x64": [ + "@esbuild/darwin-x64@0.25.12", + "", + { "os": "darwin", "cpu": "x64" }, + "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==", + ], + + "@esbuild/freebsd-arm64": [ + "@esbuild/freebsd-arm64@0.25.12", + "", + { "os": "freebsd", "cpu": "arm64" }, + "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==", + ], + + "@esbuild/freebsd-x64": [ + "@esbuild/freebsd-x64@0.25.12", + "", + { "os": "freebsd", "cpu": "x64" }, + "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==", + ], + + "@esbuild/linux-arm": [ + "@esbuild/linux-arm@0.25.12", + "", + { "os": "linux", "cpu": "arm" }, + "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==", + ], + + "@esbuild/linux-arm64": [ + "@esbuild/linux-arm64@0.25.12", + "", + { "os": "linux", "cpu": "arm64" }, + "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==", + ], + + "@esbuild/linux-ia32": [ + "@esbuild/linux-ia32@0.25.12", + "", + { "os": "linux", "cpu": "ia32" }, + "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==", + ], + + "@esbuild/linux-loong64": [ + "@esbuild/linux-loong64@0.25.12", + "", + { "os": "linux", "cpu": "none" }, + "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==", + ], + + "@esbuild/linux-mips64el": [ + "@esbuild/linux-mips64el@0.25.12", + "", + { "os": "linux", "cpu": "none" }, + 
"sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==", + ], + + "@esbuild/linux-ppc64": [ + "@esbuild/linux-ppc64@0.25.12", + "", + { "os": "linux", "cpu": "ppc64" }, + "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==", + ], + + "@esbuild/linux-riscv64": [ + "@esbuild/linux-riscv64@0.25.12", + "", + { "os": "linux", "cpu": "none" }, + "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==", + ], + + "@esbuild/linux-s390x": [ + "@esbuild/linux-s390x@0.25.12", + "", + { "os": "linux", "cpu": "s390x" }, + "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==", + ], + + "@esbuild/linux-x64": [ + "@esbuild/linux-x64@0.25.12", + "", + { "os": "linux", "cpu": "x64" }, + "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==", + ], + + "@esbuild/netbsd-arm64": [ + "@esbuild/netbsd-arm64@0.25.12", + "", + { "os": "none", "cpu": "arm64" }, + "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==", + ], + + "@esbuild/netbsd-x64": [ + "@esbuild/netbsd-x64@0.25.12", + "", + { "os": "none", "cpu": "x64" }, + "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==", + ], + + "@esbuild/openbsd-arm64": [ + "@esbuild/openbsd-arm64@0.25.12", + "", + { "os": "openbsd", "cpu": "arm64" }, + "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==", + ], + + "@esbuild/openbsd-x64": [ + "@esbuild/openbsd-x64@0.25.12", + "", + { "os": "openbsd", "cpu": "x64" }, + "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==", + ], + + "@esbuild/openharmony-arm64": [ + "@esbuild/openharmony-arm64@0.25.12", + "", + { "os": "none", "cpu": "arm64" }, + 
"sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==", + ], + + "@esbuild/sunos-x64": [ + "@esbuild/sunos-x64@0.25.12", + "", + { "os": "sunos", "cpu": "x64" }, + "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==", + ], + + "@esbuild/win32-arm64": [ + "@esbuild/win32-arm64@0.25.12", + "", + { "os": "win32", "cpu": "arm64" }, + "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==", + ], + + "@esbuild/win32-ia32": [ + "@esbuild/win32-ia32@0.25.12", + "", + { "os": "win32", "cpu": "ia32" }, + "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==", + ], + + "@esbuild/win32-x64": [ + "@esbuild/win32-x64@0.25.12", + "", + { "os": "win32", "cpu": "x64" }, + "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==", + ], + + "@eslint-community/eslint-utils": [ + "@eslint-community/eslint-utils@4.9.0", + "", + { + "dependencies": { "eslint-visitor-keys": "^3.4.3" }, + "peerDependencies": { "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" }, + }, + "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", + ], + + "@eslint-community/regexpp": [ + "@eslint-community/regexpp@4.12.2", + "", + {}, + "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + ], + + "@eslint/config-array": [ + "@eslint/config-array@0.21.1", + "", + { + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.2", + }, + }, + "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", + ], + + "@eslint/config-helpers": [ + "@eslint/config-helpers@0.4.2", + "", + { "dependencies": { "@eslint/core": "^0.17.0" } }, + "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + ], + + "@eslint/core": 
[ + "@eslint/core@0.17.0", + "", + { "dependencies": { "@types/json-schema": "^7.0.15" } }, + "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + ], + + "@eslint/eslintrc": [ + "@eslint/eslintrc@3.3.3", + "", + { + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1", + }, + }, + "sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ==", + ], + + "@eslint/js": [ + "@eslint/js@9.39.2", + "", + {}, + "sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA==", + ], + + "@eslint/object-schema": [ + "@eslint/object-schema@2.1.7", + "", + {}, + "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + ], + + "@eslint/plugin-kit": [ + "@eslint/plugin-kit@0.4.1", + "", + { "dependencies": { "@eslint/core": "^0.17.0", "levn": "^0.4.1" } }, + "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + ], + + "@exodus/schemasafe": [ + "@exodus/schemasafe@1.3.0", + "", + {}, + "sha512-5Aap/GaRupgNx/feGBwLLTVv8OQFfv3pq2lPRzPg9R+IOBnDgghTGW7l7EuVXOvg5cc/xSAlRW8rBrjIC3Nvqw==", + ], + + "@faker-js/faker": [ + "@faker-js/faker@10.1.0", + "", + {}, + "sha512-C3mrr3b5dRVlKPJdfrAXS8+dq+rq8Qm5SNRazca0JKgw1HQERFmrVb0towvMmw5uu8hHKNiQasMaR/tydf3Zsg==", + ], + + "@floating-ui/core": [ + "@floating-ui/core@1.7.3", + "", + { "dependencies": { "@floating-ui/utils": "^0.2.10" } }, + "sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==", + ], + + "@floating-ui/dom": [ + "@floating-ui/dom@1.7.4", + "", + { "dependencies": { "@floating-ui/core": "^1.7.3", "@floating-ui/utils": "^0.2.10" } }, + 
"sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==", + ], + + "@floating-ui/react-dom": [ + "@floating-ui/react-dom@2.1.6", + "", + { + "dependencies": { "@floating-ui/dom": "^1.7.4" }, + "peerDependencies": { "react": ">=16.8.0", "react-dom": ">=16.8.0" }, + }, + "sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw==", + ], + + "@floating-ui/utils": [ + "@floating-ui/utils@0.2.10", + "", + {}, + "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==", + ], + + "@fontsource-variable/jetbrains-mono": [ + "@fontsource-variable/jetbrains-mono@5.2.8", + "", + {}, + "sha512-WBA9elru6Jdp5df2mES55wuOO0WIrn3kpXnI4+W2ek5u3ZgLS9XS4gmIlcQhiZOWEKl95meYdvK7xI+ETLCq/Q==", + ], + + "@gerrit0/mini-shiki": [ + "@gerrit0/mini-shiki@3.20.0", + "", + { + "dependencies": { + "@shikijs/engine-oniguruma": "^3.20.0", + "@shikijs/langs": "^3.20.0", + "@shikijs/themes": "^3.20.0", + "@shikijs/types": "^3.20.0", + "@shikijs/vscode-textmate": "^10.0.2", + }, + }, + "sha512-Wa57i+bMpK6PGJZ1f2myxo3iO+K/kZikcyvH8NIqNNZhQUbDav7V9LQmWOXhf946mz5c1NZ19WMsGYiDKTryzQ==", + ], + + "@hono/node-server": [ + "@hono/node-server@1.19.7", + "", + { "peerDependencies": { "hono": "^4" } }, + "sha512-vUcD0uauS7EU2caukW8z5lJKtoGMokxNbJtBiwHgpqxEXokaHCBkQUmCHhjFB1VUTWdqj25QoMkMKzgjq+uhrw==", + ], + + "@humanfs/core": [ + "@humanfs/core@0.19.1", + "", + {}, + "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + ], + + "@humanfs/node": [ + "@humanfs/node@0.16.7", + "", + { "dependencies": { "@humanfs/core": "^0.19.1", "@humanwhocodes/retry": "^0.4.0" } }, + "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + ], + + "@humanwhocodes/module-importer": [ + "@humanwhocodes/module-importer@1.0.1", + "", + {}, + 
"sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + ], + + "@humanwhocodes/retry": [ + "@humanwhocodes/retry@0.4.3", + "", + {}, + "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + ], + + "@ibm-cloud/openapi-ruleset": [ + "@ibm-cloud/openapi-ruleset@1.33.5", + "", + { + "dependencies": { + "@ibm-cloud/openapi-ruleset-utilities": "1.9.0", + "@stoplight/spectral-formats": "^1.8.2", + "@stoplight/spectral-functions": "^1.9.3", + "@stoplight/spectral-rulesets": "^1.21.3", + "chalk": "^4.1.2", + "inflected": "^2.1.0", + "jsonschema": "^1.5.0", + "lodash": "^4.17.21", + "loglevel": "^1.9.2", + "loglevel-plugin-prefix": "0.8.4", + "minimatch": "^6.2.0", + "validator": "^13.15.23", + }, + }, + "sha512-oT8USsTulFAA8FiBN0lA2rJqQI2lIt+HP2pdakGQXo3EviL2vqJTgpSCRwjl6mLJL158f1BVcdQUOEFGxomK3w==", + ], + + "@ibm-cloud/openapi-ruleset-utilities": [ + "@ibm-cloud/openapi-ruleset-utilities@1.9.0", + "", + {}, + "sha512-AoFbSarOqFBYH+1TZ9Ahkm2IWYSi5v0pBk88fpV+5b3qGJukypX8PwvCWADjuyIccKg48/F73a6hTTkBzDQ2UA==", + ], + + "@inquirer/ansi": [ + "@inquirer/ansi@1.0.2", + "", + {}, + "sha512-S8qNSZiYzFd0wAcyG5AXCvUHC5Sr7xpZ9wZ2py9XR88jUz8wooStVx5M6dRzczbBWjic9NP7+rY0Xi7qqK/aMQ==", + ], + + "@inquirer/confirm": [ + "@inquirer/confirm@5.1.21", + "", + { + "dependencies": { "@inquirer/core": "^10.3.2", "@inquirer/type": "^3.0.10" }, + "peerDependencies": { "@types/node": ">=18" }, + "optionalPeers": ["@types/node"], + }, + "sha512-KR8edRkIsUayMXV+o3Gv+q4jlhENF9nMYUZs9PA2HzrXeHI8M5uDag70U7RJn9yyiMZSbtF5/UexBtAVtZGSbQ==", + ], + + "@inquirer/core": [ + "@inquirer/core@10.3.2", + "", + { + "dependencies": { + "@inquirer/ansi": "^1.0.2", + "@inquirer/figures": "^1.0.15", + "@inquirer/type": "^3.0.10", + "cli-width": "^4.1.0", + "mute-stream": "^2.0.0", + "signal-exit": "^4.1.0", + "wrap-ansi": "^6.2.0", + "yoctocolors-cjs": "^2.1.3", + }, + "peerDependencies": { "@types/node": ">=18" }, + 
"optionalPeers": ["@types/node"], + }, + "sha512-43RTuEbfP8MbKzedNqBrlhhNKVwoK//vUFNW3Q3vZ88BLcrs4kYpGg+B2mm5p2K/HfygoCxuKwJJiv8PbGmE0A==", + ], + + "@inquirer/figures": [ + "@inquirer/figures@1.0.15", + "", + {}, + "sha512-t2IEY+unGHOzAaVM5Xx6DEWKeXlDDcNPeDyUpsRc6CUhBfU3VQOEl+Vssh7VNp1dR8MdUJBWhuObjXCsVpjN5g==", + ], + + "@inquirer/type": [ + "@inquirer/type@3.0.10", + "", + { "peerDependencies": { "@types/node": ">=18" }, "optionalPeers": ["@types/node"] }, + "sha512-BvziSRxfz5Ov8ch0z/n3oijRSEcEsHnhggm4xFZe93DHcUCTlutlq9Ox4SVENAfcRD22UQq7T/atg9Wr3k09eA==", + ], + + "@isaacs/balanced-match": [ + "@isaacs/balanced-match@4.0.1", + "", + {}, + "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==", + ], + + "@isaacs/brace-expansion": [ + "@isaacs/brace-expansion@5.0.0", + "", + { "dependencies": { "@isaacs/balanced-match": "^4.0.1" } }, + "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==", + ], + + "@jridgewell/gen-mapping": [ + "@jridgewell/gen-mapping@0.3.13", + "", + { + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24", + }, + }, + "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + ], + + "@jridgewell/remapping": [ + "@jridgewell/remapping@2.3.5", + "", + { + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24", + }, + }, + "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + ], + + "@jridgewell/resolve-uri": [ + "@jridgewell/resolve-uri@3.1.2", + "", + {}, + "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + ], + + "@jridgewell/sourcemap-codec": [ + "@jridgewell/sourcemap-codec@1.5.5", + "", + {}, + "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + ], + + "@jridgewell/trace-mapping": 
[ + "@jridgewell/trace-mapping@0.3.31", + "", + { + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14", + }, + }, + "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + ], + + "@jsep-plugin/assignment": [ + "@jsep-plugin/assignment@1.3.0", + "", + { "peerDependencies": { "jsep": "^0.4.0||^1.0.0" } }, + "sha512-VVgV+CXrhbMI3aSusQyclHkenWSAm95WaiKrMxRFam3JSUiIaQjoMIw2sEs/OX4XifnqeQUN4DYbJjlA8EfktQ==", + ], + + "@jsep-plugin/regex": [ + "@jsep-plugin/regex@1.0.4", + "", + { "peerDependencies": { "jsep": "^0.4.0||^1.0.0" } }, + "sha512-q7qL4Mgjs1vByCaTnDFcBnV9HS7GVPJX5vyVoCgZHNSC9rjwIlmbXG5sUuorR5ndfHAIlJ8pVStxvjXHbNvtUg==", + ], + + "@jsep-plugin/ternary": [ + "@jsep-plugin/ternary@1.1.4", + "", + { "peerDependencies": { "jsep": "^0.4.0||^1.0.0" } }, + "sha512-ck5wiqIbqdMX6WRQztBL7ASDty9YLgJ3sSAK5ZpBzXeySvFGCzIvM6UiAI4hTZ22fEcYQVV/zhUbNscggW+Ukg==", + ], + + "@mdx-js/mdx": [ + "@mdx-js/mdx@3.1.1", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdx": "^2.0.0", + "acorn": "^8.0.0", + "collapse-white-space": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-util-scope": "^1.0.0", + "estree-walker": "^3.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "markdown-extensions": "^2.0.0", + "recma-build-jsx": "^1.0.0", + "recma-jsx": "^1.0.0", + "recma-stringify": "^1.0.0", + "rehype-recma": "^1.0.0", + "remark-mdx": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "source-map": "^0.7.0", + "unified": "^11.0.0", + "unist-util-position-from-estree": "^2.0.0", + "unist-util-stringify-position": "^4.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + }, + }, + "sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==", + ], + + "@mdx-js/rollup": [ + "@mdx-js/rollup@3.1.1", + "", + { + 
"dependencies": { + "@mdx-js/mdx": "^3.0.0", + "@rollup/pluginutils": "^5.0.0", + "source-map": "^0.7.0", + "vfile": "^6.0.0", + }, + "peerDependencies": { "rollup": ">=2" }, + }, + "sha512-v8satFmBB+DqDzYohnm1u2JOvxx6Hl3pUvqzJvfs2Zk/ngZ1aRUhsWpXvwPkNeGN9c2NCm/38H29ZqXQUjf8dw==", + ], + + "@modelcontextprotocol/sdk": [ + "@modelcontextprotocol/sdk@1.25.1", + "", + { + "dependencies": { + "@hono/node-server": "^1.19.7", + "ajv": "^8.17.1", + "ajv-formats": "^3.0.1", + "content-type": "^1.0.5", + "cors": "^2.8.5", + "cross-spawn": "^7.0.5", + "eventsource": "^3.0.2", + "eventsource-parser": "^3.0.0", + "express": "^5.0.1", + "express-rate-limit": "^7.5.0", + "jose": "^6.1.1", + "json-schema-typed": "^8.0.2", + "pkce-challenge": "^5.0.0", + "raw-body": "^3.0.0", + "zod": "^3.25 || ^4.0", + "zod-to-json-schema": "^3.25.0", + }, + "peerDependencies": { "@cfworker/json-schema": "^4.1.1" }, + "optionalPeers": ["@cfworker/json-schema"], + }, + "sha512-yO28oVFFC7EBoiKdAn+VqRm+plcfv4v0xp6osG/VsCB0NlPZWi87ajbCZZ8f/RvOFLEu7//rSRmuZZ7lMoe3gQ==", + ], + + "@mswjs/interceptors": [ + "@mswjs/interceptors@0.40.0", + "", + { + "dependencies": { + "@open-draft/deferred-promise": "^2.2.0", + "@open-draft/logger": "^0.3.0", + "@open-draft/until": "^2.0.0", + "is-node-process": "^1.2.0", + "outvariant": "^1.4.3", + "strict-event-emitter": "^0.5.1", + }, + }, + "sha512-EFd6cVbHsgLa6wa4RljGj6Wk75qoHxUSyc5asLyyPSyuhIcdS2Q3Phw6ImS1q+CkALthJRShiYfKANcQMuMqsQ==", + ], + + "@napi-rs/wasm-runtime": [ + "@napi-rs/wasm-runtime@1.1.0", + "", + { + "dependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1", + "@tybys/wasm-util": "^0.10.1", + }, + }, + "sha512-Fq6DJW+Bb5jaWE69/qOE0D1TUN9+6uWhCeZpdnSBk14pjLcCWR7Q8n49PTSPHazM37JqrsdpEthXy2xn6jWWiA==", + ], + + "@noble/ciphers": [ + "@noble/ciphers@1.3.0", + "", + {}, + "sha512-2I0gnIVPtfnMw9ee9h1dJG7tp81+8Ob3OJb3Mv37rx5L40/b0i7djjCVvGOVqc9AEIQyvyu1i6ypKdFw8R8gQw==", + ], + + "@noble/curves": [ + "@noble/curves@1.9.7", + "", + { 
"dependencies": { "@noble/hashes": "1.8.0" } }, + "sha512-gbKGcRUYIjA3/zCCNaWDciTMFI0dCkvou3TL8Zmy5Nc7sJ47a0jtOeZoTaMxkuqRo9cRhjOdZJXegxYE5FN/xw==", + ], + + "@noble/hashes": [ + "@noble/hashes@1.8.0", + "", + {}, + "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==", + ], + + "@nodelib/fs.scandir": [ + "@nodelib/fs.scandir@2.1.5", + "", + { "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" } }, + "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + ], + + "@nodelib/fs.stat": [ + "@nodelib/fs.stat@2.0.5", + "", + {}, + "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + ], + + "@nodelib/fs.walk": [ + "@nodelib/fs.walk@1.2.8", + "", + { "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" } }, + "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + ], + + "@open-draft/deferred-promise": [ + "@open-draft/deferred-promise@2.2.0", + "", + {}, + "sha512-CecwLWx3rhxVQF6V4bAgPS5t+So2sTbPgAzafKkVizyi7tlwpcFpdFqq+wqF2OwNBmqFuu6tOyouTuxgpMfzmA==", + ], + + "@open-draft/logger": [ + "@open-draft/logger@0.3.0", + "", + { "dependencies": { "is-node-process": "^1.2.0", "outvariant": "^1.4.0" } }, + "sha512-X2g45fzhxH238HKO4xbSr7+wBS8Fvw6ixhTDuvLd5mqh6bJJCFAPwU9mPDxbcrRtfxv4u5IHCEH77BmxvXmmxQ==", + ], + + "@open-draft/until": [ + "@open-draft/until@2.1.0", + "", + {}, + "sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==", + ], + + "@opentelemetry/api": [ + "@opentelemetry/api@1.9.0", + "", + {}, + "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", + ], + + "@opentelemetry/api-logs": [ + "@opentelemetry/api-logs@0.208.0", + "", + { "dependencies": { "@opentelemetry/api": "^1.3.0" } }, + 
"sha512-CjruKY9V6NMssL/T1kAFgzosF1v9o6oeN+aX5JB/C/xPNtmgIJqcXHG7fA82Ou1zCpWGl4lROQUKwUNE1pMCyg==", + ], + + "@opentelemetry/core": [ + "@opentelemetry/core@2.2.0", + "", + { + "dependencies": { "@opentelemetry/semantic-conventions": "^1.29.0" }, + "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" }, + }, + "sha512-FuabnnUm8LflnieVxs6eP7Z383hgQU4W1e3KJS6aOG3RxWxcHyBxH8fDMHNgu/gFx/M2jvTOW/4/PHhLz6bjWw==", + ], + + "@opentelemetry/exporter-logs-otlp-http": [ + "@opentelemetry/exporter-logs-otlp-http@0.208.0", + "", + { + "dependencies": { + "@opentelemetry/api-logs": "0.208.0", + "@opentelemetry/core": "2.2.0", + "@opentelemetry/otlp-exporter-base": "0.208.0", + "@opentelemetry/otlp-transformer": "0.208.0", + "@opentelemetry/sdk-logs": "0.208.0", + }, + "peerDependencies": { "@opentelemetry/api": "^1.3.0" }, + }, + "sha512-jOv40Bs9jy9bZVLo/i8FwUiuCvbjWDI+ZW13wimJm4LjnlwJxGgB+N/VWOZUTpM+ah/awXeQqKdNlpLf2EjvYg==", + ], + + "@opentelemetry/otlp-exporter-base": [ + "@opentelemetry/otlp-exporter-base@0.208.0", + "", + { + "dependencies": { + "@opentelemetry/core": "2.2.0", + "@opentelemetry/otlp-transformer": "0.208.0", + }, + "peerDependencies": { "@opentelemetry/api": "^1.3.0" }, + }, + "sha512-gMd39gIfVb2OgxldxUtOwGJYSH8P1kVFFlJLuut32L6KgUC4gl1dMhn+YC2mGn0bDOiQYSk/uHOdSjuKp58vvA==", + ], + + "@opentelemetry/otlp-transformer": [ + "@opentelemetry/otlp-transformer@0.208.0", + "", + { + "dependencies": { + "@opentelemetry/api-logs": "0.208.0", + "@opentelemetry/core": "2.2.0", + "@opentelemetry/resources": "2.2.0", + "@opentelemetry/sdk-logs": "0.208.0", + "@opentelemetry/sdk-metrics": "2.2.0", + "@opentelemetry/sdk-trace-base": "2.2.0", + "protobufjs": "^7.3.0", + }, + "peerDependencies": { "@opentelemetry/api": "^1.3.0" }, + }, + "sha512-DCFPY8C6lAQHUNkzcNT9R+qYExvsk6C5Bto2pbNxgicpcSWbe2WHShLxkOxIdNcBiYPdVHv/e7vH7K6TI+C+fQ==", + ], + + "@opentelemetry/resources": [ + "@opentelemetry/resources@2.5.1", + "", + { + "dependencies": { + "@opentelemetry/core": 
"2.5.1", + "@opentelemetry/semantic-conventions": "^1.29.0", + }, + "peerDependencies": { "@opentelemetry/api": ">=1.3.0 <1.10.0" }, + }, + "sha512-BViBCdE/GuXRlp9k7nS1w6wJvY5fnFX5XvuEtWsTAOQFIO89Eru7lGW3WbfbxtCuZ/GbrJfAziXG0w0dpxL7eQ==", + ], + + "@opentelemetry/sdk-logs": [ + "@opentelemetry/sdk-logs@0.208.0", + "", + { + "dependencies": { + "@opentelemetry/api-logs": "0.208.0", + "@opentelemetry/core": "2.2.0", + "@opentelemetry/resources": "2.2.0", + }, + "peerDependencies": { "@opentelemetry/api": ">=1.4.0 <1.10.0" }, + }, + "sha512-QlAyL1jRpOeaqx7/leG1vJMp84g0xKP6gJmfELBpnI4O/9xPX+Hu5m1POk9Kl+veNkyth5t19hRlN6tNY1sjbA==", + ], + + "@opentelemetry/sdk-metrics": [ + "@opentelemetry/sdk-metrics@2.2.0", + "", + { + "dependencies": { "@opentelemetry/core": "2.2.0", "@opentelemetry/resources": "2.2.0" }, + "peerDependencies": { "@opentelemetry/api": ">=1.9.0 <1.10.0" }, + }, + "sha512-G5KYP6+VJMZzpGipQw7Giif48h6SGQ2PFKEYCybeXJsOCB4fp8azqMAAzE5lnnHK3ZVwYQrgmFbsUJO/zOnwGw==", + ], + + "@opentelemetry/sdk-trace-base": [ + "@opentelemetry/sdk-trace-base@2.2.0", + "", + { + "dependencies": { + "@opentelemetry/core": "2.2.0", + "@opentelemetry/resources": "2.2.0", + "@opentelemetry/semantic-conventions": "^1.29.0", + }, + "peerDependencies": { "@opentelemetry/api": ">=1.3.0 <1.10.0" }, + }, + "sha512-xWQgL0Bmctsalg6PaXExmzdedSp3gyKV8mQBwK/j9VGdCDu2fmXIb2gAehBKbkXCpJ4HPkgv3QfoJWRT4dHWbw==", + ], + + "@opentelemetry/semantic-conventions": [ + "@opentelemetry/semantic-conventions@1.39.0", + "", + {}, + "sha512-R5R9tb2AXs2IRLNKLBJDynhkfmx7mX0vi8NkhZb3gUkPWHn6HXk5J8iQ/dql0U3ApfWym4kXXmBDRGO+oeOfjg==", + ], + + "@orval/angular": [ + "@orval/angular@7.17.2", + "", + { "dependencies": { "@orval/core": "7.17.2" } }, + "sha512-+WBILd8KJD/I8eyX2eyTmzhyY+oTvbCTV25YoF2/UzWaBIq0reIhe5i2J2vUBXFnhZDLfwJYn70v341y7NSPlQ==", + ], + + "@orval/axios": [ + "@orval/axios@7.17.2", + "", + { "dependencies": { "@orval/core": "7.17.2" } }, + 
"sha512-9wNxnEtNSI4S22V5ta4f5jaFAfrJ5PEcQwALP6ogsBxT0lUhzHWhRGWaopA2SJLW8eCGfPVo4fF/x/2i9ecG8w==", + ], + + "@orval/core": [ + "@orval/core@7.17.2", + "", + { + "dependencies": { + "@apidevtools/swagger-parser": "^12.1.0", + "@ibm-cloud/openapi-ruleset": "^1.33.1", + "@stoplight/spectral-core": "^1.20.0", + "acorn": "^8.15.0", + "chalk": "^4.1.2", + "compare-versions": "^6.1.1", + "debug": "^4.4.3", + "esbuild": "^0.25.11", + "esutils": "2.0.3", + "fs-extra": "^11.3.1", + "globby": "11.1.0", + "lodash.isempty": "^4.4.0", + "lodash.uniq": "^4.5.0", + "lodash.uniqby": "^4.7.0", + "lodash.uniqwith": "^4.5.0", + "micromatch": "^4.0.8", + "openapi3-ts": "4.5.0", + "swagger2openapi": "^7.0.8", + "typedoc": "^0.28.14", + }, + }, + "sha512-7kv7JgC6Va9hE/OiYTaJEU52Uy52y8Ill26QZufgP9yeMT60h3yiycF//LSQp83P+fGzcS9PLuCS4uRPmrMtgA==", + ], + + "@orval/fetch": [ + "@orval/fetch@7.17.2", + "", + { "dependencies": { "@orval/core": "7.17.2", "openapi3-ts": "4.5.0" } }, + "sha512-gBgk4POAacKs4c7VKCQPTkdLWvxF2s1p/EBKT1N6h2QKUxZcO7MbV141FQXFghFGFYgpuNiUTlox4BbSeCVrXw==", + ], + + "@orval/hono": [ + "@orval/hono@7.17.2", + "", + { + "dependencies": { + "@orval/core": "7.17.2", + "@orval/zod": "7.17.2", + "fs-extra": "^11.3.2", + "lodash.uniq": "^4.5.0", + "openapi3-ts": "4.5.0", + }, + }, + "sha512-YSTDYPysnD5LyugRKGSU4kZEMgK0HeLeZ7r1X/4u6U5FrRdhmjf+gjnCseBR61yKwS/8BLMogIe7qw8uOs83Hg==", + ], + + "@orval/mcp": [ + "@orval/mcp@7.17.2", + "", + { + "dependencies": { + "@orval/core": "7.17.2", + "@orval/fetch": "7.17.2", + "@orval/zod": "7.17.2", + "openapi3-ts": "4.5.0", + }, + }, + "sha512-7aJcZMSh2ZPHdcgz0JdilVTLcaMbPeEHJgpJTvSUZembgQvBwJYfalaaROfBmRH9nSfHP11DYmsuG5kTIfd3JA==", + ], + + "@orval/mock": [ + "@orval/mock@7.17.2", + "", + { "dependencies": { "@orval/core": "7.17.2", "openapi3-ts": "4.5.0" } }, + "sha512-otVzA/yR2lKQJRSIeVyuh+tl/uE/EJQ82oJg9Zu11S70QzxMTCg/IEe8fbTeE3APtoujhACB6aZ/qqbmK29ugw==", + ], + + "@orval/query": [ + "@orval/query@7.17.2", + "", + { + "dependencies": { 
+ "@orval/core": "7.17.2", + "@orval/fetch": "7.17.2", + "chalk": "^4.1.2", + "lodash.omitby": "^4.6.0", + }, + }, + "sha512-dbD3U6CuRBYszMQOlitO4MryLnrlZIHwjNGsree0l5WO25e9QrfEMPxFU9wVaLnP/w10ODMBsBIBNUmy5ifXxQ==", + ], + + "@orval/swr": [ + "@orval/swr@7.17.2", + "", + { "dependencies": { "@orval/core": "7.17.2", "@orval/fetch": "7.17.2" } }, + "sha512-BWN7E41JEfr2qVFi+sNwPH3jXJgL4BiIAJTqZraZW0S89EmI4kU2yh3YveNmeopXai7mssaiiAlE8qe4wFcWnw==", + ], + + "@orval/zod": [ + "@orval/zod@7.17.2", + "", + { + "dependencies": { + "@orval/core": "7.17.2", + "lodash.uniq": "^4.5.0", + "openapi3-ts": "4.5.0", + }, + }, + "sha512-Zf2sAQAGZ99VzummF4/Tq5pbXe6qR+w37XmLX5bMokshT8lEuV9goT4gftW4AWGY3Azrp/Syj3fdbSBthN/KPw==", + ], + + "@oxc-project/runtime": [ + "@oxc-project/runtime@0.97.0", + "", + {}, + "sha512-yH0zw7z+jEws4dZ4IUKoix5Lh3yhqIJWF9Dc8PWvhpo7U7O+lJrv7ZZL4BeRO0la8LBQFwcCewtLBnVV7hPe/w==", + ], + + "@oxc-project/types": [ + "@oxc-project/types@0.97.0", + "", + {}, + "sha512-lxmZK4xFrdvU0yZiDwgVQTCvh2gHWBJCBk5ALsrtsBWhs0uDIi+FTOnXRQeQfs304imdvTdaakT/lqwQ8hkOXQ==", + ], + + "@posthog/core": [ + "@posthog/core@1.23.1", + "", + { "dependencies": { "cross-spawn": "^7.0.6" } }, + "sha512-GViD5mOv/mcbZcyzz3z9CS0R79JzxVaqEz4sP5Dsea178M/j3ZWe6gaHDZB9yuyGfcmIMQ/8K14yv+7QrK4sQQ==", + ], + + "@posthog/types": [ + "@posthog/types@1.351.1", + "", + {}, + "sha512-hdHQtgXmmInxvNpoKQR/XbigutXQ9/GNX2H9VkrCh0IWybRr8irPUY3EojTYWmukL/N0Kn616eNNQCGFuE1RaA==", + ], + + "@protobufjs/aspromise": [ + "@protobufjs/aspromise@1.1.2", + "", + {}, + "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", + ], + + "@protobufjs/base64": [ + "@protobufjs/base64@1.1.2", + "", + {}, + "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", + ], + + "@protobufjs/codegen": [ + "@protobufjs/codegen@2.0.4", + "", + {}, + "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", + ], + 
+ "@protobufjs/eventemitter": [ + "@protobufjs/eventemitter@1.1.0", + "", + {}, + "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", + ], + + "@protobufjs/fetch": [ + "@protobufjs/fetch@1.1.0", + "", + { "dependencies": { "@protobufjs/aspromise": "^1.1.1", "@protobufjs/inquire": "^1.1.0" } }, + "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + ], + + "@protobufjs/float": [ + "@protobufjs/float@1.0.2", + "", + {}, + "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", + ], + + "@protobufjs/inquire": [ + "@protobufjs/inquire@1.1.0", + "", + {}, + "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", + ], + + "@protobufjs/path": [ + "@protobufjs/path@1.1.2", + "", + {}, + "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", + ], + + "@protobufjs/pool": [ + "@protobufjs/pool@1.1.0", + "", + {}, + "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", + ], + + "@protobufjs/utf8": [ + "@protobufjs/utf8@1.1.0", + "", + {}, + "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", + ], + + "@radix-ui/number": [ + "@radix-ui/number@1.1.1", + "", + {}, + "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g==", + ], + + "@radix-ui/primitive": [ + "@radix-ui/primitive@1.1.3", + "", + {}, + "sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==", + ], + + "@radix-ui/react-accessible-icon": [ + "@radix-ui/react-accessible-icon@1.1.7", + "", + { + "dependencies": { "@radix-ui/react-visually-hidden": "1.2.3" }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || 
^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-XM+E4WXl0OqUJFovy6GjmxxFyx9opfCAIUku4dlKRd5YEPqt4kALOkQOp0Of6reHuUkJuiPBEc5k0o4z4lTC8A==", + ], + + "@radix-ui/react-accordion": [ + "@radix-ui/react-accordion@1.2.12", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collapsible": "1.1.12", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-T4nygeh9YE9dLRPhAHSeOZi7HBXo+0kYIPJXayZfvWOWA0+n3dESrZbjfDPUABkUNym6Hd+f2IR113To8D2GPA==", + ], + + "@radix-ui/react-alert-dialog": [ + "@radix-ui/react-alert-dialog@1.1.15", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dialog": "1.1.15", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-oTVLkEw5GpdRe29BqJ0LSDFWI3qu0vR1M0mUkOQWDIUnY/QIkLpgDMWuKxP94c2NAC2LGcgVhG1ImF3jkZ5wXw==", + ], + + "@radix-ui/react-arrow": [ + "@radix-ui/react-arrow@1.1.7", + "", + { + "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 
|| ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", + ], + + "@radix-ui/react-aspect-ratio": [ + "@radix-ui/react-aspect-ratio@1.1.7", + "", + { + "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-Yq6lvO9HQyPwev1onK1daHCHqXVLzPhSVjmsNjCa2Zcxy2f7uJD2itDtxknv6FzAKCwD1qQkeVDmX/cev13n/g==", + ], + + "@radix-ui/react-avatar": [ + "@radix-ui/react-avatar@1.1.10", + "", + { + "dependencies": { + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-is-hydrated": "0.1.0", + "@radix-ui/react-use-layout-effect": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-V8piFfWapM5OmNCXTzVQY+E1rDa53zY+MQ4Y7356v4fFz6vqCyUtIz2rUD44ZEdwg78/jKmMJHj07+C/Z/rcog==", + ], + + "@radix-ui/react-checkbox": [ + "@radix-ui/react-checkbox@1.3.3", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + 
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-wBbpv+NQftHDdG86Qc0pIyXk5IR3tM8Vd0nWLKDcX8nNn4nXFOFwsKuqw2okA/1D/mpaAkmuyndrPJTYDNZtFw==", + ], + + "@radix-ui/react-collapsible": [ + "@radix-ui/react-collapsible@1.1.12", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-Uu+mSh4agx2ib1uIGPP4/CKNULyajb3p92LsVXmH2EHVMTfZWpll88XJ0j4W0z3f8NK1eYl1+Mf/szHPmcHzyA==", + ], + + "@radix-ui/react-collection": [ + "@radix-ui/react-collection@1.1.7", + "", + { + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", + ], + + "@radix-ui/react-compose-refs": [ + "@radix-ui/react-compose-refs@1.1.2", + "", + { + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + 
"sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + ], + + "@radix-ui/react-context": [ + "@radix-ui/react-context@1.1.2", + "", + { + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==", + ], + + "@radix-ui/react-context-menu": [ + "@radix-ui/react-context-menu@2.2.16", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-menu": "2.1.16", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-O8morBEW+HsVG28gYDZPTrT9UUovQUlJue5YO836tiTJhuIWBm/zQHc7j388sHWtdH/xUZurK9olD2+pcqx5ww==", + ], + + "@radix-ui/react-dialog": [ + "@radix-ui/react-dialog@1.1.15", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || 
^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-TCglVRtzlffRNxRMEyR36DGBLJpeusFcgMVD9PZEzAKnUs1lKCgX5u9BmC2Yg+LL9MgZDugFFs1Vl+Jp4t/PGw==", + ], + + "@radix-ui/react-direction": [ + "@radix-ui/react-direction@1.1.1", + "", + { + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw==", + ], + + "@radix-ui/react-dismissable-layer": [ + "@radix-ui/react-dismissable-layer@1.1.11", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-escape-keydown": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==", + ], + + "@radix-ui/react-dropdown-menu": [ + "@radix-ui/react-dropdown-menu@2.1.16", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-menu": "2.1.16", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + 
"sha512-1PLGQEynI/3OX/ftV54COn+3Sud/Mn8vALg2rWnBLnRaGtJDduNW/22XjlGgPdpcIbiQxjKtb7BkcjP00nqfJw==", + ], + + "@radix-ui/react-focus-guards": [ + "@radix-ui/react-focus-guards@1.1.3", + "", + { + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-0rFg/Rj2Q62NCm62jZw0QX7a3sz6QCQU0LpZdNrJX8byRGaGVTqbrW9jAoIAHyMQqsNpeZ81YgSizOt5WXq0Pw==", + ], + + "@radix-ui/react-focus-scope": [ + "@radix-ui/react-focus-scope@1.1.7", + "", + { + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", + ], + + "@radix-ui/react-form": [ + "@radix-ui/react-form@0.1.8", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-label": "2.1.7", + "@radix-ui/react-primitive": "2.1.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-QM70k4Zwjttifr5a4sZFts9fn8FzHYvQ5PiB19O2HsYibaHSVt9fH9rzB0XZo/YcM+b7t/p7lYCT/F5eOeF5yQ==", + ], + + "@radix-ui/react-hover-card": [ + "@radix-ui/react-hover-card@1.1.15", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + 
"@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-qgTkjNT1CfKMoP0rcasmlH2r1DAiYicWsDsufxl940sT2wHNEWWv6FMWIQXWhVdmC1d/HYfbhQx60KYyAtKxjg==", + ], + + "@radix-ui/react-id": [ + "@radix-ui/react-id@1.1.1", + "", + { + "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==", + ], + + "@radix-ui/react-label": [ + "@radix-ui/react-label@2.1.7", + "", + { + "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-YT1GqPSL8kJn20djelMX7/cTRp/Y9w5IZHvfxQTVHrOqa2yMl7i/UfMqKRU5V7mEyKTrUVgJXhNQPVCG8PBLoQ==", + ], + + "@radix-ui/react-menu": [ + "@radix-ui/react-menu@2.1.16", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + 
"@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-72F2T+PLlphrqLcAotYPp0uJMr5SjP5SL01wfEspJbru5Zs5vQaSHb4VB3ZMJPimgHHCHG7gMOeOB9H3Hdmtxg==", + ], + + "@radix-ui/react-menubar": [ + "@radix-ui/react-menubar@1.1.16", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-menu": "2.1.16", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-EB1FktTz5xRRi2Er974AUQZWg2yVBb1yjip38/lgwtCVRd3a+maUoGHN/xs9Yv8SY8QwbSEb+YrxGadVWbEutA==", + ], + + "@radix-ui/react-navigation-menu": [ + "@radix-ui/react-navigation-menu@1.2.14", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-id": "1.1.1", + 
"@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-YB9mTFQvCOAQMHU+C/jVl96WmuWeltyUEpRJJky51huhds5W2FQr1J8D/16sQlf0ozxkPK8uF3niQMdUwZPv5w==", + ], + + "@radix-ui/react-one-time-password-field": [ + "@radix-ui/react-one-time-password-field@0.1.8", + "", + { + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-is-hydrated": "0.1.0", + "@radix-ui/react-use-layout-effect": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-ycS4rbwURavDPVjCb5iS3aG4lURFDILi6sKI/WITUMZ13gMmn/xGjpLoqBAalhJaDk8I3UbCM5GzKHrnzwHbvg==", + ], + + "@radix-ui/react-password-toggle-field": [ + "@radix-ui/react-password-toggle-field@0.1.3", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + 
"@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-is-hydrated": "0.1.0", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-/UuCrDBWravcaMix4TdT+qlNdVwOM1Nck9kWx/vafXsdfj1ChfhOdfi3cy9SGBpWgTXwYCuboT/oYpJy3clqfw==", + ], + + "@radix-ui/react-popover": [ + "@radix-ui/react-popover@1.1.15", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-kr0X2+6Yy/vJzLYJUPCZEc8SfQcf+1COFoAqauJm74umQhta9M7lNJHP7QQS3vkvcGLQUbWpMzwrXYwrYztHKA==", + ], + + "@radix-ui/react-popper": [ + "@radix-ui/react-popper@1.2.8", + "", + { + "dependencies": { + "@floating-ui/react-dom": "^2.0.0", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + 
"@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-rect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/rect": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==", + ], + + "@radix-ui/react-portal": [ + "@radix-ui/react-portal@1.1.9", + "", + { + "dependencies": { + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-layout-effect": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", + ], + + "@radix-ui/react-presence": [ + "@radix-ui/react-presence@1.1.5", + "", + { + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==", + ], + + "@radix-ui/react-primitive": [ + "@radix-ui/react-primitive@2.1.3", + "", + { + "dependencies": { "@radix-ui/react-slot": "1.2.3" }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", 
+ }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", + ], + + "@radix-ui/react-progress": [ + "@radix-ui/react-progress@1.1.7", + "", + { + "dependencies": { + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-vPdg/tF6YC/ynuBIJlk1mm7Le0VgW6ub6J2UWnTQ7/D23KXcPI1qy+0vBkgKgd38RCMJavBXpB83HPNFMTb0Fg==", + ], + + "@radix-ui/react-radio-group": [ + "@radix-ui/react-radio-group@1.3.8", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-VBKYIYImA5zsxACdisNQ3BjCBfmbGH3kQlnFVqlWU4tXwjy7cGX8ta80BcrO+WJXIn5iBylEH3K6ZTlee//lgQ==", + ], + + "@radix-ui/react-roving-focus": [ + "@radix-ui/react-roving-focus@1.1.11", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": 
"2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-7A6S9jSgm/S+7MdtNDSb+IU859vQqJ/QAtcYQcfFC6W8RS4IxIZDldLR0xqCFZ6DCyrQLjLPsxtTNch5jVA4lA==", + ], + + "@radix-ui/react-scroll-area": [ + "@radix-ui/react-scroll-area@1.2.10", + "", + { + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-layout-effect": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-tAXIa1g3sM5CGpVT0uIbUx/U3Gs5N8T52IICuCtObaos1S8fzsrPXG5WObkQN3S6NVl6wKgPhAIiBGbWnvc97A==", + ], + + "@radix-ui/react-select": [ + "@radix-ui/react-select@2.2.6", + "", + { + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + 
"@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3", + "aria-hidden": "^1.2.4", + "react-remove-scroll": "^2.6.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-I30RydO+bnn2PQztvo25tswPH+wFBjehVGtmagkU78yMdwTwVf12wnAOF+AeP8S2N8xD+5UPbGhkUfPyvT+mwQ==", + ], + + "@radix-ui/react-separator": [ + "@radix-ui/react-separator@1.1.7", + "", + { + "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-0HEb8R9E8A+jZjvmFCy/J4xhbXy3TV+9XSnGJ3KvTtjlIUy/YQ/p6UYZvi7YbeoeXdyU9+Y3scizK6hkY37baA==", + ], + + "@radix-ui/react-slider": [ + "@radix-ui/react-slider@1.3.6", + "", + { + "dependencies": { + "@radix-ui/number": "1.1.1", + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", 
"@types/react-dom"], + }, + "sha512-JPYb1GuM1bxfjMRlNLE+BcmBC8onfCi60Blk7OBqi2MLTFdS+8401U4uFjnwkOr49BLmXxLC6JHkvAsx5OJvHw==", + ], + + "@radix-ui/react-slot": [ + "@radix-ui/react-slot@1.2.3", + "", + { + "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", + ], + + "@radix-ui/react-switch": [ + "@radix-ui/react-switch@1.2.6", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-bByzr1+ep1zk4VubeEVViV592vu2lHE2BZY5OnzehZqOOgogN80+mNtCqPkhn2gklJqOpxWgPoYTSnhBCqpOXQ==", + ], + + "@radix-ui/react-tabs": [ + "@radix-ui/react-tabs@1.1.13", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-use-controllable-state": "1.2.2", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + 
"sha512-7xdcatg7/U+7+Udyoj2zodtI9H/IIopqo+YOIcZOq1nJwXWBZ9p8xiu5llXlekDbZkca79a/fozEYQXIA4sW6A==", + ], + + "@radix-ui/react-toast": [ + "@radix-ui/react-toast@1.2.15", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-3OSz3TacUWy4WtOXV38DggwxoqJK4+eDkNMl5Z/MJZaoUPaP4/9lf81xXMe1I2ReTAptverZUpbPY4wWwWyL5g==", + ], + + "@radix-ui/react-toggle": [ + "@radix-ui/react-toggle@1.1.10", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-lS1odchhFTeZv3xwHH31YPObmJn8gOg7Lq12inrr0+BH/l3Tsq32VfjqH1oh80ARM3mlkfMic15n0kg4sD1poQ==", + ], + + "@radix-ui/react-toggle-group": [ + "@radix-ui/react-toggle-group@1.1.11", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", 
+ "@radix-ui/react-toggle": "1.1.10", + "@radix-ui/react-use-controllable-state": "1.2.2", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-5umnS0T8JQzQT6HbPyO7Hh9dgd82NmS36DQr+X/YJ9ctFNCiiQd6IJAYYZ33LUwm8M+taCz5t2ui29fHZc4Y6Q==", + ], + + "@radix-ui/react-toolbar": [ + "@radix-ui/react-toolbar@1.1.11", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-separator": "1.1.7", + "@radix-ui/react-toggle-group": "1.1.11", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-4ol06/1bLoFu1nwUqzdD4Y5RZ9oDdKeiHIsntug54Hcr1pgaHiPqHFEaXI1IFP/EsOfROQZ8Mig9VTIRza6Tjg==", + ], + + "@radix-ui/react-tooltip": [ + "@radix-ui/react-tooltip@1.2.8", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-visually-hidden": "1.2.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || 
^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg==", + ], + + "@radix-ui/react-use-callback-ref": [ + "@radix-ui/react-use-callback-ref@1.1.1", + "", + { + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==", + ], + + "@radix-ui/react-use-controllable-state": [ + "@radix-ui/react-use-controllable-state@1.2.2", + "", + { + "dependencies": { + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-layout-effect": "1.1.1", + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==", + ], + + "@radix-ui/react-use-effect-event": [ + "@radix-ui/react-use-effect-event@0.0.2", + "", + { + "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==", + ], + + "@radix-ui/react-use-escape-keydown": [ + "@radix-ui/react-use-escape-keydown@1.1.1", + "", + { + "dependencies": { "@radix-ui/react-use-callback-ref": "1.1.1" }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==", + ], + + "@radix-ui/react-use-is-hydrated": [ + "@radix-ui/react-use-is-hydrated@0.1.0", + "", + { + "dependencies": { 
"use-sync-external-store": "^1.5.0" }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-U+UORVEq+cTnRIaostJv9AGdV3G6Y+zbVd+12e18jQ5A3c0xL03IhnHuiU4UV69wolOQp5GfR58NW/EgdQhwOA==", + ], + + "@radix-ui/react-use-layout-effect": [ + "@radix-ui/react-use-layout-effect@1.1.1", + "", + { + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==", + ], + + "@radix-ui/react-use-previous": [ + "@radix-ui/react-use-previous@1.1.1", + "", + { + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ==", + ], + + "@radix-ui/react-use-rect": [ + "@radix-ui/react-use-rect@1.1.1", + "", + { + "dependencies": { "@radix-ui/rect": "1.1.1" }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==", + ], + + "@radix-ui/react-use-size": [ + "@radix-ui/react-use-size@1.1.1", + "", + { + "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==", + ], + + "@radix-ui/react-visually-hidden": [ + "@radix-ui/react-visually-hidden@1.2.3", + "", + { + "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, + "peerDependencies": { + 
"@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", + ], + + "@radix-ui/rect": [ + "@radix-ui/rect@1.1.1", + "", + {}, + "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==", + ], + + "@rolldown/binding-android-arm64": [ + "@rolldown/binding-android-arm64@1.0.0-beta.50", + "", + { "os": "android", "cpu": "arm64" }, + "sha512-XlEkrOIHLyGT3avOgzfTFSjG+f+dZMw+/qd+Y3HLN86wlndrB/gSimrJCk4gOhr1XtRtEKfszpadI3Md4Z4/Ag==", + ], + + "@rolldown/binding-darwin-arm64": [ + "@rolldown/binding-darwin-arm64@1.0.0-beta.50", + "", + { "os": "darwin", "cpu": "arm64" }, + "sha512-+JRqKJhoFlt5r9q+DecAGPLZ5PxeLva+wCMtAuoFMWPoZzgcYrr599KQ+Ix0jwll4B4HGP43avu9My8KtSOR+w==", + ], + + "@rolldown/binding-darwin-x64": [ + "@rolldown/binding-darwin-x64@1.0.0-beta.50", + "", + { "os": "darwin", "cpu": "x64" }, + "sha512-fFXDjXnuX7/gQZQm/1FoivVtRcyAzdjSik7Eo+9iwPQ9EgtA5/nB2+jmbzaKtMGG3q+BnZbdKHCtOacmNrkIDA==", + ], + + "@rolldown/binding-freebsd-x64": [ + "@rolldown/binding-freebsd-x64@1.0.0-beta.50", + "", + { "os": "freebsd", "cpu": "x64" }, + "sha512-F1b6vARy49tjmT/hbloplzgJS7GIvwWZqt+tAHEstCh0JIh9sa8FAMVqEmYxDviqKBaAI8iVvUREm/Kh/PD26Q==", + ], + + "@rolldown/binding-linux-arm-gnueabihf": [ + "@rolldown/binding-linux-arm-gnueabihf@1.0.0-beta.50", + "", + { "os": "linux", "cpu": "arm" }, + "sha512-U6cR76N8T8M6lHj7EZrQ3xunLPxSvYYxA8vJsBKZiFZkT8YV4kjgCO3KwMJL0NOjQCPGKyiXO07U+KmJzdPGRw==", + ], + + "@rolldown/binding-linux-arm64-gnu": [ + "@rolldown/binding-linux-arm64-gnu@1.0.0-beta.50", + "", + { "os": "linux", "cpu": "arm64" }, + "sha512-ONgyjofCrrE3bnh5GZb8EINSFyR/hmwTzZ7oVuyUB170lboza1VMCnb8jgE6MsyyRgHYmN8Lb59i3NKGrxrYjw==", + ], + + 
"@rolldown/binding-linux-arm64-musl": [ + "@rolldown/binding-linux-arm64-musl@1.0.0-beta.50", + "", + { "os": "linux", "cpu": "arm64" }, + "sha512-L0zRdH2oDPkmB+wvuTl+dJbXCsx62SkqcEqdM+79LOcB+PxbAxxjzHU14BuZIQdXcAVDzfpMfaHWzZuwhhBTcw==", + ], + + "@rolldown/binding-linux-x64-gnu": [ + "@rolldown/binding-linux-x64-gnu@1.0.0-beta.50", + "", + { "os": "linux", "cpu": "x64" }, + "sha512-gyoI8o/TGpQd3OzkJnh1M2kxy1Bisg8qJ5Gci0sXm9yLFzEXIFdtc4EAzepxGvrT2ri99ar5rdsmNG0zP0SbIg==", + ], + + "@rolldown/binding-linux-x64-musl": [ + "@rolldown/binding-linux-x64-musl@1.0.0-beta.50", + "", + { "os": "linux", "cpu": "x64" }, + "sha512-zti8A7M+xFDpKlghpcCAzyOi+e5nfUl3QhU023ce5NCgUxRG5zGP2GR9LTydQ1rnIPwZUVBWd4o7NjZDaQxaXA==", + ], + + "@rolldown/binding-openharmony-arm64": [ + "@rolldown/binding-openharmony-arm64@1.0.0-beta.50", + "", + { "os": "none", "cpu": "arm64" }, + "sha512-eZUssog7qljrrRU9Mi0eqYEPm3Ch0UwB+qlWPMKSUXHNqhm3TvDZarJQdTevGEfu3EHAXJvBIe0YFYr0TPVaMA==", + ], + + "@rolldown/binding-wasm32-wasi": [ + "@rolldown/binding-wasm32-wasi@1.0.0-beta.50", + "", + { "dependencies": { "@napi-rs/wasm-runtime": "^1.0.7" }, "cpu": "none" }, + "sha512-nmCN0nIdeUnmgeDXiQ+2HU6FT162o+rxnF7WMkBm4M5Ds8qTU7Dzv2Wrf22bo4ftnlrb2hKK6FSwAJSAe2FWLg==", + ], + + "@rolldown/binding-win32-arm64-msvc": [ + "@rolldown/binding-win32-arm64-msvc@1.0.0-beta.50", + "", + { "os": "win32", "cpu": "arm64" }, + "sha512-7kcNLi7Ua59JTTLvbe1dYb028QEPaJPJQHqkmSZ5q3tJueUeb6yjRtx8mw4uIqgWZcnQHAR3PrLN4XRJxvgIkA==", + ], + + "@rolldown/binding-win32-ia32-msvc": [ + "@rolldown/binding-win32-ia32-msvc@1.0.0-beta.50", + "", + { "os": "win32", "cpu": "ia32" }, + "sha512-lL70VTNvSCdSZkDPPVMwWn/M2yQiYvSoXw9hTLgdIWdUfC3g72UaruezusR6ceRuwHCY1Ayu2LtKqXkBO5LIwg==", + ], + + "@rolldown/binding-win32-x64-msvc": [ + "@rolldown/binding-win32-x64-msvc@1.0.0-beta.50", + "", + { "os": "win32", "cpu": "x64" }, + "sha512-4qU4x5DXWB4JPjyTne/wBNPqkbQU8J45bl21geERBKtEittleonioACBL1R0PsBu0Aq21SwMK5a9zdBkWSlQtQ==", + ], + + 
"@rolldown/pluginutils": [ + "@rolldown/pluginutils@1.0.0-beta.53", + "", + {}, + "sha512-vENRlFU4YbrwVqNDZ7fLvy+JR1CRkyr01jhSiDpE1u6py3OMzQfztQU2jxykW3ALNxO4kSlqIDeYyD0Y9RcQeQ==", + ], + + "@rollup/pluginutils": [ + "@rollup/pluginutils@5.3.0", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^2.0.2", + "picomatch": "^4.0.2", + }, + "peerDependencies": { "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, + "optionalPeers": ["rollup"], + }, + "sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q==", + ], + + "@rollup/rollup-android-arm-eabi": [ + "@rollup/rollup-android-arm-eabi@4.57.1", + "", + { "os": "android", "cpu": "arm" }, + "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==", + ], + + "@rollup/rollup-android-arm64": [ + "@rollup/rollup-android-arm64@4.57.1", + "", + { "os": "android", "cpu": "arm64" }, + "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==", + ], + + "@rollup/rollup-darwin-arm64": [ + "@rollup/rollup-darwin-arm64@4.57.1", + "", + { "os": "darwin", "cpu": "arm64" }, + "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==", + ], + + "@rollup/rollup-darwin-x64": [ + "@rollup/rollup-darwin-x64@4.57.1", + "", + { "os": "darwin", "cpu": "x64" }, + "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==", + ], + + "@rollup/rollup-freebsd-arm64": [ + "@rollup/rollup-freebsd-arm64@4.57.1", + "", + { "os": "freebsd", "cpu": "arm64" }, + "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==", + ], + + "@rollup/rollup-freebsd-x64": [ + "@rollup/rollup-freebsd-x64@4.57.1", + "", + { "os": "freebsd", "cpu": "x64" }, + "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==", + ], + + "@rollup/rollup-linux-arm-gnueabihf": [ + 
"@rollup/rollup-linux-arm-gnueabihf@4.57.1", + "", + { "os": "linux", "cpu": "arm" }, + "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==", + ], + + "@rollup/rollup-linux-arm-musleabihf": [ + "@rollup/rollup-linux-arm-musleabihf@4.57.1", + "", + { "os": "linux", "cpu": "arm" }, + "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==", + ], + + "@rollup/rollup-linux-arm64-gnu": [ + "@rollup/rollup-linux-arm64-gnu@4.57.1", + "", + { "os": "linux", "cpu": "arm64" }, + "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==", + ], + + "@rollup/rollup-linux-arm64-musl": [ + "@rollup/rollup-linux-arm64-musl@4.57.1", + "", + { "os": "linux", "cpu": "arm64" }, + "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==", + ], + + "@rollup/rollup-linux-loong64-gnu": [ + "@rollup/rollup-linux-loong64-gnu@4.57.1", + "", + { "os": "linux", "cpu": "none" }, + "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==", + ], + + "@rollup/rollup-linux-loong64-musl": [ + "@rollup/rollup-linux-loong64-musl@4.57.1", + "", + { "os": "linux", "cpu": "none" }, + "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==", + ], + + "@rollup/rollup-linux-ppc64-gnu": [ + "@rollup/rollup-linux-ppc64-gnu@4.57.1", + "", + { "os": "linux", "cpu": "ppc64" }, + "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==", + ], + + "@rollup/rollup-linux-ppc64-musl": [ + "@rollup/rollup-linux-ppc64-musl@4.57.1", + "", + { "os": "linux", "cpu": "ppc64" }, + "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==", + ], + + "@rollup/rollup-linux-riscv64-gnu": [ + "@rollup/rollup-linux-riscv64-gnu@4.57.1", + "", + { "os": "linux", "cpu": "none" }, + 
"sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==", + ], + + "@rollup/rollup-linux-riscv64-musl": [ + "@rollup/rollup-linux-riscv64-musl@4.57.1", + "", + { "os": "linux", "cpu": "none" }, + "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==", + ], + + "@rollup/rollup-linux-s390x-gnu": [ + "@rollup/rollup-linux-s390x-gnu@4.57.1", + "", + { "os": "linux", "cpu": "s390x" }, + "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==", + ], + + "@rollup/rollup-linux-x64-gnu": [ + "@rollup/rollup-linux-x64-gnu@4.57.1", + "", + { "os": "linux", "cpu": "x64" }, + "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==", + ], + + "@rollup/rollup-linux-x64-musl": [ + "@rollup/rollup-linux-x64-musl@4.57.1", + "", + { "os": "linux", "cpu": "x64" }, + "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==", + ], + + "@rollup/rollup-openbsd-x64": [ + "@rollup/rollup-openbsd-x64@4.57.1", + "", + { "os": "openbsd", "cpu": "x64" }, + "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==", + ], + + "@rollup/rollup-openharmony-arm64": [ + "@rollup/rollup-openharmony-arm64@4.57.1", + "", + { "os": "none", "cpu": "arm64" }, + "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==", + ], + + "@rollup/rollup-win32-arm64-msvc": [ + "@rollup/rollup-win32-arm64-msvc@4.57.1", + "", + { "os": "win32", "cpu": "arm64" }, + "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==", + ], + + "@rollup/rollup-win32-ia32-msvc": [ + "@rollup/rollup-win32-ia32-msvc@4.57.1", + "", + { "os": "win32", "cpu": "ia32" }, + "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==", + ], + + "@rollup/rollup-win32-x64-gnu": [ + 
"@rollup/rollup-win32-x64-gnu@4.57.1", + "", + { "os": "win32", "cpu": "x64" }, + "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==", + ], + + "@rollup/rollup-win32-x64-msvc": [ + "@rollup/rollup-win32-x64-msvc@4.57.1", + "", + { "os": "win32", "cpu": "x64" }, + "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==", + ], + + "@sec-ant/readable-stream": [ + "@sec-ant/readable-stream@0.4.1", + "", + {}, + "sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==", + ], + + "@shikijs/core": [ + "@shikijs/core@3.22.0", + "", + { + "dependencies": { + "@shikijs/types": "3.22.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4", + "hast-util-to-html": "^9.0.5", + }, + }, + "sha512-iAlTtSDDbJiRpvgL5ugKEATDtHdUVkqgHDm/gbD2ZS9c88mx7G1zSYjjOxp5Qa0eaW0MAQosFRmJSk354PRoQA==", + ], + + "@shikijs/engine-javascript": [ + "@shikijs/engine-javascript@3.22.0", + "", + { + "dependencies": { + "@shikijs/types": "3.22.0", + "@shikijs/vscode-textmate": "^10.0.2", + "oniguruma-to-es": "^4.3.4", + }, + }, + "sha512-jdKhfgW9CRtj3Tor0L7+yPwdG3CgP7W+ZEqSsojrMzCjD1e0IxIbwUMDDpYlVBlC08TACg4puwFGkZfLS+56Tw==", + ], + + "@shikijs/engine-oniguruma": [ + "@shikijs/engine-oniguruma@3.22.0", + "", + { "dependencies": { "@shikijs/types": "3.22.0", "@shikijs/vscode-textmate": "^10.0.2" } }, + "sha512-DyXsOG0vGtNtl7ygvabHd7Mt5EY8gCNqR9Y7Lpbbd/PbJvgWrqaKzH1JW6H6qFkuUa8aCxoiYVv8/YfFljiQxA==", + ], + + "@shikijs/langs": [ + "@shikijs/langs@3.22.0", + "", + { "dependencies": { "@shikijs/types": "3.22.0" } }, + "sha512-x/42TfhWmp6H00T6uwVrdTJGKgNdFbrEdhaDwSR5fd5zhQ1Q46bHq9EO61SCEWJR0HY7z2HNDMaBZp8JRmKiIA==", + ], + + "@shikijs/themes": [ + "@shikijs/themes@3.22.0", + "", + { "dependencies": { "@shikijs/types": "3.22.0" } }, + "sha512-o+tlOKqsr6FE4+mYJG08tfCFDS+3CG20HbldXeVoyP+cYSUxDhrFf3GPjE60U55iOkkjbpY2uC3It/eeja35/g==", + ], + + "@shikijs/types": [ + 
"@shikijs/types@3.22.0", + "", + { "dependencies": { "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, + "sha512-491iAekgKDBFE67z70Ok5a8KBMsQ2IJwOWw3us/7ffQkIBCyOQfm/aNwVMBUriP02QshIfgHCBSIYAl3u2eWjg==", + ], + + "@shikijs/vscode-textmate": [ + "@shikijs/vscode-textmate@10.0.2", + "", + {}, + "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==", + ], + + "@sindresorhus/merge-streams": [ + "@sindresorhus/merge-streams@4.0.0", + "", + {}, + "sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ==", + ], + + "@stoplight/better-ajv-errors": [ + "@stoplight/better-ajv-errors@1.0.3", + "", + { + "dependencies": { "jsonpointer": "^5.0.0", "leven": "^3.1.0" }, + "peerDependencies": { "ajv": ">=8" }, + }, + "sha512-0p9uXkuB22qGdNfy3VeEhxkU5uwvp/KrBTAbrLBURv6ilxIVwanKwjMc41lQfIVgPGcOkmLbTolfFrSsueu7zA==", + ], + + "@stoplight/json": [ + "@stoplight/json@3.21.7", + "", + { + "dependencies": { + "@stoplight/ordered-object-literal": "^1.0.3", + "@stoplight/path": "^1.3.2", + "@stoplight/types": "^13.6.0", + "jsonc-parser": "~2.2.1", + "lodash": "^4.17.21", + "safe-stable-stringify": "^1.1", + }, + }, + "sha512-xcJXgKFqv/uCEgtGlPxy3tPA+4I+ZI4vAuMJ885+ThkTHFVkC+0Fm58lA9NlsyjnkpxFh4YiQWpH+KefHdbA0A==", + ], + + "@stoplight/json-ref-readers": [ + "@stoplight/json-ref-readers@1.2.2", + "", + { "dependencies": { "node-fetch": "^2.6.0", "tslib": "^1.14.1" } }, + "sha512-nty0tHUq2f1IKuFYsLM4CXLZGHdMn+X/IwEUIpeSOXt0QjMUbL0Em57iJUDzz+2MkWG83smIigNZ3fauGjqgdQ==", + ], + + "@stoplight/json-ref-resolver": [ + "@stoplight/json-ref-resolver@3.1.6", + "", + { + "dependencies": { + "@stoplight/json": "^3.21.0", + "@stoplight/path": "^1.3.2", + "@stoplight/types": "^12.3.0 || ^13.0.0", + "@types/urijs": "^1.19.19", + "dependency-graph": "~0.11.0", + "fast-memoize": "^2.5.2", + "immer": "^9.0.6", + "lodash": "^4.17.21", + "tslib": "^2.6.0", + "urijs": "^1.19.11", + }, + }, + 
"sha512-YNcWv3R3n3U6iQYBsFOiWSuRGE5su1tJSiX6pAPRVk7dP0L7lqCteXGzuVRQ0gMZqUl8v1P0+fAKxF6PLo9B5A==", + ], + + "@stoplight/ordered-object-literal": [ + "@stoplight/ordered-object-literal@1.0.5", + "", + {}, + "sha512-COTiuCU5bgMUtbIFBuyyh2/yVVzlr5Om0v5utQDgBCuQUOPgU1DwoffkTfg4UBQOvByi5foF4w4T+H9CoRe5wg==", + ], + + "@stoplight/path": [ + "@stoplight/path@1.3.2", + "", + {}, + "sha512-lyIc6JUlUA8Ve5ELywPC8I2Sdnh1zc1zmbYgVarhXIp9YeAB0ReeqmGEOWNtlHkbP2DAA1AL65Wfn2ncjK/jtQ==", + ], + + "@stoplight/spectral-core": [ + "@stoplight/spectral-core@1.20.0", + "", + { + "dependencies": { + "@stoplight/better-ajv-errors": "1.0.3", + "@stoplight/json": "~3.21.0", + "@stoplight/path": "1.3.2", + "@stoplight/spectral-parsers": "^1.0.0", + "@stoplight/spectral-ref-resolver": "^1.0.4", + "@stoplight/spectral-runtime": "^1.1.2", + "@stoplight/types": "~13.6.0", + "@types/es-aggregate-error": "^1.0.2", + "@types/json-schema": "^7.0.11", + "ajv": "^8.17.1", + "ajv-errors": "~3.0.0", + "ajv-formats": "~2.1.1", + "es-aggregate-error": "^1.0.7", + "jsonpath-plus": "^10.3.0", + "lodash": "~4.17.21", + "lodash.topath": "^4.5.2", + "minimatch": "3.1.2", + "nimma": "0.2.3", + "pony-cause": "^1.1.1", + "simple-eval": "1.0.1", + "tslib": "^2.8.1", + }, + }, + "sha512-5hBP81nCC1zn1hJXL/uxPNRKNcB+/pEIHgCjPRpl/w/qy9yC9ver04tw1W0l/PMiv0UeB5dYgozXVQ4j5a6QQQ==", + ], + + "@stoplight/spectral-formats": [ + "@stoplight/spectral-formats@1.8.2", + "", + { + "dependencies": { + "@stoplight/json": "^3.17.0", + "@stoplight/spectral-core": "^1.19.2", + "@types/json-schema": "^7.0.7", + "tslib": "^2.8.1", + }, + }, + "sha512-c06HB+rOKfe7tuxg0IdKDEA5XnjL2vrn/m/OVIIxtINtBzphZrOgtRn7epQ5bQF5SWp84Ue7UJWaGgDwVngMFw==", + ], + + "@stoplight/spectral-functions": [ + "@stoplight/spectral-functions@1.10.1", + "", + { + "dependencies": { + "@stoplight/better-ajv-errors": "1.0.3", + "@stoplight/json": "^3.17.1", + "@stoplight/spectral-core": "^1.19.4", + "@stoplight/spectral-formats": "^1.8.1", + 
"@stoplight/spectral-runtime": "^1.1.2", + "ajv": "^8.17.1", + "ajv-draft-04": "~1.0.0", + "ajv-errors": "~3.0.0", + "ajv-formats": "~2.1.1", + "lodash": "~4.17.21", + "tslib": "^2.8.1", + }, + }, + "sha512-obu8ZfoHxELOapfGsCJixKZXZcffjg+lSoNuttpmUFuDzVLT3VmH8QkPXfOGOL5Pz80BR35ClNAToDkdnYIURg==", + ], + + "@stoplight/spectral-parsers": [ + "@stoplight/spectral-parsers@1.0.5", + "", + { + "dependencies": { + "@stoplight/json": "~3.21.0", + "@stoplight/types": "^14.1.1", + "@stoplight/yaml": "~4.3.0", + "tslib": "^2.8.1", + }, + }, + "sha512-ANDTp2IHWGvsQDAY85/jQi9ZrF4mRrA5bciNHX+PUxPr4DwS6iv4h+FVWJMVwcEYdpyoIdyL+SRmHdJfQEPmwQ==", + ], + + "@stoplight/spectral-ref-resolver": [ + "@stoplight/spectral-ref-resolver@1.0.5", + "", + { + "dependencies": { + "@stoplight/json-ref-readers": "1.2.2", + "@stoplight/json-ref-resolver": "~3.1.6", + "@stoplight/spectral-runtime": "^1.1.2", + "dependency-graph": "0.11.0", + "tslib": "^2.8.1", + }, + }, + "sha512-gj3TieX5a9zMW29z3mBlAtDOCgN3GEc1VgZnCVlr5irmR4Qi5LuECuFItAq4pTn5Zu+sW5bqutsCH7D4PkpyAA==", + ], + + "@stoplight/spectral-rulesets": [ + "@stoplight/spectral-rulesets@1.22.0", + "", + { + "dependencies": { + "@asyncapi/specs": "^6.8.0", + "@stoplight/better-ajv-errors": "1.0.3", + "@stoplight/json": "^3.17.0", + "@stoplight/spectral-core": "^1.19.4", + "@stoplight/spectral-formats": "^1.8.1", + "@stoplight/spectral-functions": "^1.9.1", + "@stoplight/spectral-runtime": "^1.1.2", + "@stoplight/types": "^13.6.0", + "@types/json-schema": "^7.0.7", + "ajv": "^8.17.1", + "ajv-formats": "~2.1.1", + "json-schema-traverse": "^1.0.0", + "leven": "3.1.0", + "lodash": "~4.17.21", + "tslib": "^2.8.1", + }, + }, + "sha512-l2EY2jiKKLsvnPfGy+pXC0LeGsbJzcQP5G/AojHgf+cwN//VYxW1Wvv4WKFx/CLmLxc42mJYF2juwWofjWYNIQ==", + ], + + "@stoplight/spectral-runtime": [ + "@stoplight/spectral-runtime@1.1.4", + "", + { + "dependencies": { + "@stoplight/json": "^3.20.1", + "@stoplight/path": "^1.3.2", + "@stoplight/types": "^13.6.0", + "abort-controller": 
"^3.0.0", + "lodash": "^4.17.21", + "node-fetch": "^2.7.0", + "tslib": "^2.8.1", + }, + }, + "sha512-YHbhX3dqW0do6DhiPSgSGQzr6yQLlWybhKwWx0cqxjMwxej3TqLv3BXMfIUYFKKUqIwH4Q2mV8rrMM8qD2N0rQ==", + ], + + "@stoplight/types": [ + "@stoplight/types@13.6.0", + "", + { "dependencies": { "@types/json-schema": "^7.0.4", "utility-types": "^3.10.0" } }, + "sha512-dzyuzvUjv3m1wmhPfq82lCVYGcXG0xUYgqnWfCq3PCVR4BKFhjdkHrnJ+jIDoMKvXb05AZP/ObQF6+NpDo29IQ==", + ], + + "@stoplight/yaml": [ + "@stoplight/yaml@4.3.0", + "", + { + "dependencies": { + "@stoplight/ordered-object-literal": "^1.0.5", + "@stoplight/types": "^14.1.1", + "@stoplight/yaml-ast-parser": "0.0.50", + "tslib": "^2.2.0", + }, + }, + "sha512-JZlVFE6/dYpP9tQmV0/ADfn32L9uFarHWxfcRhReKUnljz1ZiUM5zpX+PH8h5CJs6lao3TuFqnPm9IJJCEkE2w==", + ], + + "@stoplight/yaml-ast-parser": [ + "@stoplight/yaml-ast-parser@0.0.50", + "", + {}, + "sha512-Pb6M8TDO9DtSVla9yXSTAxmo9GVEouq5P40DWXdOie69bXogZTkgvopCq+yEvTMA0F6PEvdJmbtTV3ccIp11VQ==", + ], + + "@tailwindcss/node": [ + "@tailwindcss/node@4.1.18", + "", + { + "dependencies": { + "@jridgewell/remapping": "^2.3.4", + "enhanced-resolve": "^5.18.3", + "jiti": "^2.6.1", + "lightningcss": "1.30.2", + "magic-string": "^0.30.21", + "source-map-js": "^1.2.1", + "tailwindcss": "4.1.18", + }, + }, + "sha512-DoR7U1P7iYhw16qJ49fgXUlry1t4CpXeErJHnQ44JgTSKMaZUdf17cfn5mHchfJ4KRBZRFA/Coo+MUF5+gOaCQ==", + ], + + "@tailwindcss/oxide": [ + "@tailwindcss/oxide@4.1.18", + "", + { + "optionalDependencies": { + "@tailwindcss/oxide-android-arm64": "4.1.18", + "@tailwindcss/oxide-darwin-arm64": "4.1.18", + "@tailwindcss/oxide-darwin-x64": "4.1.18", + "@tailwindcss/oxide-freebsd-x64": "4.1.18", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.18", + "@tailwindcss/oxide-linux-arm64-gnu": "4.1.18", + "@tailwindcss/oxide-linux-arm64-musl": "4.1.18", + "@tailwindcss/oxide-linux-x64-gnu": "4.1.18", + "@tailwindcss/oxide-linux-x64-musl": "4.1.18", + "@tailwindcss/oxide-wasm32-wasi": "4.1.18", + 
"@tailwindcss/oxide-win32-arm64-msvc": "4.1.18", + "@tailwindcss/oxide-win32-x64-msvc": "4.1.18", + }, + }, + "sha512-EgCR5tTS5bUSKQgzeMClT6iCY3ToqE1y+ZB0AKldj809QXk1Y+3jB0upOYZrn9aGIzPtUsP7sX4QQ4XtjBB95A==", + ], + + "@tailwindcss/oxide-android-arm64": [ + "@tailwindcss/oxide-android-arm64@4.1.18", + "", + { "os": "android", "cpu": "arm64" }, + "sha512-dJHz7+Ugr9U/diKJA0W6N/6/cjI+ZTAoxPf9Iz9BFRF2GzEX8IvXxFIi/dZBloVJX/MZGvRuFA9rqwdiIEZQ0Q==", + ], + + "@tailwindcss/oxide-darwin-arm64": [ + "@tailwindcss/oxide-darwin-arm64@4.1.18", + "", + { "os": "darwin", "cpu": "arm64" }, + "sha512-Gc2q4Qhs660bhjyBSKgq6BYvwDz4G+BuyJ5H1xfhmDR3D8HnHCmT/BSkvSL0vQLy/nkMLY20PQ2OoYMO15Jd0A==", + ], + + "@tailwindcss/oxide-darwin-x64": [ + "@tailwindcss/oxide-darwin-x64@4.1.18", + "", + { "os": "darwin", "cpu": "x64" }, + "sha512-FL5oxr2xQsFrc3X9o1fjHKBYBMD1QZNyc1Xzw/h5Qu4XnEBi3dZn96HcHm41c/euGV+GRiXFfh2hUCyKi/e+yw==", + ], + + "@tailwindcss/oxide-freebsd-x64": [ + "@tailwindcss/oxide-freebsd-x64@4.1.18", + "", + { "os": "freebsd", "cpu": "x64" }, + "sha512-Fj+RHgu5bDodmV1dM9yAxlfJwkkWvLiRjbhuO2LEtwtlYlBgiAT4x/j5wQr1tC3SANAgD+0YcmWVrj8R9trVMA==", + ], + + "@tailwindcss/oxide-linux-arm-gnueabihf": [ + "@tailwindcss/oxide-linux-arm-gnueabihf@4.1.18", + "", + { "os": "linux", "cpu": "arm" }, + "sha512-Fp+Wzk/Ws4dZn+LV2Nqx3IilnhH51YZoRaYHQsVq3RQvEl+71VGKFpkfHrLM/Li+kt5c0DJe/bHXK1eHgDmdiA==", + ], + + "@tailwindcss/oxide-linux-arm64-gnu": [ + "@tailwindcss/oxide-linux-arm64-gnu@4.1.18", + "", + { "os": "linux", "cpu": "arm64" }, + "sha512-S0n3jboLysNbh55Vrt7pk9wgpyTTPD0fdQeh7wQfMqLPM/Hrxi+dVsLsPrycQjGKEQk85Kgbx+6+QnYNiHalnw==", + ], + + "@tailwindcss/oxide-linux-arm64-musl": [ + "@tailwindcss/oxide-linux-arm64-musl@4.1.18", + "", + { "os": "linux", "cpu": "arm64" }, + "sha512-1px92582HkPQlaaCkdRcio71p8bc8i/ap5807tPRDK/uw953cauQBT8c5tVGkOwrHMfc2Yh6UuxaH4vtTjGvHg==", + ], + + "@tailwindcss/oxide-linux-x64-gnu": [ + "@tailwindcss/oxide-linux-x64-gnu@4.1.18", + "", + { "os": "linux", "cpu": 
"x64" }, + "sha512-v3gyT0ivkfBLoZGF9LyHmts0Isc8jHZyVcbzio6Wpzifg/+5ZJpDiRiUhDLkcr7f/r38SWNe7ucxmGW3j3Kb/g==", + ], + + "@tailwindcss/oxide-linux-x64-musl": [ + "@tailwindcss/oxide-linux-x64-musl@4.1.18", + "", + { "os": "linux", "cpu": "x64" }, + "sha512-bhJ2y2OQNlcRwwgOAGMY0xTFStt4/wyU6pvI6LSuZpRgKQwxTec0/3Scu91O8ir7qCR3AuepQKLU/kX99FouqQ==", + ], + + "@tailwindcss/oxide-wasm32-wasi": [ + "@tailwindcss/oxide-wasm32-wasi@4.1.18", + "", + { + "dependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1", + "@emnapi/wasi-threads": "^1.1.0", + "@napi-rs/wasm-runtime": "^1.1.0", + "@tybys/wasm-util": "^0.10.1", + "tslib": "^2.4.0", + }, + "cpu": "none", + }, + "sha512-LffYTvPjODiP6PT16oNeUQJzNVyJl1cjIebq/rWWBF+3eDst5JGEFSc5cWxyRCJ0Mxl+KyIkqRxk1XPEs9x8TA==", + ], + + "@tailwindcss/oxide-win32-arm64-msvc": [ + "@tailwindcss/oxide-win32-arm64-msvc@4.1.18", + "", + { "os": "win32", "cpu": "arm64" }, + "sha512-HjSA7mr9HmC8fu6bdsZvZ+dhjyGCLdotjVOgLA2vEqxEBZaQo9YTX4kwgEvPCpRh8o4uWc4J/wEoFzhEmjvPbA==", + ], + + "@tailwindcss/oxide-win32-x64-msvc": [ + "@tailwindcss/oxide-win32-x64-msvc@4.1.18", + "", + { "os": "win32", "cpu": "x64" }, + "sha512-bJWbyYpUlqamC8dpR7pfjA0I7vdF6t5VpUGMWRkXVE3AXgIZjYUYAK7II1GNaxR8J1SSrSrppRar8G++JekE3Q==", + ], + + "@tailwindcss/vite": [ + "@tailwindcss/vite@4.1.18", + "", + { + "dependencies": { + "@tailwindcss/node": "4.1.18", + "@tailwindcss/oxide": "4.1.18", + "tailwindcss": "4.1.18", + }, + "peerDependencies": { "vite": "^5.2.0 || ^6 || ^7" }, + }, + "sha512-jVA+/UpKL1vRLg6Hkao5jldawNmRo7mQYrZtNHMIVpLfLhDml5nMRUo/8MwoX2vNXvnaXNNMedrMfMugAVX1nA==", + ], + + "@tanstack/devtools-event-client": [ + "@tanstack/devtools-event-client@0.4.0", + "", + {}, + "sha512-RPfGuk2bDZgcu9bAJodvO2lnZeHuz4/71HjZ0bGb/SPg8+lyTA+RLSKQvo7fSmPSi8/vcH3aKQ8EM9ywf1olaw==", + ], + + "@tanstack/form-core": [ + "@tanstack/form-core@1.27.6", + "", + { + "dependencies": { + "@tanstack/devtools-event-client": "^0.4.0", + "@tanstack/pacer-lite": "^0.1.1", + 
"@tanstack/store": "^0.7.7", + }, + }, + "sha512-1C4PUpOcCpivddKxtAeqdeqncxnPKiPpTVDRknDExCba+6zCsAjxgL+p3qYA3hu+EFyUAdW71rU+uqYbEa7qqA==", + ], + + "@tanstack/history": [ + "@tanstack/history@1.141.0", + "", + {}, + "sha512-LS54XNyxyTs5m/pl1lkwlg7uZM3lvsv2FIIV1rsJgnfwVCnI+n4ZGZ2CcjNT13BPu/3hPP+iHmliBSscJxW5FQ==", + ], + + "@tanstack/pacer-lite": [ + "@tanstack/pacer-lite@0.1.1", + "", + {}, + "sha512-y/xtNPNt/YeyoVxE/JCx+T7yjEzpezmbb+toK8DDD1P4m7Kzs5YR956+7OKexG3f8aXgC3rLZl7b1V+yNUSy5w==", + ], + + "@tanstack/query-core": [ + "@tanstack/query-core@5.90.12", + "", + {}, + "sha512-T1/8t5DhV/SisWjDnaiU2drl6ySvsHj1bHBCWNXd+/T+Hh1cf6JodyEYMd5sgwm+b/mETT4EV3H+zCVczCU5hg==", + ], + + "@tanstack/react-form": [ + "@tanstack/react-form@1.27.6", + "", + { + "dependencies": { "@tanstack/form-core": "1.27.6", "@tanstack/react-store": "^0.8.0" }, + "peerDependencies": { "react": "^17.0.0 || ^18.0.0 || ^19.0.0" }, + }, + "sha512-kq/68CKbCxK6TkFnGihtQ3qdrD5GPrVjfhkcqMFH/+X9jYOZDai52864T4997lC3nSEKFbUhkkXlaIy/wCSuNQ==", + ], + + "@tanstack/react-query": [ + "@tanstack/react-query@5.90.12", + "", + { + "dependencies": { "@tanstack/query-core": "5.90.12" }, + "peerDependencies": { "react": "^18 || ^19" }, + }, + "sha512-graRZspg7EoEaw0a8faiUASCyJrqjKPdqJ9EwuDRUF9mEYJ1YPczI9H+/agJ0mOJkPCJDk0lsz5QTrLZ/jQ2rg==", + ], + + "@tanstack/react-router": [ + "@tanstack/react-router@1.143.4", + "", + { + "dependencies": { + "@tanstack/history": "1.141.0", + "@tanstack/react-store": "^0.8.0", + "@tanstack/router-core": "1.143.4", + "isbot": "^5.1.22", + "tiny-invariant": "^1.3.3", + "tiny-warning": "^1.0.3", + }, + "peerDependencies": { + "react": ">=18.0.0 || >=19.0.0", + "react-dom": ">=18.0.0 || >=19.0.0", + }, + }, + "sha512-7Tz7YwJc8RKDQga3yNY03zNc/ey+AIDA1A5ppGYqIM+UR47uGdAKc/4MSpItznqkSUi1Csrw2nVtICSkGanKdQ==", + ], + + "@tanstack/react-router-devtools": [ + "@tanstack/react-router-devtools@1.143.4", + "", + { + "dependencies": { "@tanstack/router-devtools-core": "1.143.4" }, + 
"peerDependencies": { + "@tanstack/react-router": "^1.143.4", + "@tanstack/router-core": "^1.143.4", + "react": ">=18.0.0 || >=19.0.0", + "react-dom": ">=18.0.0 || >=19.0.0", + }, + "optionalPeers": ["@tanstack/router-core"], + }, + "sha512-+AKGHkC2aDL93XCWDMB9/cf8+N4awGylCK0mk0kJ5BUBVSoZpNVLtZiBFCxRuvZCQtY5PbdYT4xeUA0dbgH9Eg==", + ], + + "@tanstack/react-store": [ + "@tanstack/react-store@0.8.0", + "", + { + "dependencies": { "@tanstack/store": "0.8.0", "use-sync-external-store": "^1.6.0" }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + }, + }, + "sha512-1vG9beLIuB7q69skxK9r5xiLN3ztzIPfSQSs0GfeqWGO2tGIyInZx0x1COhpx97RKaONSoAb8C3dxacWksm1ow==", + ], + + "@tanstack/react-table": [ + "@tanstack/react-table@8.21.3", + "", + { + "dependencies": { "@tanstack/table-core": "8.21.3" }, + "peerDependencies": { "react": ">=16.8", "react-dom": ">=16.8" }, + }, + "sha512-5nNMTSETP4ykGegmVkhjcS8tTLW6Vl4axfEGQN3v0zdHYbK4UfoqfPChclTrJ4EoK9QynqAu9oUf8VEmrpZ5Ww==", + ], + + "@tanstack/router-core": [ + "@tanstack/router-core@1.143.4", + "", + { + "dependencies": { + "@tanstack/history": "1.141.0", + "@tanstack/store": "^0.8.0", + "cookie-es": "^2.0.0", + "seroval": "^1.4.1", + "seroval-plugins": "^1.4.0", + "tiny-invariant": "^1.3.3", + "tiny-warning": "^1.0.3", + }, + }, + "sha512-VlSXrYQ/oBoUUGJx6t93KfzGHeBvL6GOmKRouPbHNqKi4ueVnQ2PdRX+s9eZoDAdcVsgmS7YlTCRgIbh2sAQpA==", + ], + + "@tanstack/router-devtools": [ + "@tanstack/router-devtools@1.143.4", + "", + { + "dependencies": { + "@tanstack/react-router-devtools": "1.143.4", + "clsx": "^2.1.1", + "goober": "^2.1.16", + }, + "peerDependencies": { + "@tanstack/react-router": "^1.143.4", + "csstype": "^3.0.10", + "react": ">=18.0.0 || >=19.0.0", + "react-dom": ">=18.0.0 || >=19.0.0", + }, + "optionalPeers": ["csstype"], + }, + "sha512-FycfcOodSRjc3Gx5i1rt/+gwiin+muSt6y2ZxmcsOwzg1QpdZAeELG9qZfqR6qg38nhJu9ixhmA/M4kf82cH8g==", + ], + + 
"@tanstack/router-devtools-core": [ + "@tanstack/router-devtools-core@1.143.4", + "", + { + "dependencies": { "clsx": "^2.1.1", "goober": "^2.1.16", "tiny-invariant": "^1.3.3" }, + "peerDependencies": { + "@tanstack/router-core": "^1.143.4", + "csstype": "^3.0.10", + "solid-js": ">=1.9.5", + }, + "optionalPeers": ["csstype"], + }, + "sha512-f5uatl8LIlMS4O2uIQ/oh58pF62/N1qKrBPtYvc7B1Tvf16ER8Nr1t8d4a85MiQyyA4kgiqfnYryOfW+diLjwg==", + ], + + "@tanstack/router-generator": [ + "@tanstack/router-generator@1.143.4", + "", + { + "dependencies": { + "@tanstack/router-core": "1.143.4", + "@tanstack/router-utils": "1.141.0", + "@tanstack/virtual-file-routes": "1.141.0", + "prettier": "^3.5.0", + "recast": "^0.23.11", + "source-map": "^0.7.4", + "tsx": "^4.19.2", + "zod": "^3.24.2", + }, + }, + "sha512-QBqJCNoXJQmWkoAR6VqSuA7nBUSf3y5p8t4JpbtLGUgQ7pLu03nUSjcnLqN84BednhpZXnh/Mw3jxnpA//UWCQ==", + ], + + "@tanstack/router-plugin": [ + "@tanstack/router-plugin@1.143.4", + "", + { + "dependencies": { + "@babel/core": "^7.27.7", + "@babel/plugin-syntax-jsx": "^7.27.1", + "@babel/plugin-syntax-typescript": "^7.27.1", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.27.7", + "@babel/types": "^7.27.7", + "@tanstack/router-core": "1.143.4", + "@tanstack/router-generator": "1.143.4", + "@tanstack/router-utils": "1.141.0", + "@tanstack/virtual-file-routes": "1.141.0", + "babel-dead-code-elimination": "^1.0.10", + "chokidar": "^3.6.0", + "unplugin": "^2.1.2", + "zod": "^3.24.2", + }, + "peerDependencies": { + "@rsbuild/core": ">=1.0.2", + "@tanstack/react-router": "^1.143.4", + "vite": ">=5.0.0 || >=6.0.0 || >=7.0.0", + "vite-plugin-solid": "^2.11.10", + "webpack": ">=5.92.0", + }, + "optionalPeers": [ + "@rsbuild/core", + "@tanstack/react-router", + "vite", + "vite-plugin-solid", + "webpack", + ], + }, + "sha512-gjqkdAHJ8lZ1pOcK2noboyLKtbwIH59H/3/a4OQu30yNmuRnDTN75OrSBMvHvgYnXM3a0qUo9uFCphsRbS9N6g==", + ], + + "@tanstack/router-utils": [ + "@tanstack/router-utils@1.141.0", + "", + 
{ + "dependencies": { + "@babel/core": "^7.27.4", + "@babel/generator": "^7.27.5", + "@babel/parser": "^7.27.5", + "@babel/preset-typescript": "^7.27.1", + "ansis": "^4.1.0", + "diff": "^8.0.2", + "pathe": "^2.0.3", + "tinyglobby": "^0.2.15", + }, + }, + "sha512-/eFGKCiix1SvjxwgzrmH4pHjMiMxc+GA4nIbgEkG2RdAJqyxLcRhd7RPLG0/LZaJ7d0ad3jrtRqsHLv2152Vbw==", + ], + + "@tanstack/store": [ + "@tanstack/store@0.8.0", + "", + {}, + "sha512-Om+BO0YfMZe//X2z0uLF2j+75nQga6TpTJgLJQBiq85aOyZNIhkCgleNcud2KQg4k4v9Y9l+Uhru3qWMPGTOzQ==", + ], + + "@tanstack/table-core": [ + "@tanstack/table-core@8.21.3", + "", + {}, + "sha512-ldZXEhOBb8Is7xLs01fR3YEc3DERiz5silj8tnGkFZytt1abEvl/GhUmCE0PMLaMPTa3Jk4HbKmRlHmu+gCftg==", + ], + + "@tanstack/virtual-file-routes": [ + "@tanstack/virtual-file-routes@1.141.0", + "", + {}, + "sha512-CJrWtr6L9TVzEImm9S7dQINx+xJcYP/aDkIi6gnaWtIgbZs1pnzsE0yJc2noqXZ+yAOqLx3TBGpBEs9tS0P9/A==", + ], + + "@ts-morph/common": [ + "@ts-morph/common@0.27.0", + "", + { + "dependencies": { + "fast-glob": "^3.3.3", + "minimatch": "^10.0.1", + "path-browserify": "^1.0.1", + }, + }, + "sha512-Wf29UqxWDpc+i61k3oIOzcUfQt79PIT9y/MWfAGlrkjg6lBC1hwDECLXPVJAhWjiGbfBCxZd65F/LIZF3+jeJQ==", + ], + + "@tybys/wasm-util": [ + "@tybys/wasm-util@0.10.1", + "", + { "dependencies": { "tslib": "^2.4.0" } }, + "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==", + ], + + "@types/babel__core": [ + "@types/babel__core@7.20.5", + "", + { + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*", + }, + }, + "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + ], + + "@types/babel__generator": [ + "@types/babel__generator@7.27.0", + "", + { "dependencies": { "@babel/types": "^7.0.0" } }, + "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + 
], + + "@types/babel__template": [ + "@types/babel__template@7.4.4", + "", + { "dependencies": { "@babel/parser": "^7.1.0", "@babel/types": "^7.0.0" } }, + "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + ], + + "@types/babel__traverse": [ + "@types/babel__traverse@7.28.0", + "", + { "dependencies": { "@babel/types": "^7.28.2" } }, + "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + ], + + "@types/debug": [ + "@types/debug@4.1.12", + "", + { "dependencies": { "@types/ms": "*" } }, + "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + ], + + "@types/es-aggregate-error": [ + "@types/es-aggregate-error@1.0.6", + "", + { "dependencies": { "@types/node": "*" } }, + "sha512-qJ7LIFp06h1QE1aVxbVd+zJP2wdaugYXYfd6JxsyRMrYHaxb6itXPogW2tz+ylUJ1n1b+JF1PHyYCfYHm0dvUg==", + ], + + "@types/estree": [ + "@types/estree@1.0.8", + "", + {}, + "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + ], + + "@types/estree-jsx": [ + "@types/estree-jsx@1.0.5", + "", + { "dependencies": { "@types/estree": "*" } }, + "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + ], + + "@types/hast": [ + "@types/hast@3.0.4", + "", + { "dependencies": { "@types/unist": "*" } }, + "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + ], + + "@types/json-schema": [ + "@types/json-schema@7.0.15", + "", + {}, + "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + ], + + "@types/mdast": [ + "@types/mdast@4.0.4", + "", + { "dependencies": { "@types/unist": "*" } }, + "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + ], + + "@types/mdx": [ + "@types/mdx@2.0.13", + "", + {}, + 
"sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==", + ], + + "@types/ms": [ + "@types/ms@2.1.0", + "", + {}, + "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==", + ], + + "@types/node": [ + "@types/node@25.0.3", + "", + { "dependencies": { "undici-types": "~7.16.0" } }, + "sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA==", + ], + + "@types/react": [ + "@types/react@19.2.7", + "", + { "dependencies": { "csstype": "^3.2.2" } }, + "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==", + ], + + "@types/react-dom": [ + "@types/react-dom@19.2.3", + "", + { "peerDependencies": { "@types/react": "^19.2.0" } }, + "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", + ], + + "@types/statuses": [ + "@types/statuses@2.0.6", + "", + {}, + "sha512-xMAgYwceFhRA2zY+XbEA7mxYbA093wdiW8Vu6gZPGWy9cmOyU9XesH1tNcEWsKFd5Vzrqx5T3D38PWx1FIIXkA==", + ], + + "@types/trusted-types": [ + "@types/trusted-types@2.0.7", + "", + {}, + "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", + ], + + "@types/unist": [ + "@types/unist@3.0.3", + "", + {}, + "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + ], + + "@types/urijs": [ + "@types/urijs@1.19.26", + "", + {}, + "sha512-wkXrVzX5yoqLnndOwFsieJA7oKM8cNkOKJtf/3vVGSUFkWDKZvFHpIl9Pvqb/T9UsawBBFMTTD8xu7sK5MWuvg==", + ], + + "@typescript-eslint/eslint-plugin": [ + "@typescript-eslint/eslint-plugin@8.50.1", + "", + { + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.50.1", + "@typescript-eslint/type-utils": "8.50.1", + "@typescript-eslint/utils": "8.50.1", + "@typescript-eslint/visitor-keys": "8.50.1", + "ignore": "^7.0.0", + "natural-compare": "^1.4.0", + "ts-api-utils": 
"^2.1.0", + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.50.1", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0", + }, + }, + "sha512-PKhLGDq3JAg0Jk/aK890knnqduuI/Qj+udH7wCf0217IGi4gt+acgCyPVe79qoT+qKUvHMDQkwJeKW9fwl8Cyw==", + ], + + "@typescript-eslint/parser": [ + "@typescript-eslint/parser@8.50.1", + "", + { + "dependencies": { + "@typescript-eslint/scope-manager": "8.50.1", + "@typescript-eslint/types": "8.50.1", + "@typescript-eslint/typescript-estree": "8.50.1", + "@typescript-eslint/visitor-keys": "8.50.1", + "debug": "^4.3.4", + }, + "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" }, + }, + "sha512-hM5faZwg7aVNa819m/5r7D0h0c9yC4DUlWAOvHAtISdFTc8xB86VmX5Xqabrama3wIPJ/q9RbGS1worb6JfnMg==", + ], + + "@typescript-eslint/project-service": [ + "@typescript-eslint/project-service@8.50.1", + "", + { + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.50.1", + "@typescript-eslint/types": "^8.50.1", + "debug": "^4.3.4", + }, + "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" }, + }, + "sha512-E1ur1MCVf+YiP89+o4Les/oBAVzmSbeRB0MQLfSlYtbWU17HPxZ6Bhs5iYmKZRALvEuBoXIZMOIRRc/P++Ortg==", + ], + + "@typescript-eslint/scope-manager": [ + "@typescript-eslint/scope-manager@8.50.1", + "", + { + "dependencies": { + "@typescript-eslint/types": "8.50.1", + "@typescript-eslint/visitor-keys": "8.50.1", + }, + }, + "sha512-mfRx06Myt3T4vuoHaKi8ZWNTPdzKPNBhiblze5N50//TSHOAQQevl/aolqA/BcqqbJ88GUnLqjjcBc8EWdBcVw==", + ], + + "@typescript-eslint/tsconfig-utils": [ + "@typescript-eslint/tsconfig-utils@8.50.1", + "", + { "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" } }, + "sha512-ooHmotT/lCWLXi55G4mvaUF60aJa012QzvLK0Y+Mp4WdSt17QhMhWOaBWeGTFVkb2gDgBe19Cxy1elPXylslDw==", + ], + + "@typescript-eslint/type-utils": [ + "@typescript-eslint/type-utils@8.50.1", + "", + { + "dependencies": { + "@typescript-eslint/types": "8.50.1", + "@typescript-eslint/typescript-estree": "8.50.1", + 
"@typescript-eslint/utils": "8.50.1", + "debug": "^4.3.4", + "ts-api-utils": "^2.1.0", + }, + "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" }, + }, + "sha512-7J3bf022QZE42tYMO6SL+6lTPKFk/WphhRPe9Tw/el+cEwzLz1Jjz2PX3GtGQVxooLDKeMVmMt7fWpYRdG5Etg==", + ], + + "@typescript-eslint/types": [ + "@typescript-eslint/types@8.50.1", + "", + {}, + "sha512-v5lFIS2feTkNyMhd7AucE/9j/4V9v5iIbpVRncjk/K0sQ6Sb+Np9fgYS/63n6nwqahHQvbmujeBL7mp07Q9mlA==", + ], + + "@typescript-eslint/typescript-estree": [ + "@typescript-eslint/typescript-estree@8.50.1", + "", + { + "dependencies": { + "@typescript-eslint/project-service": "8.50.1", + "@typescript-eslint/tsconfig-utils": "8.50.1", + "@typescript-eslint/types": "8.50.1", + "@typescript-eslint/visitor-keys": "8.50.1", + "debug": "^4.3.4", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "tinyglobby": "^0.2.15", + "ts-api-utils": "^2.1.0", + }, + "peerDependencies": { "typescript": ">=4.8.4 <6.0.0" }, + }, + "sha512-woHPdW+0gj53aM+cxchymJCrh0cyS7BTIdcDxWUNsclr9VDkOSbqC13juHzxOmQ22dDkMZEpZB+3X1WpUvzgVQ==", + ], + + "@typescript-eslint/utils": [ + "@typescript-eslint/utils@8.50.1", + "", + { + "dependencies": { + "@eslint-community/eslint-utils": "^4.7.0", + "@typescript-eslint/scope-manager": "8.50.1", + "@typescript-eslint/types": "8.50.1", + "@typescript-eslint/typescript-estree": "8.50.1", + }, + "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <6.0.0" }, + }, + "sha512-lCLp8H1T9T7gPbEuJSnHwnSuO9mDf8mfK/Nion5mZmiEaQD9sWf9W4dfeFqRyqRjF06/kBuTmAqcs9sewM2NbQ==", + ], + + "@typescript-eslint/visitor-keys": [ + "@typescript-eslint/visitor-keys@8.50.1", + "", + { "dependencies": { "@typescript-eslint/types": "8.50.1", "eslint-visitor-keys": "^4.2.1" } }, + "sha512-IrDKrw7pCRUR94zeuCSUWQ+w8JEf5ZX5jl/e6AHGSLi1/zIr0lgutfn/7JpfCey+urpgQEdrZVYzCaVVKiTwhQ==", + ], + + "@ungap/structured-clone": [ + "@ungap/structured-clone@1.3.0", + "", + {}, + 
"sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + ], + + "@vitejs/plugin-react": [ + "@vitejs/plugin-react@5.1.2", + "", + { + "dependencies": { + "@babel/core": "^7.28.5", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-beta.53", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.18.0", + }, + "peerDependencies": { "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" }, + }, + "sha512-EcA07pHJouywpzsoTUqNh5NwGayl2PPVEJKUSinGGSxFGYn+shYbqMGBg6FXDqgXum9Ou/ecb+411ssw8HImJQ==", + ], + + "@xterm/addon-fit": [ + "@xterm/addon-fit@0.11.0", + "", + {}, + "sha512-jYcgT6xtVYhnhgxh3QgYDnnNMYTcf8ElbxxFzX0IZo+vabQqSPAjC3c1wJrKB5E19VwQei89QCiZZP86DCPF7g==", + ], + + "@xterm/xterm": [ + "@xterm/xterm@6.0.0", + "", + {}, + "sha512-TQwDdQGtwwDt+2cgKDLn0IRaSxYu1tSUjgKarSDkUM0ZNiSRXFpjxEsvc/Zgc5kq5omJ+V0a8/kIM2WD3sMOYg==", + ], + + "abort-controller": [ + "abort-controller@3.0.0", + "", + { "dependencies": { "event-target-shim": "^5.0.0" } }, + "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + ], + + "accepts": [ + "accepts@2.0.0", + "", + { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, + "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==", + ], + + "acorn": [ + "acorn@8.15.0", + "", + { "bin": { "acorn": "bin/acorn" } }, + "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + ], + + "acorn-jsx": [ + "acorn-jsx@5.3.2", + "", + { "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, + "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + ], + + "agent-base": [ + "agent-base@7.1.4", + "", + {}, + "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + ], + + "ajv": [ + 
"ajv@6.12.6", + "", + { + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2", + }, + }, + "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + ], + + "ajv-draft-04": [ + "ajv-draft-04@1.0.0", + "", + { "peerDependencies": { "ajv": "^8.5.0" }, "optionalPeers": ["ajv"] }, + "sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==", + ], + + "ajv-errors": [ + "ajv-errors@3.0.0", + "", + { "peerDependencies": { "ajv": "^8.0.1" } }, + "sha512-V3wD15YHfHz6y0KdhYFjyy9vWtEVALT9UrxfN3zqlI6dMioHnJrqOYfyPKol3oqrnCM9uwkcdCwkJ0WUcbLMTQ==", + ], + + "ajv-formats": [ + "ajv-formats@3.0.1", + "", + { "dependencies": { "ajv": "^8.0.0" } }, + "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==", + ], + + "ansi-colors": [ + "ansi-colors@4.1.3", + "", + {}, + "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==", + ], + + "ansi-regex": [ + "ansi-regex@5.0.1", + "", + {}, + "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + ], + + "ansi-styles": [ + "ansi-styles@4.3.0", + "", + { "dependencies": { "color-convert": "^2.0.1" } }, + "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + ], + + "ansis": [ + "ansis@4.2.0", + "", + {}, + "sha512-HqZ5rWlFjGiV0tDm3UxxgNRqsOTniqoKZu0pIAfh7TZQMGuZK+hH0drySty0si0QXj1ieop4+SkSfPZBPPkHig==", + ], + + "anymatch": [ + "anymatch@3.1.3", + "", + { "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" } }, + "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + ], + + "argparse": [ + "argparse@2.0.1", + "", + {}, + "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + ], + + 
"aria-hidden": [ + "aria-hidden@1.2.6", + "", + { "dependencies": { "tslib": "^2.0.0" } }, + "sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA==", + ], + + "array-buffer-byte-length": [ + "array-buffer-byte-length@1.0.2", + "", + { "dependencies": { "call-bound": "^1.0.3", "is-array-buffer": "^3.0.5" } }, + "sha512-LHE+8BuR7RYGDKvnrmcuSq3tDcKv9OFEXQt/HpbZhY7V6h0zlUXutnAD82GiFx9rdieCMjkvtcsPqBwgUl1Iiw==", + ], + + "array-union": [ + "array-union@2.1.0", + "", + {}, + "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + ], + + "arraybuffer.prototype.slice": [ + "arraybuffer.prototype.slice@1.0.4", + "", + { + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "is-array-buffer": "^3.0.4", + }, + }, + "sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ==", + ], + + "ast-types": [ + "ast-types@0.16.1", + "", + { "dependencies": { "tslib": "^2.0.1" } }, + "sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==", + ], + + "astring": [ + "astring@1.9.0", + "", + { "bin": { "astring": "bin/astring" } }, + "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", + ], + + "async-function": [ + "async-function@1.0.0", + "", + {}, + "sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==", + ], + + "asynckit": [ + "asynckit@0.4.0", + "", + {}, + "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + ], + + "available-typed-arrays": [ + "available-typed-arrays@1.0.7", + "", + { "dependencies": { "possible-typed-array-names": "^1.0.0" } }, + 
"sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + ], + + "axios": [ + "axios@1.13.2", + "", + { + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.4", + "proxy-from-env": "^1.1.0", + }, + }, + "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==", + ], + + "babel-dead-code-elimination": [ + "babel-dead-code-elimination@1.0.11", + "", + { + "dependencies": { + "@babel/core": "^7.23.7", + "@babel/parser": "^7.23.6", + "@babel/traverse": "^7.23.7", + "@babel/types": "^7.23.6", + }, + }, + "sha512-mwq3W3e/pKSI6TG8lXMiDWvEi1VXYlSBlJlB3l+I0bAb5u1RNUl88udos85eOPNK3m5EXK9uO7d2g08pesTySQ==", + ], + + "bail": [ + "bail@2.0.2", + "", + {}, + "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + ], + + "balanced-match": [ + "balanced-match@1.0.2", + "", + {}, + "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + ], + + "baseline-browser-mapping": [ + "baseline-browser-mapping@2.9.11", + "", + { "bin": { "baseline-browser-mapping": "dist/cli.js" } }, + "sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ==", + ], + + "binary-extensions": [ + "binary-extensions@2.3.0", + "", + {}, + "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + ], + + "body-parser": [ + "body-parser@2.2.1", + "", + { + "dependencies": { + "bytes": "^3.1.2", + "content-type": "^1.0.5", + "debug": "^4.4.3", + "http-errors": "^2.0.0", + "iconv-lite": "^0.7.0", + "on-finished": "^2.4.1", + "qs": "^6.14.0", + "raw-body": "^3.0.1", + "type-is": "^2.0.1", + }, + }, + "sha512-nfDwkulwiZYQIGwxdy0RUmowMhKcFVcYXUU7m4QlKYim1rUtg83xm2yjZ40QjDuc291AJjjeSc9b++AWHSgSHw==", + ], + + "brace-expansion": [ + "brace-expansion@1.1.12", + "", + { "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, + 
"sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + ], + + "braces": [ + "braces@3.0.3", + "", + { "dependencies": { "fill-range": "^7.1.1" } }, + "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + ], + + "browserslist": [ + "browserslist@4.28.1", + "", + { + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0", + }, + "bin": { "browserslist": "cli.js" }, + }, + "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + ], + + "bundle-name": [ + "bundle-name@4.1.0", + "", + { "dependencies": { "run-applescript": "^7.0.0" } }, + "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==", + ], + + "bytes": [ + "bytes@3.1.2", + "", + {}, + "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + ], + + "call-bind": [ + "call-bind@1.0.8", + "", + { + "dependencies": { + "call-bind-apply-helpers": "^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2", + }, + }, + "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + ], + + "call-bind-apply-helpers": [ + "call-bind-apply-helpers@1.0.2", + "", + { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, + "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + ], + + "call-bound": [ + "call-bound@1.0.4", + "", + { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "get-intrinsic": "^1.3.0" } }, + "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + ], + + "call-me-maybe": [ + "call-me-maybe@1.0.2", + "", + {}, + 
"sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ==", + ], + + "callsites": [ + "callsites@3.1.0", + "", + {}, + "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + ], + + "caniuse-lite": [ + "caniuse-lite@1.0.30001761", + "", + {}, + "sha512-JF9ptu1vP2coz98+5051jZ4PwQgd2ni8A+gYSN7EA7dPKIMf0pDlSUxhdmVOaV3/fYK5uWBkgSXJaRLr4+3A6g==", + ], + + "ccount": [ + "ccount@2.0.1", + "", + {}, + "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + ], + + "chalk": [ + "chalk@4.1.2", + "", + { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, + "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + ], + + "character-entities": [ + "character-entities@2.0.2", + "", + {}, + "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + ], + + "character-entities-html4": [ + "character-entities-html4@2.1.0", + "", + {}, + "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + ], + + "character-entities-legacy": [ + "character-entities-legacy@3.0.0", + "", + {}, + "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + ], + + "character-reference-invalid": [ + "character-reference-invalid@2.0.1", + "", + {}, + "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + ], + + "chokidar": [ + "chokidar@3.6.0", + "", + { + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0", + }, + "optionalDependencies": { "fsevents": "~2.3.2" }, + }, + "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + ], + + 
"class-variance-authority": [ + "class-variance-authority@0.7.1", + "", + { "dependencies": { "clsx": "^2.1.1" } }, + "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==", + ], + + "cli-cursor": [ + "cli-cursor@5.0.0", + "", + { "dependencies": { "restore-cursor": "^5.0.0" } }, + "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==", + ], + + "cli-spinners": [ + "cli-spinners@2.9.2", + "", + {}, + "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + ], + + "cli-width": [ + "cli-width@4.1.0", + "", + {}, + "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==", + ], + + "cliui": [ + "cliui@8.0.1", + "", + { + "dependencies": { "string-width": "^4.2.0", "strip-ansi": "^6.0.1", "wrap-ansi": "^7.0.0" }, + }, + "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + ], + + "clsx": [ + "clsx@2.1.1", + "", + {}, + "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + ], + + "code-block-writer": [ + "code-block-writer@13.0.3", + "", + {}, + "sha512-Oofo0pq3IKnsFtuHqSF7TqBfr71aeyZDVJ0HpmqB7FBM2qEigL0iPONSCZSO9pE9dZTAxANe5XHG9Uy0YMv8cg==", + ], + + "collapse-white-space": [ + "collapse-white-space@2.1.0", + "", + {}, + "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", + ], + + "color-convert": [ + "color-convert@2.0.1", + "", + { "dependencies": { "color-name": "~1.1.4" } }, + "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + ], + + "color-name": [ + "color-name@1.1.4", + "", + {}, + "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + ], + + "combined-stream": [ + "combined-stream@1.0.8", + "", + { "dependencies": { "delayed-stream": "~1.0.0" } }, + 
"sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + ], + + "comma-separated-tokens": [ + "comma-separated-tokens@2.0.3", + "", + {}, + "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + ], + + "commander": [ + "commander@14.0.2", + "", + {}, + "sha512-TywoWNNRbhoD0BXs1P3ZEScW8W5iKrnbithIl0YH+uCmBd0QpPOA8yc82DS3BIE5Ma6FnBVUsJ7wVUDz4dvOWQ==", + ], + + "compare-versions": [ + "compare-versions@6.1.1", + "", + {}, + "sha512-4hm4VPpIecmlg59CHXnRDnqGplJFrbLG4aFEl5vl6cK1u76ws3LLvX7ikFnTDl5vo39sjWD6AaDPYodJp/NNHg==", + ], + + "concat-map": [ + "concat-map@0.0.1", + "", + {}, + "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + ], + + "content-disposition": [ + "content-disposition@1.0.1", + "", + {}, + "sha512-oIXISMynqSqm241k6kcQ5UwttDILMK4BiurCfGEREw6+X9jkkpEe5T9FZaApyLGGOnFuyMWZpdolTXMtvEJ08Q==", + ], + + "content-type": [ + "content-type@1.0.5", + "", + {}, + "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + ], + + "convert-source-map": [ + "convert-source-map@2.0.0", + "", + {}, + "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + ], + + "cookie": [ + "cookie@1.1.1", + "", + {}, + "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==", + ], + + "cookie-es": [ + "cookie-es@2.0.0", + "", + {}, + "sha512-RAj4E421UYRgqokKUmotqAwuplYw15qtdXfY+hGzgCJ/MBjCVZcSoHK/kH9kocfjRjcDME7IiDWR/1WX1TM2Pg==", + ], + + "cookie-signature": [ + "cookie-signature@1.2.2", + "", + {}, + "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==", + ], + + "core-js": [ + "core-js@3.48.0", + "", + {}, + "sha512-zpEHTy1fjTMZCKLHUZoVeylt9XrzaIN2rbPXEt0k+q7JE5CkCZdo6bNq55bn24a69CH7ErAVLKijxJja4fw+UQ==", + ], + + "cors": [ + "cors@2.8.5", + "", + { "dependencies": { 
"object-assign": "^4", "vary": "^1" } }, + "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", + ], + + "cosmiconfig": [ + "cosmiconfig@9.0.0", + "", + { + "dependencies": { + "env-paths": "^2.2.1", + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + }, + "peerDependencies": { "typescript": ">=4.9.5" }, + "optionalPeers": ["typescript"], + }, + "sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==", + ], + + "cross-spawn": [ + "cross-spawn@7.0.6", + "", + { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, + "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + ], + + "cssesc": [ + "cssesc@3.0.0", + "", + { "bin": { "cssesc": "bin/cssesc" } }, + "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + ], + + "csstype": [ + "csstype@3.2.3", + "", + {}, + "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + ], + + "data-uri-to-buffer": [ + "data-uri-to-buffer@4.0.1", + "", + {}, + "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", + ], + + "data-view-buffer": [ + "data-view-buffer@1.0.2", + "", + { + "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-data-view": "^1.0.2" }, + }, + "sha512-EmKO5V3OLXh1rtK2wgXRansaK1/mtVdTUEiEI0W8RkvgT05kfxaH29PliLnpLP73yYO6142Q72QNa8Wx/A5CqQ==", + ], + + "data-view-byte-length": [ + "data-view-byte-length@1.0.2", + "", + { + "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-data-view": "^1.0.2" }, + }, + "sha512-tuhGbE6CfTM9+5ANGf+oQb72Ky/0+s3xKUpHvShfiz2RxMFgFPjsXuRLBVMtvMs15awe45SRb83D6wH4ew6wlQ==", + ], + + "data-view-byte-offset": [ + "data-view-byte-offset@1.0.1", + "", + { + "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", 
"is-data-view": "^1.0.1" }, + }, + "sha512-BS8PfmtDGnrgYdOonGZQdLZslWIeCGFP9tpan0hi1Co2Zr2NKADsvGYA8XxuG/4UWgJ6Cjtv+YJnB6MM69QGlQ==", + ], + + "debug": [ + "debug@4.4.3", + "", + { "dependencies": { "ms": "^2.1.3" } }, + "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + ], + + "decode-named-character-reference": [ + "decode-named-character-reference@1.3.0", + "", + { "dependencies": { "character-entities": "^2.0.0" } }, + "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==", + ], + + "dedent": [ + "dedent@1.7.1", + "", + { + "peerDependencies": { "babel-plugin-macros": "^3.1.0" }, + "optionalPeers": ["babel-plugin-macros"], + }, + "sha512-9JmrhGZpOlEgOLdQgSm0zxFaYoQon408V1v49aqTWuXENVlnCuY9JBZcXZiCsZQWDjTm5Qf/nIvAy77mXDAjEg==", + ], + + "deep-is": [ + "deep-is@0.1.4", + "", + {}, + "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + ], + + "deepmerge": [ + "deepmerge@4.3.1", + "", + {}, + "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + ], + + "default-browser": [ + "default-browser@5.4.0", + "", + { "dependencies": { "bundle-name": "^4.1.0", "default-browser-id": "^5.0.0" } }, + "sha512-XDuvSq38Hr1MdN47EDvYtx3U0MTqpCEn+F6ft8z2vYDzMrvQhVp0ui9oQdqW3MvK3vqUETglt1tVGgjLuJ5izg==", + ], + + "default-browser-id": [ + "default-browser-id@5.0.1", + "", + {}, + "sha512-x1VCxdX4t+8wVfd1so/9w+vQ4vx7lKd2Qp5tDRutErwmR85OgmfX7RlLRMWafRMY7hbEiXIbudNrjOAPa/hL8Q==", + ], + + "define-data-property": [ + "define-data-property@1.1.4", + "", + { + "dependencies": { "es-define-property": "^1.0.0", "es-errors": "^1.3.0", "gopd": "^1.0.1" }, + }, + "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + ], + + "define-lazy-prop": [ + "define-lazy-prop@3.0.0", + "", + {}, + 
"sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==", + ], + + "define-properties": [ + "define-properties@1.2.1", + "", + { + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1", + }, + }, + "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + ], + + "delayed-stream": [ + "delayed-stream@1.0.0", + "", + {}, + "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + ], + + "depd": [ + "depd@2.0.0", + "", + {}, + "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + ], + + "dependency-graph": [ + "dependency-graph@0.11.0", + "", + {}, + "sha512-JeMq7fEshyepOWDfcfHK06N3MhyPhz++vtqWhMT5O9A3K42rdsEDpfdVqjaqaAhsw6a+ZqeDvQVtD0hFHQWrzg==", + ], + + "dequal": [ + "dequal@2.0.3", + "", + {}, + "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + ], + + "detect-libc": [ + "detect-libc@2.1.2", + "", + {}, + "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + ], + + "detect-node-es": [ + "detect-node-es@1.1.0", + "", + {}, + "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==", + ], + + "devlop": [ + "devlop@1.1.0", + "", + { "dependencies": { "dequal": "^2.0.0" } }, + "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + ], + + "diff": [ + "diff@8.0.2", + "", + {}, + "sha512-sSuxWU5j5SR9QQji/o2qMvqRNYRDOcBTgsJ/DeCf4iSN4gW+gNMXM7wFIP+fdXZxoNiAnHUTGjCr+TSWXdRDKg==", + ], + + "dir-glob": [ + "dir-glob@3.0.1", + "", + { "dependencies": { "path-type": "^4.0.0" } }, + "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + ], + + "dompurify": [ + "dompurify@3.3.1", + "", + { "optionalDependencies": { 
"@types/trusted-types": "^2.0.7" } }, + "sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==", + ], + + "dotenv": [ + "dotenv@17.2.3", + "", + {}, + "sha512-JVUnt+DUIzu87TABbhPmNfVdBDt18BLOWjMUFJMSi/Qqg7NTYtabbvSNJGOJ7afbRuv9D/lngizHtP7QyLQ+9w==", + ], + + "dunder-proto": [ + "dunder-proto@1.0.1", + "", + { + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0", + }, + }, + "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + ], + + "eciesjs": [ + "eciesjs@0.4.16", + "", + { + "dependencies": { + "@ecies/ciphers": "^0.2.4", + "@noble/ciphers": "^1.3.0", + "@noble/curves": "^1.9.7", + "@noble/hashes": "^1.8.0", + }, + }, + "sha512-dS5cbA9rA2VR4Ybuvhg6jvdmp46ubLn3E+px8cG/35aEDNclrqoCjg6mt0HYZ/M+OoESS3jSkCrqk1kWAEhWAw==", + ], + + "ee-first": [ + "ee-first@1.1.1", + "", + {}, + "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + ], + + "electron-to-chromium": [ + "electron-to-chromium@1.5.267", + "", + {}, + "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==", + ], + + "emoji-regex": [ + "emoji-regex@10.6.0", + "", + {}, + "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", + ], + + "encodeurl": [ + "encodeurl@2.0.0", + "", + {}, + "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + ], + + "enhanced-resolve": [ + "enhanced-resolve@5.18.4", + "", + { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" } }, + "sha512-LgQMM4WXU3QI+SYgEc2liRgznaD5ojbmY3sb8LxyguVkIg5FxdpTkvk72te2R38/TGKxH634oLxXRGY6d7AP+Q==", + ], + + "enquirer": [ + "enquirer@2.4.1", + "", + { "dependencies": { "ansi-colors": "^4.1.1", "strip-ansi": "^6.0.1" } }, + "sha512-rRqJg/6gd538VHvR3PSrdRBb/1Vy2YfzHqzvbhGIQpDRKIa4FgV/54b5Q1xYSxOOwKvjXweS26E0Q+nAMwp2pQ==", + ], + 
+ "entities": [ + "entities@4.5.0", + "", + {}, + "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + ], + + "env-paths": [ + "env-paths@2.2.1", + "", + {}, + "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + ], + + "error-ex": [ + "error-ex@1.3.4", + "", + { "dependencies": { "is-arrayish": "^0.2.1" } }, + "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + ], + + "es-abstract": [ + "es-abstract@1.24.1", + "", + { + "dependencies": { + "array-buffer-byte-length": "^1.0.2", + "arraybuffer.prototype.slice": "^1.0.4", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "data-view-buffer": "^1.0.2", + "data-view-byte-length": "^1.0.2", + "data-view-byte-offset": "^1.0.1", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "es-set-tostringtag": "^2.1.0", + "es-to-primitive": "^1.3.0", + "function.prototype.name": "^1.1.8", + "get-intrinsic": "^1.3.0", + "get-proto": "^1.0.1", + "get-symbol-description": "^1.1.0", + "globalthis": "^1.0.4", + "gopd": "^1.2.0", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "internal-slot": "^1.1.0", + "is-array-buffer": "^3.0.5", + "is-callable": "^1.2.7", + "is-data-view": "^1.0.2", + "is-negative-zero": "^2.0.3", + "is-regex": "^1.2.1", + "is-set": "^2.0.3", + "is-shared-array-buffer": "^1.0.4", + "is-string": "^1.1.1", + "is-typed-array": "^1.1.15", + "is-weakref": "^1.1.1", + "math-intrinsics": "^1.1.0", + "object-inspect": "^1.13.4", + "object-keys": "^1.1.1", + "object.assign": "^4.1.7", + "own-keys": "^1.0.1", + "regexp.prototype.flags": "^1.5.4", + "safe-array-concat": "^1.1.3", + "safe-push-apply": "^1.0.0", + "safe-regex-test": "^1.1.0", + "set-proto": "^1.0.0", + "stop-iteration-iterator": "^1.1.0", + "string.prototype.trim": 
"^1.2.10", + "string.prototype.trimend": "^1.0.9", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": "^1.0.3", + "typed-array-byte-length": "^1.0.3", + "typed-array-byte-offset": "^1.0.4", + "typed-array-length": "^1.0.7", + "unbox-primitive": "^1.1.0", + "which-typed-array": "^1.1.19", + }, + }, + "sha512-zHXBLhP+QehSSbsS9Pt23Gg964240DPd6QCf8WpkqEXxQ7fhdZzYsocOr5u7apWonsS5EjZDmTF+/slGMyasvw==", + ], + + "es-aggregate-error": [ + "es-aggregate-error@1.0.14", + "", + { + "dependencies": { + "define-data-property": "^1.1.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.24.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "globalthis": "^1.0.4", + "has-property-descriptors": "^1.0.2", + "set-function-name": "^2.0.2", + }, + }, + "sha512-3YxX6rVb07B5TV11AV5wsL7nQCHXNwoHPsQC8S4AmBiqYhyNCJ5BRKXkXyDJvs8QzXN20NgRtxe3dEEQD9NLHA==", + ], + + "es-define-property": [ + "es-define-property@1.0.1", + "", + {}, + "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + ], + + "es-errors": [ + "es-errors@1.3.0", + "", + {}, + "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + ], + + "es-object-atoms": [ + "es-object-atoms@1.1.1", + "", + { "dependencies": { "es-errors": "^1.3.0" } }, + "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + ], + + "es-set-tostringtag": [ + "es-set-tostringtag@2.1.0", + "", + { + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2", + }, + }, + "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + ], + + "es-to-primitive": [ + "es-to-primitive@1.3.0", + "", + { + "dependencies": { + "is-callable": "^1.2.7", + "is-date-object": "^1.0.5", + "is-symbol": "^1.0.4", + }, + }, + 
"sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g==", + ], + + "es6-promise": [ + "es6-promise@3.3.1", + "", + {}, + "sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==", + ], + + "esast-util-from-estree": [ + "esast-util-from-estree@2.0.0", + "", + { + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + }, + }, + "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==", + ], + + "esast-util-from-js": [ + "esast-util-from-js@2.0.1", + "", + { + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "acorn": "^8.0.0", + "esast-util-from-estree": "^2.0.0", + "vfile-message": "^4.0.0", + }, + }, + "sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==", + ], + + "esbuild": [ + "esbuild@0.25.12", + "", + { + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.12", + "@esbuild/android-arm": "0.25.12", + "@esbuild/android-arm64": "0.25.12", + "@esbuild/android-x64": "0.25.12", + "@esbuild/darwin-arm64": "0.25.12", + "@esbuild/darwin-x64": "0.25.12", + "@esbuild/freebsd-arm64": "0.25.12", + "@esbuild/freebsd-x64": "0.25.12", + "@esbuild/linux-arm": "0.25.12", + "@esbuild/linux-arm64": "0.25.12", + "@esbuild/linux-ia32": "0.25.12", + "@esbuild/linux-loong64": "0.25.12", + "@esbuild/linux-mips64el": "0.25.12", + "@esbuild/linux-ppc64": "0.25.12", + "@esbuild/linux-riscv64": "0.25.12", + "@esbuild/linux-s390x": "0.25.12", + "@esbuild/linux-x64": "0.25.12", + "@esbuild/netbsd-arm64": "0.25.12", + "@esbuild/netbsd-x64": "0.25.12", + "@esbuild/openbsd-arm64": "0.25.12", + "@esbuild/openbsd-x64": "0.25.12", + "@esbuild/openharmony-arm64": "0.25.12", + "@esbuild/sunos-x64": "0.25.12", + "@esbuild/win32-arm64": "0.25.12", + "@esbuild/win32-ia32": "0.25.12", + "@esbuild/win32-x64": "0.25.12", + }, + "bin": { 
"esbuild": "bin/esbuild" }, + }, + "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==", + ], + + "escalade": [ + "escalade@3.2.0", + "", + {}, + "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + ], + + "escape-html": [ + "escape-html@1.0.3", + "", + {}, + "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + ], + + "escape-string-regexp": [ + "escape-string-regexp@4.0.0", + "", + {}, + "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + ], + + "eslint": [ + "eslint@9.39.2", + "", + { + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.39.2", + "@eslint/plugin-kit": "^0.4.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + }, + "peerDependencies": { "jiti": "*" }, + "optionalPeers": ["jiti"], + "bin": { "eslint": "bin/eslint.js" }, + }, + "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", + ], + + "eslint-plugin-react-hooks": [ + 
"eslint-plugin-react-hooks@7.0.1", + "", + { + "dependencies": { + "@babel/core": "^7.24.4", + "@babel/parser": "^7.24.4", + "hermes-parser": "^0.25.1", + "zod": "^3.25.0 || ^4.0.0", + "zod-validation-error": "^3.5.0 || ^4.0.0", + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0", + }, + }, + "sha512-O0d0m04evaNzEPoSW+59Mezf8Qt0InfgGIBJnpC0h3NH/WjUAR7BIKUfysC6todmtiZ/A0oUVS8Gce0WhBrHsA==", + ], + + "eslint-plugin-react-refresh": [ + "eslint-plugin-react-refresh@0.4.26", + "", + { "peerDependencies": { "eslint": ">=8.40" } }, + "sha512-1RETEylht2O6FM/MvgnyvT+8K21wLqDNg4qD51Zj3guhjt433XbnnkVttHMyaVyAFD03QSV4LPS5iE3VQmO7XQ==", + ], + + "eslint-scope": [ + "eslint-scope@8.4.0", + "", + { "dependencies": { "esrecurse": "^4.3.0", "estraverse": "^5.2.0" } }, + "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + ], + + "eslint-visitor-keys": [ + "eslint-visitor-keys@4.2.1", + "", + {}, + "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + ], + + "espree": [ + "espree@10.4.0", + "", + { + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1", + }, + }, + "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + ], + + "esprima": [ + "esprima@4.0.1", + "", + { "bin": { "esparse": "./bin/esparse.js", "esvalidate": "./bin/esvalidate.js" } }, + "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + ], + + "esquery": [ + "esquery@1.6.0", + "", + { "dependencies": { "estraverse": "^5.1.0" } }, + "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + ], + + "esrecurse": [ + "esrecurse@4.3.0", + "", + { "dependencies": { "estraverse": "^5.2.0" } }, + "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + 
], + + "estraverse": [ + "estraverse@5.3.0", + "", + {}, + "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + ], + + "estree-util-attach-comments": [ + "estree-util-attach-comments@3.0.0", + "", + { "dependencies": { "@types/estree": "^1.0.0" } }, + "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", + ], + + "estree-util-build-jsx": [ + "estree-util-build-jsx@3.0.1", + "", + { + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-walker": "^3.0.0", + }, + }, + "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", + ], + + "estree-util-is-identifier-name": [ + "estree-util-is-identifier-name@3.0.0", + "", + {}, + "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + ], + + "estree-util-scope": [ + "estree-util-scope@1.0.0", + "", + { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0" } }, + "sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ==", + ], + + "estree-util-to-js": [ + "estree-util-to-js@2.0.0", + "", + { + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0", + }, + }, + "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", + ], + + "estree-util-value-to-estree": [ + "estree-util-value-to-estree@3.5.0", + "", + { "dependencies": { "@types/estree": "^1.0.0" } }, + "sha512-aMV56R27Gv3QmfmF1MY12GWkGzzeAezAX+UplqHVASfjc9wNzI/X6hC0S9oxq61WT4aQesLGslWP9tKk6ghRZQ==", + ], + + "estree-util-visit": [ + "estree-util-visit@2.0.0", + "", + { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/unist": "^3.0.0" } }, + "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + ], + + "estree-walker": [ + 
"estree-walker@3.0.3", + "", + { "dependencies": { "@types/estree": "^1.0.0" } }, + "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + ], + + "esutils": [ + "esutils@2.0.3", + "", + {}, + "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + ], + + "etag": [ + "etag@1.8.1", + "", + {}, + "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + ], + + "event-target-shim": [ + "event-target-shim@5.0.1", + "", + {}, + "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + ], + + "eventsource": [ + "eventsource@3.0.7", + "", + { "dependencies": { "eventsource-parser": "^3.0.1" } }, + "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==", + ], + + "eventsource-parser": [ + "eventsource-parser@3.0.6", + "", + {}, + "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==", + ], + + "execa": [ + "execa@5.1.1", + "", + { + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0", + }, + }, + "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + ], + + "express": [ + "express@5.2.1", + "", + { + "dependencies": { + "accepts": "^2.0.0", + "body-parser": "^2.2.1", + "content-disposition": "^1.0.0", + "content-type": "^1.0.5", + "cookie": "^0.7.1", + "cookie-signature": "^1.2.1", + "debug": "^4.4.0", + "depd": "^2.0.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "finalhandler": "^2.1.0", + "fresh": "^2.0.0", + "http-errors": "^2.0.0", + "merge-descriptors": "^2.0.0", + "mime-types": "^3.0.0", + "on-finished": "^2.4.1", + "once": 
"^1.4.0", + "parseurl": "^1.3.3", + "proxy-addr": "^2.0.7", + "qs": "^6.14.0", + "range-parser": "^1.2.1", + "router": "^2.2.0", + "send": "^1.1.0", + "serve-static": "^2.2.0", + "statuses": "^2.0.1", + "type-is": "^2.0.1", + "vary": "^1.1.2", + }, + }, + "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==", + ], + + "express-rate-limit": [ + "express-rate-limit@7.5.1", + "", + { "peerDependencies": { "express": ">= 4.11" } }, + "sha512-7iN8iPMDzOMHPUYllBEsQdWVB6fPDMPqwjBaFrgr4Jgr/+okjvzAy+UHlYYL/Vs0OsOrMkwS6PJDkFlJwoxUnw==", + ], + + "extend": [ + "extend@3.0.2", + "", + {}, + "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + ], + + "fast-deep-equal": [ + "fast-deep-equal@3.1.3", + "", + {}, + "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + ], + + "fast-glob": [ + "fast-glob@3.3.3", + "", + { + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8", + }, + }, + "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + ], + + "fast-json-stable-stringify": [ + "fast-json-stable-stringify@2.1.0", + "", + {}, + "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + ], + + "fast-levenshtein": [ + "fast-levenshtein@2.0.6", + "", + {}, + "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + ], + + "fast-memoize": [ + "fast-memoize@2.5.2", + "", + {}, + "sha512-Ue0LwpDYErFbmNnZSF0UH6eImUwDmogUO1jyE+JbN2gsQz/jICm1Ve7t9QT0rNSsfJt+Hs4/S3GnsDVjL4HVrw==", + ], + + "fast-safe-stringify": [ + "fast-safe-stringify@2.1.1", + "", + {}, + "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==", + ], + + "fast-uri": [ + "fast-uri@3.1.0", + "", + {}, + 
"sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + ], + + "fastq": [ + "fastq@1.20.1", + "", + { "dependencies": { "reusify": "^1.0.4" } }, + "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + ], + + "fault": [ + "fault@2.0.1", + "", + { "dependencies": { "format": "^0.2.0" } }, + "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==", + ], + + "fdir": [ + "fdir@6.5.0", + "", + { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, + "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + ], + + "fetch-blob": [ + "fetch-blob@3.2.0", + "", + { "dependencies": { "node-domexception": "^1.0.0", "web-streams-polyfill": "^3.0.3" } }, + "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", + ], + + "fflate": [ + "fflate@0.4.8", + "", + {}, + "sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==", + ], + + "figures": [ + "figures@6.1.0", + "", + { "dependencies": { "is-unicode-supported": "^2.0.0" } }, + "sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg==", + ], + + "file-entry-cache": [ + "file-entry-cache@8.0.0", + "", + { "dependencies": { "flat-cache": "^4.0.0" } }, + "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + ], + + "fill-range": [ + "fill-range@7.1.1", + "", + { "dependencies": { "to-regex-range": "^5.0.1" } }, + "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + ], + + "finalhandler": [ + "finalhandler@2.1.1", + "", + { + "dependencies": { + "debug": "^4.4.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "on-finished": "^2.4.1", + "parseurl": "^1.3.3", + "statuses": "^2.0.1", + }, + }, + 
"sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==", + ], + + "find-up": [ + "find-up@5.0.0", + "", + { "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" } }, + "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + ], + + "flat-cache": [ + "flat-cache@4.0.1", + "", + { "dependencies": { "flatted": "^3.2.9", "keyv": "^4.5.4" } }, + "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + ], + + "flatted": [ + "flatted@3.3.3", + "", + {}, + "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + ], + + "follow-redirects": [ + "follow-redirects@1.15.11", + "", + {}, + "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + ], + + "for-each": [ + "for-each@0.3.5", + "", + { "dependencies": { "is-callable": "^1.2.7" } }, + "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + ], + + "form-data": [ + "form-data@4.0.5", + "", + { + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12", + }, + }, + "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + ], + + "format": [ + "format@0.2.2", + "", + {}, + "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + ], + + "formdata-polyfill": [ + "formdata-polyfill@4.0.10", + "", + { "dependencies": { "fetch-blob": "^3.1.2" } }, + "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", + ], + + "forwarded": [ + "forwarded@0.2.0", + "", + {}, + "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + ], + + "framer-motion": [ + "framer-motion@12.34.0", + "", + { + 
"dependencies": { "motion-dom": "^12.34.0", "motion-utils": "^12.29.2", "tslib": "^2.4.0" }, + "peerDependencies": { + "@emotion/is-prop-valid": "*", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0", + }, + "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"], + }, + "sha512-+/H49owhzkzQyxtn7nZeF4kdH++I2FWrESQ184Zbcw5cEqNHYkE5yxWxcTLSj5lNx3NWdbIRy5FHqUvetD8FWg==", + ], + + "fresh": [ + "fresh@2.0.0", + "", + {}, + "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==", + ], + + "fs-extra": [ + "fs-extra@11.3.3", + "", + { + "dependencies": { "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", "universalify": "^2.0.0" }, + }, + "sha512-VWSRii4t0AFm6ixFFmLLx1t7wS1gh+ckoa84aOeapGum0h+EZd1EhEumSB+ZdDLnEPuucsVB9oB7cxJHap6Afg==", + ], + + "fsevents": [ + "fsevents@2.3.3", + "", + { "os": "darwin" }, + "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + ], + + "function-bind": [ + "function-bind@1.1.2", + "", + {}, + "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + ], + + "function.prototype.name": [ + "function.prototype.name@1.1.8", + "", + { + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "functions-have-names": "^1.2.3", + "hasown": "^2.0.2", + "is-callable": "^1.2.7", + }, + }, + "sha512-e5iwyodOHhbMr/yNrc7fDYG4qlbIvI5gajyzPnb5TCwyhjApznQh1BMFou9b30SevY43gCJKXycoCBjMbsuW0Q==", + ], + + "functions-have-names": [ + "functions-have-names@1.2.3", + "", + {}, + "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", + ], + + "fuzzysort": [ + "fuzzysort@3.1.0", + "", + {}, + "sha512-sR9BNCjBg6LNgwvxlBd0sBABvQitkLzoVY9MYYROQVX/FvfJ4Mai9LsGhDgd8qYdds0bY77VzYd5iuB+v5rwQQ==", + ], + + "fzf": [ + "fzf@0.5.2", + "", + {}, + 
"sha512-Tt4kuxLXFKHy8KT40zwsUPUkg1CrsgY25FxA2U/j/0WgEDCk3ddc/zLTCCcbSHX9FcKtLuVaDGtGE/STWC+j3Q==", + ], + + "generator-function": [ + "generator-function@2.0.1", + "", + {}, + "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==", + ], + + "gensync": [ + "gensync@1.0.0-beta.2", + "", + {}, + "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + ], + + "get-caller-file": [ + "get-caller-file@2.0.5", + "", + {}, + "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + ], + + "get-east-asian-width": [ + "get-east-asian-width@1.4.0", + "", + {}, + "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==", + ], + + "get-intrinsic": [ + "get-intrinsic@1.3.0", + "", + { + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0", + }, + }, + "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + ], + + "get-nonce": [ + "get-nonce@1.0.1", + "", + {}, + "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==", + ], + + "get-own-enumerable-keys": [ + "get-own-enumerable-keys@1.0.0", + "", + {}, + "sha512-PKsK2FSrQCyxcGHsGrLDcK0lx+0Ke+6e8KFFozA9/fIQLhQzPaRvJFdcz7+Axg3jUH/Mq+NI4xa5u/UT2tQskA==", + ], + + "get-proto": [ + "get-proto@1.0.1", + "", + { "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" } }, + "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + ], + + "get-stream": [ + "get-stream@6.0.1", + "", + {}, + "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", 
+ ], + + "get-symbol-description": [ + "get-symbol-description@1.1.0", + "", + { + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + }, + }, + "sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==", + ], + + "get-tsconfig": [ + "get-tsconfig@4.13.0", + "", + { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, + "sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ==", + ], + + "glob-parent": [ + "glob-parent@6.0.2", + "", + { "dependencies": { "is-glob": "^4.0.3" } }, + "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + ], + + "globals": [ + "globals@16.5.0", + "", + {}, + "sha512-c/c15i26VrJ4IRt5Z89DnIzCGDn9EcebibhAOjw5ibqEHsE1wLUgkPn9RDmNcUKyU87GeaL633nyJ+pplFR2ZQ==", + ], + + "globalthis": [ + "globalthis@1.0.4", + "", + { "dependencies": { "define-properties": "^1.2.1", "gopd": "^1.0.1" } }, + "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", + ], + + "globby": [ + "globby@11.1.0", + "", + { + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0", + }, + }, + "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + ], + + "goober": [ + "goober@2.1.18", + "", + { "peerDependencies": { "csstype": "^3.0.10" } }, + "sha512-2vFqsaDVIT9Gz7N6kAL++pLpp41l3PfDuusHcjnGLfR6+huZkl6ziX+zgVC3ZxpqWhzH6pyDdGrCeDhMIvwaxw==", + ], + + "gopd": [ + "gopd@1.2.0", + "", + {}, + "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + ], + + "graceful-fs": [ + "graceful-fs@4.2.11", + "", + {}, + "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + ], + + "graphql": [ + "graphql@16.12.0", + "", + {}, + 
"sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==", + ], + + "has-bigints": [ + "has-bigints@1.1.0", + "", + {}, + "sha512-R3pbpkcIqv2Pm3dUwgjclDRVmWpTJW2DcMzcIhEXEx1oh/CEMObMm3KLmRJOdvhM7o4uQBnwr8pzRK2sJWIqfg==", + ], + + "has-flag": [ + "has-flag@4.0.0", + "", + {}, + "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + ], + + "has-property-descriptors": [ + "has-property-descriptors@1.0.2", + "", + { "dependencies": { "es-define-property": "^1.0.0" } }, + "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + ], + + "has-proto": [ + "has-proto@1.2.0", + "", + { "dependencies": { "dunder-proto": "^1.0.0" } }, + "sha512-KIL7eQPfHQRC8+XluaIw7BHUwwqL19bQn4hzNgdr+1wXoU0KKj6rufu47lhY7KbJR2C6T6+PfyN0Ea7wkSS+qQ==", + ], + + "has-symbols": [ + "has-symbols@1.1.0", + "", + {}, + "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + ], + + "has-tostringtag": [ + "has-tostringtag@1.0.2", + "", + { "dependencies": { "has-symbols": "^1.0.3" } }, + "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + ], + + "hasown": [ + "hasown@2.0.2", + "", + { "dependencies": { "function-bind": "^1.1.2" } }, + "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + ], + + "hast-util-to-estree": [ + "hast-util-to-estree@3.1.3", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-attach-comments": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + 
"style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "zwitch": "^2.0.0", + }, + }, + "sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w==", + ], + + "hast-util-to-html": [ + "hast-util-to-html@9.0.5", + "", + { + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-whitespace": "^3.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "stringify-entities": "^4.0.0", + "zwitch": "^2.0.4", + }, + }, + "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==", + ], + + "hast-util-to-jsx-runtime": [ + "hast-util-to-jsx-runtime@2.3.6", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0", + }, + }, + "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", + ], + + "hast-util-whitespace": [ + "hast-util-whitespace@3.0.0", + "", + { "dependencies": { "@types/hast": "^3.0.0" } }, + "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + ], + + "headers-polyfill": [ + "headers-polyfill@4.0.3", + "", + {}, + "sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ==", + ], + + "hermes-estree": [ + "hermes-estree@0.25.1", + "", + {}, + 
"sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw==", + ], + + "hermes-parser": [ + "hermes-parser@0.25.1", + "", + { "dependencies": { "hermes-estree": "0.25.1" } }, + "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA==", + ], + + "hono": [ + "hono@4.11.1", + "", + {}, + "sha512-KsFcH0xxHes0J4zaQgWbYwmz3UPOOskdqZmItstUG93+Wk1ePBLkLGwbP9zlmh1BFUiL8Qp+Xfu9P7feJWpGNg==", + ], + + "html-void-elements": [ + "html-void-elements@3.0.0", + "", + {}, + "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + ], + + "http-errors": [ + "http-errors@2.0.1", + "", + { + "dependencies": { + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1", + }, + }, + "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", + ], + + "http2-client": [ + "http2-client@1.3.5", + "", + {}, + "sha512-EC2utToWl4RKfs5zd36Mxq7nzHHBuomZboI0yYL6Y0RmBgT7Sgkq4rQ0ezFTYoIsSs7Tm9SJe+o2FcAg6GBhGA==", + ], + + "https-proxy-agent": [ + "https-proxy-agent@7.0.6", + "", + { "dependencies": { "agent-base": "^7.1.2", "debug": "4" } }, + "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + ], + + "human-signals": [ + "human-signals@2.1.0", + "", + {}, + "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + ], + + "iconv-lite": [ + "iconv-lite@0.7.1", + "", + { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, + "sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw==", + ], + + "ignore": [ + "ignore@5.3.2", + "", + {}, + "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + ], + + "immer": [ + "immer@9.0.21", + "", + {}, + 
"sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", + ], + + "import-fresh": [ + "import-fresh@3.3.1", + "", + { "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" } }, + "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + ], + + "imurmurhash": [ + "imurmurhash@0.1.4", + "", + {}, + "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + ], + + "inflected": [ + "inflected@2.1.0", + "", + {}, + "sha512-hAEKNxvHf2Iq3H60oMBHkB4wl5jn3TPF3+fXek/sRwAB5gP9xWs4r7aweSF95f99HFoz69pnZTcu8f0SIHV18w==", + ], + + "inherits": [ + "inherits@2.0.4", + "", + {}, + "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + ], + + "inline-style-parser": [ + "inline-style-parser@0.2.7", + "", + {}, + "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==", + ], + + "internal-slot": [ + "internal-slot@1.1.0", + "", + { "dependencies": { "es-errors": "^1.3.0", "hasown": "^2.0.2", "side-channel": "^1.1.0" } }, + "sha512-4gd7VpWNQNB4UKKCFFVcp1AVv+FMOgs9NKzjHKusc8jTMhd5eL1NqQqOpE0KzMds804/yHlglp3uxgluOqAPLw==", + ], + + "ipaddr.js": [ + "ipaddr.js@1.9.1", + "", + {}, + "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + ], + + "is-alphabetical": [ + "is-alphabetical@2.0.1", + "", + {}, + "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + ], + + "is-alphanumerical": [ + "is-alphanumerical@2.0.1", + "", + { "dependencies": { "is-alphabetical": "^2.0.0", "is-decimal": "^2.0.0" } }, + "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + ], + + "is-array-buffer": [ + "is-array-buffer@3.0.5", + "", + { + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "get-intrinsic": "^1.2.6", + }, + }, + 
"sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==", + ], + + "is-arrayish": [ + "is-arrayish@0.2.1", + "", + {}, + "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + ], + + "is-async-function": [ + "is-async-function@2.1.1", + "", + { + "dependencies": { + "async-function": "^1.0.0", + "call-bound": "^1.0.3", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0", + }, + }, + "sha512-9dgM/cZBnNvjzaMYHVoxxfPj2QXt22Ev7SuuPrs+xav0ukGB0S6d4ydZdEiM48kLx5kDV+QBPrpVnFyefL8kkQ==", + ], + + "is-bigint": [ + "is-bigint@1.1.0", + "", + { "dependencies": { "has-bigints": "^1.0.2" } }, + "sha512-n4ZT37wG78iz03xPRKJrHTdZbe3IicyucEtdRsV5yglwc3GyUfbAfpSeD0FJ41NbUNSt5wbhqfp1fS+BgnvDFQ==", + ], + + "is-binary-path": [ + "is-binary-path@2.1.0", + "", + { "dependencies": { "binary-extensions": "^2.0.0" } }, + "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + ], + + "is-boolean-object": [ + "is-boolean-object@1.2.2", + "", + { "dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, + "sha512-wa56o2/ElJMYqjCjGkXri7it5FbebW5usLw/nPmCMs5DeZ7eziSYZhSmPRn0txqeW4LnAmQQU7FgqLpsEFKM4A==", + ], + + "is-callable": [ + "is-callable@1.2.7", + "", + {}, + "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + ], + + "is-data-view": [ + "is-data-view@1.0.2", + "", + { + "dependencies": { + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "is-typed-array": "^1.1.13", + }, + }, + "sha512-RKtWF8pGmS87i2D6gqQu/l7EYRlVdfzemCJN/P3UOs//x1QE7mfhvzHIApBTRf7axvT6DMGwSwBXYCT0nfB9xw==", + ], + + "is-date-object": [ + "is-date-object@1.1.0", + "", + { "dependencies": { "call-bound": "^1.0.2", "has-tostringtag": "^1.0.2" } }, + "sha512-PwwhEakHVKTdRNVOw+/Gyh0+MzlCl4R6qKvkhuvLtPMggI1WAHt9sOwZxQLSGpUaDnrdyDsomoRgNnCfKNSXXg==", + ], + + "is-decimal": [ + 
"is-decimal@2.0.1", + "", + {}, + "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + ], + + "is-docker": [ + "is-docker@3.0.0", + "", + { "bin": { "is-docker": "cli.js" } }, + "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", + ], + + "is-extglob": [ + "is-extglob@2.1.1", + "", + {}, + "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + ], + + "is-finalizationregistry": [ + "is-finalizationregistry@1.1.1", + "", + { "dependencies": { "call-bound": "^1.0.3" } }, + "sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==", + ], + + "is-fullwidth-code-point": [ + "is-fullwidth-code-point@3.0.0", + "", + {}, + "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + ], + + "is-generator-function": [ + "is-generator-function@1.1.2", + "", + { + "dependencies": { + "call-bound": "^1.0.4", + "generator-function": "^2.0.0", + "get-proto": "^1.0.1", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0", + }, + }, + "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==", + ], + + "is-glob": [ + "is-glob@4.0.3", + "", + { "dependencies": { "is-extglob": "^2.1.1" } }, + "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + ], + + "is-hexadecimal": [ + "is-hexadecimal@2.0.1", + "", + {}, + "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + ], + + "is-in-ssh": [ + "is-in-ssh@1.0.0", + "", + {}, + "sha512-jYa6Q9rH90kR1vKB6NM7qqd1mge3Fx4Dhw5TVlK1MUBqhEOuCagrEHMevNuCcbECmXZ0ThXkRm+Ymr51HwEPAw==", + ], + + "is-inside-container": [ + "is-inside-container@1.0.0", + "", + { "dependencies": { "is-docker": "^3.0.0" }, "bin": { "is-inside-container": "cli.js" } }, + 
"sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", + ], + + "is-interactive": [ + "is-interactive@2.0.0", + "", + {}, + "sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ==", + ], + + "is-map": [ + "is-map@2.0.3", + "", + {}, + "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", + ], + + "is-negative-zero": [ + "is-negative-zero@2.0.3", + "", + {}, + "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", + ], + + "is-node-process": [ + "is-node-process@1.2.0", + "", + {}, + "sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==", + ], + + "is-number": [ + "is-number@7.0.0", + "", + {}, + "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + ], + + "is-number-object": [ + "is-number-object@1.1.1", + "", + { "dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, + "sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==", + ], + + "is-obj": [ + "is-obj@3.0.0", + "", + {}, + "sha512-IlsXEHOjtKhpN8r/tRFj2nDyTmHvcfNeu/nrRIcXE17ROeatXchkojffa1SpdqW4cr/Fj6QkEf/Gn4zf6KKvEQ==", + ], + + "is-plain-obj": [ + "is-plain-obj@4.1.0", + "", + {}, + "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + ], + + "is-promise": [ + "is-promise@4.0.0", + "", + {}, + "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==", + ], + + "is-regex": [ + "is-regex@1.2.1", + "", + { + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2", + }, + }, + "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + ], + + "is-regexp": [ + "is-regexp@3.1.0", + "", + {}, + 
"sha512-rbku49cWloU5bSMI+zaRaXdQHXnthP6DZ/vLnfdSKyL4zUzuWnomtOEiZZOd+ioQ+avFo/qau3KPTc7Fjy1uPA==", + ], + + "is-set": [ + "is-set@2.0.3", + "", + {}, + "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", + ], + + "is-shared-array-buffer": [ + "is-shared-array-buffer@1.0.4", + "", + { "dependencies": { "call-bound": "^1.0.3" } }, + "sha512-ISWac8drv4ZGfwKl5slpHG9OwPNty4jOWPRIhBpxOoD+hqITiwuipOQ2bNthAzwA3B4fIjO4Nln74N0S9byq8A==", + ], + + "is-stream": [ + "is-stream@2.0.1", + "", + {}, + "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + ], + + "is-string": [ + "is-string@1.1.1", + "", + { "dependencies": { "call-bound": "^1.0.3", "has-tostringtag": "^1.0.2" } }, + "sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==", + ], + + "is-symbol": [ + "is-symbol@1.1.1", + "", + { + "dependencies": { + "call-bound": "^1.0.2", + "has-symbols": "^1.1.0", + "safe-regex-test": "^1.1.0", + }, + }, + "sha512-9gGx6GTtCQM73BgmHQXfDmLtfjjTUDSyoxTCbp5WtoixAhfgsDirWIcVQ/IHpvI5Vgd5i/J5F7B9cN/WlVbC/w==", + ], + + "is-typed-array": [ + "is-typed-array@1.1.15", + "", + { "dependencies": { "which-typed-array": "^1.1.16" } }, + "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", + ], + + "is-unicode-supported": [ + "is-unicode-supported@2.1.0", + "", + {}, + "sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ==", + ], + + "is-weakmap": [ + "is-weakmap@2.0.2", + "", + {}, + "sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", + ], + + "is-weakref": [ + "is-weakref@1.1.1", + "", + { "dependencies": { "call-bound": "^1.0.3" } }, + "sha512-6i9mGWSlqzNMEqpCp93KwRS1uUOodk2OJ6b+sq7ZPDSy2WuI5NFIxp/254TytR8ftefexkWn5xNiHUNpPOfSew==", + ], + + "is-weakset": [ + "is-weakset@2.0.4", + "", + { "dependencies": { "call-bound": 
"^1.0.3", "get-intrinsic": "^1.2.6" } }, + "sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==", + ], + + "is-wsl": [ + "is-wsl@3.1.0", + "", + { "dependencies": { "is-inside-container": "^1.0.0" } }, + "sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==", + ], + + "isarray": [ + "isarray@2.0.5", + "", + {}, + "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", + ], + + "isbot": [ + "isbot@5.1.32", + "", + {}, + "sha512-VNfjM73zz2IBZmdShMfAUg10prm6t7HFUQmNAEOAVS4YH92ZrZcvkMcGX6cIgBJAzWDzPent/EeAtYEHNPNPBQ==", + ], + + "isexe": [ + "isexe@2.0.0", + "", + {}, + "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + ], + + "jiti": [ + "jiti@2.6.1", + "", + { "bin": { "jiti": "lib/jiti-cli.mjs" } }, + "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==", + ], + + "jose": [ + "jose@6.1.3", + "", + {}, + "sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ==", + ], + + "js-tokens": [ + "js-tokens@4.0.0", + "", + {}, + "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + ], + + "js-yaml": [ + "js-yaml@4.1.1", + "", + { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, + "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + ], + + "jsep": [ + "jsep@1.4.0", + "", + {}, + "sha512-B7qPcEVE3NVkmSJbaYxvv4cHkVW7DQsZz13pUMrfS8z8Q/BuShN+gcTXrUlPiGqM2/t/EEaI030bpxMqY8gMlw==", + ], + + "jsesc": [ + "jsesc@3.1.0", + "", + { "bin": { "jsesc": "bin/jsesc" } }, + "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + ], + + "json-buffer": [ + "json-buffer@3.0.1", + "", + {}, + 
"sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + ], + + "json-parse-even-better-errors": [ + "json-parse-even-better-errors@2.3.1", + "", + {}, + "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + ], + + "json-schema-traverse": [ + "json-schema-traverse@0.4.1", + "", + {}, + "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + ], + + "json-schema-typed": [ + "json-schema-typed@8.0.2", + "", + {}, + "sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==", + ], + + "json-stable-stringify-without-jsonify": [ + "json-stable-stringify-without-jsonify@1.0.1", + "", + {}, + "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + ], + + "json5": [ + "json5@2.2.3", + "", + { "bin": { "json5": "lib/cli.js" } }, + "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + ], + + "jsonc-parser": [ + "jsonc-parser@2.2.1", + "", + {}, + "sha512-o6/yDBYccGvTz1+QFevz6l6OBZ2+fMVu2JZ9CIhzsYRX4mjaK5IyX9eldUdCmga16zlgQxyrj5pt9kzuj2C02w==", + ], + + "jsonfile": [ + "jsonfile@6.2.0", + "", + { + "dependencies": { "universalify": "^2.0.0" }, + "optionalDependencies": { "graceful-fs": "^4.1.6" }, + }, + "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + ], + + "jsonpath-plus": [ + "jsonpath-plus@10.3.0", + "", + { + "dependencies": { + "@jsep-plugin/assignment": "^1.3.0", + "@jsep-plugin/regex": "^1.0.4", + "jsep": "^1.4.0", + }, + "bin": { "jsonpath": "bin/jsonpath-cli.js", "jsonpath-plus": "bin/jsonpath-cli.js" }, + }, + "sha512-8TNmfeTCk2Le33A3vRRwtuworG/L5RrgMvdjhKZxvyShO+mBu2fP50OWUjRLNtvw344DdDarFh9buFAZs5ujeA==", + ], + + "jsonpointer": [ + "jsonpointer@5.0.1", + "", + {}, + 
"sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==", + ], + + "jsonschema": [ + "jsonschema@1.5.0", + "", + {}, + "sha512-K+A9hhqbn0f3pJX17Q/7H6yQfD/5OXgdrR5UE12gMXCiN9D5Xq2o5mddV2QEcX/bjla99ASsAAQUyMCCRWAEhw==", + ], + + "keyv": [ + "keyv@4.5.4", + "", + { "dependencies": { "json-buffer": "3.0.1" } }, + "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + ], + + "kleur": [ + "kleur@4.1.5", + "", + {}, + "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + ], + + "leven": [ + "leven@3.1.0", + "", + {}, + "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + ], + + "levn": [ + "levn@0.4.1", + "", + { "dependencies": { "prelude-ls": "^1.2.1", "type-check": "~0.4.0" } }, + "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + ], + + "lightningcss": [ + "lightningcss@1.30.2", + "", + { + "dependencies": { "detect-libc": "^2.0.3" }, + "optionalDependencies": { + "lightningcss-android-arm64": "1.30.2", + "lightningcss-darwin-arm64": "1.30.2", + "lightningcss-darwin-x64": "1.30.2", + "lightningcss-freebsd-x64": "1.30.2", + "lightningcss-linux-arm-gnueabihf": "1.30.2", + "lightningcss-linux-arm64-gnu": "1.30.2", + "lightningcss-linux-arm64-musl": "1.30.2", + "lightningcss-linux-x64-gnu": "1.30.2", + "lightningcss-linux-x64-musl": "1.30.2", + "lightningcss-win32-arm64-msvc": "1.30.2", + "lightningcss-win32-x64-msvc": "1.30.2", + }, + }, + "sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==", + ], + + "lightningcss-android-arm64": [ + "lightningcss-android-arm64@1.30.2", + "", + { "os": "android", "cpu": "arm64" }, + "sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==", + ], + + "lightningcss-darwin-arm64": [ + "lightningcss-darwin-arm64@1.30.2", + "", + 
{ "os": "darwin", "cpu": "arm64" }, + "sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==", + ], + + "lightningcss-darwin-x64": [ + "lightningcss-darwin-x64@1.30.2", + "", + { "os": "darwin", "cpu": "x64" }, + "sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==", + ], + + "lightningcss-freebsd-x64": [ + "lightningcss-freebsd-x64@1.30.2", + "", + { "os": "freebsd", "cpu": "x64" }, + "sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==", + ], + + "lightningcss-linux-arm-gnueabihf": [ + "lightningcss-linux-arm-gnueabihf@1.30.2", + "", + { "os": "linux", "cpu": "arm" }, + "sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==", + ], + + "lightningcss-linux-arm64-gnu": [ + "lightningcss-linux-arm64-gnu@1.30.2", + "", + { "os": "linux", "cpu": "arm64" }, + "sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==", + ], + + "lightningcss-linux-arm64-musl": [ + "lightningcss-linux-arm64-musl@1.30.2", + "", + { "os": "linux", "cpu": "arm64" }, + "sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==", + ], + + "lightningcss-linux-x64-gnu": [ + "lightningcss-linux-x64-gnu@1.30.2", + "", + { "os": "linux", "cpu": "x64" }, + "sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==", + ], + + "lightningcss-linux-x64-musl": [ + "lightningcss-linux-x64-musl@1.30.2", + "", + { "os": "linux", "cpu": "x64" }, + "sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==", + ], + + "lightningcss-win32-arm64-msvc": [ + "lightningcss-win32-arm64-msvc@1.30.2", + "", + { "os": "win32", "cpu": "arm64" }, + "sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==", + ], + + "lightningcss-win32-x64-msvc": [ + 
"lightningcss-win32-x64-msvc@1.30.2", + "", + { "os": "win32", "cpu": "x64" }, + "sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==", + ], + + "lines-and-columns": [ + "lines-and-columns@1.2.4", + "", + {}, + "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + ], + + "linkify-it": [ + "linkify-it@5.0.0", + "", + { "dependencies": { "uc.micro": "^2.0.0" } }, + "sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==", + ], + + "locate-path": [ + "locate-path@6.0.0", + "", + { "dependencies": { "p-locate": "^5.0.0" } }, + "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + ], + + "lodash": [ + "lodash@4.17.21", + "", + {}, + "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + ], + + "lodash.isempty": [ + "lodash.isempty@4.4.0", + "", + {}, + "sha512-oKMuF3xEeqDltrGMfDxAPGIVMSSRv8tbRSODbrs4KGsRRLEhrW8N8Rd4DRgB2+621hY8A8XwwrTVhXWpxFvMzg==", + ], + + "lodash.merge": [ + "lodash.merge@4.6.2", + "", + {}, + "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + ], + + "lodash.omitby": [ + "lodash.omitby@4.6.0", + "", + {}, + "sha512-5OrRcIVR75M288p4nbI2WLAf3ndw2GD9fyNv3Bc15+WCxJDdZ4lYndSxGd7hnG6PVjiJTeJE2dHEGhIuKGicIQ==", + ], + + "lodash.topath": [ + "lodash.topath@4.5.2", + "", + {}, + "sha512-1/W4dM+35DwvE/iEd1M9ekewOSTlpFekhw9mhAtrwjVqUr83/ilQiyAvmg4tVX7Unkcfl1KC+i9WdaT4B6aQcg==", + ], + + "lodash.uniq": [ + "lodash.uniq@4.5.0", + "", + {}, + "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==", + ], + + "lodash.uniqby": [ + "lodash.uniqby@4.7.0", + "", + {}, + "sha512-e/zcLx6CSbmaEgFHCA7BnoQKyCtKMxnuWrJygbwPs/AIn+IMKl66L8/s+wBUn5LRw2pZx3bUHibiV1b6aTWIww==", + ], + + "lodash.uniqwith": [ + "lodash.uniqwith@4.5.0", + "", + {}, + 
"sha512-7lYL8bLopMoy4CTICbxygAUq6CdRJ36vFc80DucPueUee+d5NBRxz3FdT9Pes/HEx5mPoT9jwnsEJWz1N7uq7Q==", + ], + + "log-symbols": [ + "log-symbols@6.0.0", + "", + { "dependencies": { "chalk": "^5.3.0", "is-unicode-supported": "^1.3.0" } }, + "sha512-i24m8rpwhmPIS4zscNzK6MSEhk0DUWa/8iYQWxhffV8jkI4Phvs3F+quL5xvS0gdQR0FyTCMMH33Y78dDTzzIw==", + ], + + "loglevel": [ + "loglevel@1.9.2", + "", + {}, + "sha512-HgMmCqIJSAKqo68l0rS2AanEWfkxaZ5wNiEFb5ggm08lDs9Xl2KxBlX3PTcaD2chBM1gXAYf491/M2Rv8Jwayg==", + ], + + "loglevel-plugin-prefix": [ + "loglevel-plugin-prefix@0.8.4", + "", + {}, + "sha512-WpG9CcFAOjz/FtNht+QJeGpvVl/cdR6P0z6OcXSkr8wFJOsV2GRj2j10JLfjuA4aYkcKCNIEqRGCyTife9R8/g==", + ], + + "long": [ + "long@5.3.2", + "", + {}, + "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==", + ], + + "longest-streak": [ + "longest-streak@3.1.0", + "", + {}, + "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + ], + + "lru-cache": [ + "lru-cache@5.1.1", + "", + { "dependencies": { "yallist": "^3.0.2" } }, + "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + ], + + "lucide-react": [ + "lucide-react@0.562.0", + "", + { "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, + "sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw==", + ], + + "lunr": [ + "lunr@2.3.9", + "", + {}, + "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==", + ], + + "magic-string": [ + "magic-string@0.30.21", + "", + { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.5" } }, + "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + ], + + "markdown-extensions": [ + "markdown-extensions@2.0.0", + "", + {}, + "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + ], + + 
"markdown-it": [ + "markdown-it@14.1.0", + "", + { + "dependencies": { + "argparse": "^2.0.1", + "entities": "^4.4.0", + "linkify-it": "^5.0.0", + "mdurl": "^2.0.0", + "punycode.js": "^2.3.1", + "uc.micro": "^2.1.0", + }, + "bin": { "markdown-it": "bin/markdown-it.mjs" }, + }, + "sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==", + ], + + "markdown-table": [ + "markdown-table@3.0.4", + "", + {}, + "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + ], + + "math-intrinsics": [ + "math-intrinsics@1.1.0", + "", + {}, + "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + ], + + "mdast-util-find-and-replace": [ + "mdast-util-find-and-replace@3.0.2", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0", + }, + }, + "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + ], + + "mdast-util-from-markdown": [ + "mdast-util-from-markdown@2.0.2", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0", + }, + }, + "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", + ], + + "mdast-util-frontmatter": [ + "mdast-util-frontmatter@2.0.1", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "escape-string-regexp": "^5.0.0", + "mdast-util-from-markdown": "^2.0.0", + 
"mdast-util-to-markdown": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0", + }, + }, + "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", + ], + + "mdast-util-gfm": [ + "mdast-util-gfm@3.1.0", + "", + { + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + }, + }, + "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + ], + + "mdast-util-gfm-autolink-literal": [ + "mdast-util-gfm-autolink-literal@2.0.1", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0", + }, + }, + "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + ], + + "mdast-util-gfm-footnote": [ + "mdast-util-gfm-footnote@2.1.0", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + }, + }, + "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + ], + + "mdast-util-gfm-strikethrough": [ + "mdast-util-gfm-strikethrough@2.0.0", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + }, + }, + "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + ], + + "mdast-util-gfm-table": [ + "mdast-util-gfm-table@2.0.0", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": 
"^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + }, + }, + "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + ], + + "mdast-util-gfm-task-list-item": [ + "mdast-util-gfm-task-list-item@2.0.0", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + }, + }, + "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + ], + + "mdast-util-mdx": [ + "mdast-util-mdx@3.0.0", + "", + { + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + }, + }, + "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + ], + + "mdast-util-mdx-expression": [ + "mdast-util-mdx-expression@2.0.1", + "", + { + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + }, + }, + "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + ], + + "mdast-util-mdx-jsx": [ + "mdast-util-mdx-jsx@3.2.0", + "", + { + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0", + }, + }, + "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + ], + + "mdast-util-mdxjs-esm": [ + "mdast-util-mdxjs-esm@2.0.1", + "", + { + "dependencies": { + 
"@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + }, + }, + "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + ], + + "mdast-util-phrasing": [ + "mdast-util-phrasing@4.1.0", + "", + { "dependencies": { "@types/mdast": "^4.0.0", "unist-util-is": "^6.0.0" } }, + "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + ], + + "mdast-util-to-hast": [ + "mdast-util-to-hast@13.2.1", + "", + { + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + }, + }, + "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + ], + + "mdast-util-to-markdown": [ + "mdast-util-to-markdown@2.1.2", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0", + }, + }, + "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + ], + + "mdast-util-to-string": [ + "mdast-util-to-string@4.0.0", + "", + { "dependencies": { "@types/mdast": "^4.0.0" } }, + "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + ], + + "mdurl": [ + "mdurl@2.0.0", + "", + {}, + "sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==", + ], + + "media-typer": [ + "media-typer@1.1.0", + "", + {}, + 
"sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==", + ], + + "merge-descriptors": [ + "merge-descriptors@2.0.0", + "", + {}, + "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==", + ], + + "merge-stream": [ + "merge-stream@2.0.0", + "", + {}, + "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + ], + + "merge2": [ + "merge2@1.4.1", + "", + {}, + "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + ], + + "micromark": [ + "micromark@4.0.2", + "", + { + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + ], + + "micromark-core-commonmark": [ + "micromark-core-commonmark@2.0.3", + "", + { + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": 
"^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + ], + + "micromark-extension-frontmatter": [ + "micromark-extension-frontmatter@2.0.0", + "", + { + "dependencies": { + "fault": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg==", + ], + + "micromark-extension-gfm": [ + "micromark-extension-gfm@3.0.0", + "", + { + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + ], + + "micromark-extension-gfm-autolink-literal": [ + "micromark-extension-gfm-autolink-literal@2.1.0", + "", + { + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + ], + + "micromark-extension-gfm-footnote": [ + "micromark-extension-gfm-footnote@2.1.0", + "", + { + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + 
"micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + ], + + "micromark-extension-gfm-strikethrough": [ + "micromark-extension-gfm-strikethrough@2.1.0", + "", + { + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + ], + + "micromark-extension-gfm-table": [ + "micromark-extension-gfm-table@2.1.1", + "", + { + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + ], + + "micromark-extension-gfm-tagfilter": [ + "micromark-extension-gfm-tagfilter@2.0.0", + "", + { "dependencies": { "micromark-util-types": "^2.0.0" } }, + "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + ], + + "micromark-extension-gfm-task-list-item": [ + "micromark-extension-gfm-task-list-item@2.1.0", + "", + { + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + ], + + "micromark-extension-mdx-expression": [ + "micromark-extension-mdx-expression@3.0.1", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + 
"micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q==", + ], + + "micromark-extension-mdx-jsx": [ + "micromark-extension-mdx-jsx@3.0.2", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0", + }, + }, + "sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ==", + ], + + "micromark-extension-mdx-md": [ + "micromark-extension-mdx-md@2.0.0", + "", + { "dependencies": { "micromark-util-types": "^2.0.0" } }, + "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", + ], + + "micromark-extension-mdxjs": [ + "micromark-extension-mdxjs@3.0.0", + "", + { + "dependencies": { + "acorn": "^8.0.0", + "acorn-jsx": "^5.0.0", + "micromark-extension-mdx-expression": "^3.0.0", + "micromark-extension-mdx-jsx": "^3.0.0", + "micromark-extension-mdx-md": "^2.0.0", + "micromark-extension-mdxjs-esm": "^3.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", + ], + + "micromark-extension-mdxjs-esm": [ + "micromark-extension-mdxjs-esm@3.0.0", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-util-character": "^2.0.0", + 
"micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0", + }, + }, + "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", + ], + + "micromark-factory-destination": [ + "micromark-factory-destination@2.0.1", + "", + { + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + ], + + "micromark-factory-label": [ + "micromark-factory-label@2.0.1", + "", + { + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + ], + + "micromark-factory-mdx-expression": [ + "micromark-factory-mdx-expression@2.0.3", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0", + }, + }, + "sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==", + ], + + "micromark-factory-space": [ + "micromark-factory-space@2.0.1", + "", + { + "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-types": "^2.0.0" }, + }, + "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + ], + + "micromark-factory-title": [ + "micromark-factory-title@2.0.1", + "", + { + "dependencies": { + "micromark-factory-space": "^2.0.0", + 
"micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + ], + + "micromark-factory-whitespace": [ + "micromark-factory-whitespace@2.0.1", + "", + { + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + ], + + "micromark-util-character": [ + "micromark-util-character@2.1.1", + "", + { "dependencies": { "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, + "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + ], + + "micromark-util-chunked": [ + "micromark-util-chunked@2.0.1", + "", + { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, + "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + ], + + "micromark-util-classify-character": [ + "micromark-util-classify-character@2.0.1", + "", + { + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + ], + + "micromark-util-combine-extensions": [ + "micromark-util-combine-extensions@2.0.1", + "", + { "dependencies": { "micromark-util-chunked": "^2.0.0", "micromark-util-types": "^2.0.0" } }, + "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + ], + + "micromark-util-decode-numeric-character-reference": [ + "micromark-util-decode-numeric-character-reference@2.0.2", + "", + { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, + 
"sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + ], + + "micromark-util-decode-string": [ + "micromark-util-decode-string@2.0.1", + "", + { + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + }, + }, + "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + ], + + "micromark-util-encode": [ + "micromark-util-encode@2.0.1", + "", + {}, + "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + ], + + "micromark-util-events-to-acorn": [ + "micromark-util-events-to-acorn@2.0.3", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0", + }, + }, + "sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==", + ], + + "micromark-util-html-tag-name": [ + "micromark-util-html-tag-name@2.0.1", + "", + {}, + "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + ], + + "micromark-util-normalize-identifier": [ + "micromark-util-normalize-identifier@2.0.1", + "", + { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, + "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + ], + + "micromark-util-resolve-all": [ + "micromark-util-resolve-all@2.0.1", + "", + { "dependencies": { "micromark-util-types": "^2.0.0" } }, + "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + ], + + "micromark-util-sanitize-uri": [ + "micromark-util-sanitize-uri@2.0.1", + "", + { + "dependencies": { + "micromark-util-character": "^2.0.0", + 
"micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + }, + }, + "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + ], + + "micromark-util-subtokenize": [ + "micromark-util-subtokenize@2.1.0", + "", + { + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + }, + }, + "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + ], + + "micromark-util-symbol": [ + "micromark-util-symbol@2.0.1", + "", + {}, + "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + ], + + "micromark-util-types": [ + "micromark-util-types@2.0.2", + "", + {}, + "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + ], + + "micromatch": [ + "micromatch@4.0.8", + "", + { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, + "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + ], + + "mime-db": [ + "mime-db@1.52.0", + "", + {}, + "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + ], + + "mime-types": [ + "mime-types@2.1.35", + "", + { "dependencies": { "mime-db": "1.52.0" } }, + "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + ], + + "mimic-fn": [ + "mimic-fn@2.1.0", + "", + {}, + "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + ], + + "mimic-function": [ + "mimic-function@5.0.1", + "", + {}, + "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA==", + ], + + "minimatch": [ + "minimatch@3.1.2", + "", + { "dependencies": { "brace-expansion": "^1.1.7" } }, + 
"sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + ], + + "minimist": [ + "minimist@1.2.8", + "", + {}, + "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + ], + + "motion-dom": [ + "motion-dom@12.34.0", + "", + { "dependencies": { "motion-utils": "^12.29.2" } }, + "sha512-Lql3NuEcScRDxTAO6GgUsRHBZOWI/3fnMlkMcH5NftzcN37zJta+bpbMAV9px4Nj057TuvRooMK7QrzMCgtz6Q==", + ], + + "motion-utils": [ + "motion-utils@12.29.2", + "", + {}, + "sha512-G3kc34H2cX2gI63RqU+cZq+zWRRPSsNIOjpdl9TN4AQwC4sgwYPl/Q/Obf/d53nOm569T0fYK+tcoSV50BWx8A==", + ], + + "ms": [ + "ms@2.1.3", + "", + {}, + "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + ], + + "msw": [ + "msw@2.12.4", + "", + { + "dependencies": { + "@inquirer/confirm": "^5.0.0", + "@mswjs/interceptors": "^0.40.0", + "@open-draft/deferred-promise": "^2.2.0", + "@types/statuses": "^2.0.6", + "cookie": "^1.0.2", + "graphql": "^16.12.0", + "headers-polyfill": "^4.0.2", + "is-node-process": "^1.2.0", + "outvariant": "^1.4.3", + "path-to-regexp": "^6.3.0", + "picocolors": "^1.1.1", + "rettime": "^0.7.0", + "statuses": "^2.0.2", + "strict-event-emitter": "^0.5.1", + "tough-cookie": "^6.0.0", + "type-fest": "^5.2.0", + "until-async": "^3.0.2", + "yargs": "^17.7.2", + }, + "peerDependencies": { "typescript": ">= 4.8.x" }, + "optionalPeers": ["typescript"], + "bin": { "msw": "cli/index.js" }, + }, + "sha512-rHNiVfTyKhzc0EjoXUBVGteNKBevdjOlVC6GlIRXpy+/3LHEIGRovnB5WPjcvmNODVQ1TNFnoa7wsGbd0V3epg==", + ], + + "mute-stream": [ + "mute-stream@2.0.0", + "", + {}, + "sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==", + ], + + "nanoid": [ + "nanoid@3.3.11", + "", + { "bin": { "nanoid": "bin/nanoid.cjs" } }, + "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + ], + + "natural-compare": [ + 
"natural-compare@1.4.0", + "", + {}, + "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + ], + + "negotiator": [ + "negotiator@1.0.0", + "", + {}, + "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", + ], + + "next-themes": [ + "next-themes@0.4.6", + "", + { + "peerDependencies": { + "react": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc", + }, + }, + "sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA==", + ], + + "nimma": [ + "nimma@0.2.3", + "", + { + "dependencies": { + "@jsep-plugin/regex": "^1.0.1", + "@jsep-plugin/ternary": "^1.0.2", + "astring": "^1.8.1", + "jsep": "^1.2.0", + }, + "optionalDependencies": { "jsonpath-plus": "^6.0.1 || ^10.1.0", "lodash.topath": "^4.5.2" }, + }, + "sha512-1ZOI8J+1PKKGceo/5CT5GfQOG6H8I2BencSK06YarZ2wXwH37BSSUWldqJmMJYA5JfqDqffxDXynt6f11AyKcA==", + ], + + "node-domexception": [ + "node-domexception@1.0.0", + "", + {}, + "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + ], + + "node-fetch": [ + "node-fetch@3.3.2", + "", + { + "dependencies": { + "data-uri-to-buffer": "^4.0.0", + "fetch-blob": "^3.1.4", + "formdata-polyfill": "^4.0.10", + }, + }, + "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", + ], + + "node-fetch-h2": [ + "node-fetch-h2@2.3.0", + "", + { "dependencies": { "http2-client": "^1.2.5" } }, + "sha512-ofRW94Ab0T4AOh5Fk8t0h8OBWrmjb0SSB20xh1H8YnPV9EJ+f5AMoYSUQ2zgJ4Iq2HAK0I2l5/Nequ8YzFS3Hg==", + ], + + "node-readfiles": [ + "node-readfiles@0.2.0", + "", + { "dependencies": { "es6-promise": "^3.2.1" } }, + "sha512-SU00ZarexNlE4Rjdm83vglt5Y9yiQ+XI1XpflWlb7q7UTN1JUItm69xMeiQCTxtTfnzt+83T8Cx+vI2ED++VDA==", + ], + + "node-releases": [ + "node-releases@2.0.27", + "", + {}, + 
"sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + ], + + "normalize-path": [ + "normalize-path@3.0.0", + "", + {}, + "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + ], + + "npm-run-path": [ + "npm-run-path@4.0.1", + "", + { "dependencies": { "path-key": "^3.0.0" } }, + "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + ], + + "oas-kit-common": [ + "oas-kit-common@1.0.8", + "", + { "dependencies": { "fast-safe-stringify": "^2.0.7" } }, + "sha512-pJTS2+T0oGIwgjGpw7sIRU8RQMcUoKCDWFLdBqKB2BNmGpbBMH2sdqAaOXUg8OzonZHU0L7vfJu1mJFEiYDWOQ==", + ], + + "oas-linter": [ + "oas-linter@3.2.2", + "", + { + "dependencies": { + "@exodus/schemasafe": "^1.0.0-rc.2", + "should": "^13.2.1", + "yaml": "^1.10.0", + }, + }, + "sha512-KEGjPDVoU5K6swgo9hJVA/qYGlwfbFx+Kg2QB/kd7rzV5N8N5Mg6PlsoCMohVnQmo+pzJap/F610qTodKzecGQ==", + ], + + "oas-resolver": [ + "oas-resolver@2.5.6", + "", + { + "dependencies": { + "node-fetch-h2": "^2.3.0", + "oas-kit-common": "^1.0.8", + "reftools": "^1.1.9", + "yaml": "^1.10.0", + "yargs": "^17.0.1", + }, + "bin": { "resolve": "resolve.js" }, + }, + "sha512-Yx5PWQNZomfEhPPOphFbZKi9W93CocQj18NlD2Pa4GWZzdZpSJvYwoiuurRI7m3SpcChrnO08hkuQDL3FGsVFQ==", + ], + + "oas-schema-walker": [ + "oas-schema-walker@1.1.5", + "", + {}, + "sha512-2yucenq1a9YPmeNExoUa9Qwrt9RFkjqaMAA1X+U7sbb0AqBeTIdMHky9SQQ6iN94bO5NW0W4TRYXerG+BdAvAQ==", + ], + + "oas-validator": [ + "oas-validator@5.0.8", + "", + { + "dependencies": { + "call-me-maybe": "^1.0.1", + "oas-kit-common": "^1.0.8", + "oas-linter": "^3.2.2", + "oas-resolver": "^2.5.6", + "oas-schema-walker": "^1.1.5", + "reftools": "^1.1.9", + "should": "^13.2.1", + "yaml": "^1.10.0", + }, + }, + "sha512-cu20/HE5N5HKqVygs3dt94eYJfBi0TsZvPVXDhbXQHiEityDN+RROTleefoKRKKJ9dFAF2JBkDHgvWj0sjKGmw==", + ], + + "object-assign": [ + "object-assign@4.1.1", + "", + {}, + 
"sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + ], + + "object-inspect": [ + "object-inspect@1.13.4", + "", + {}, + "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + ], + + "object-keys": [ + "object-keys@1.1.1", + "", + {}, + "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + ], + + "object-treeify": [ + "object-treeify@1.1.33", + "", + {}, + "sha512-EFVjAYfzWqWsBMRHPMAXLCDIJnpMhdWAqR7xG6M6a2cs6PMFpl/+Z20w9zDW4vkxOFfddegBKq9Rehd0bxWE7A==", + ], + + "object.assign": [ + "object.assign@4.1.7", + "", + { + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", + "object-keys": "^1.1.1", + }, + }, + "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", + ], + + "on-finished": [ + "on-finished@2.4.1", + "", + { "dependencies": { "ee-first": "1.1.1" } }, + "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + ], + + "once": [ + "once@1.4.0", + "", + { "dependencies": { "wrappy": "1" } }, + "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + ], + + "onetime": [ + "onetime@5.1.2", + "", + { "dependencies": { "mimic-fn": "^2.1.0" } }, + "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + ], + + "oniguruma-parser": [ + "oniguruma-parser@0.12.1", + "", + {}, + "sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==", + ], + + "oniguruma-to-es": [ + "oniguruma-to-es@4.3.4", + "", + { + "dependencies": { + "oniguruma-parser": "^0.12.1", + "regex": "^6.0.1", + "regex-recursion": "^6.0.2", + }, + }, + "sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==", 
+ ], + + "open": [ + "open@11.0.0", + "", + { + "dependencies": { + "default-browser": "^5.4.0", + "define-lazy-prop": "^3.0.0", + "is-in-ssh": "^1.0.0", + "is-inside-container": "^1.0.0", + "powershell-utils": "^0.1.0", + "wsl-utils": "^0.3.0", + }, + }, + "sha512-smsWv2LzFjP03xmvFoJ331ss6h+jixfA4UUV/Bsiyuu4YJPfN+FIQGOIiv4w9/+MoHkfkJ22UIaQWRVFRfH6Vw==", + ], + + "openapi-types": [ + "openapi-types@12.1.3", + "", + {}, + "sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==", + ], + + "openapi3-ts": [ + "openapi3-ts@4.5.0", + "", + { "dependencies": { "yaml": "^2.8.0" } }, + "sha512-jaL+HgTq2Gj5jRcfdutgRGLosCy/hT8sQf6VOy+P+g36cZOjI1iukdPnijC+4CmeRzg/jEllJUboEic2FhxhtQ==", + ], + + "optionator": [ + "optionator@0.9.4", + "", + { + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5", + }, + }, + "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + ], + + "ora": [ + "ora@8.2.0", + "", + { + "dependencies": { + "chalk": "^5.3.0", + "cli-cursor": "^5.0.0", + "cli-spinners": "^2.9.2", + "is-interactive": "^2.0.0", + "is-unicode-supported": "^2.0.0", + "log-symbols": "^6.0.0", + "stdin-discarder": "^0.2.2", + "string-width": "^7.2.0", + "strip-ansi": "^7.1.0", + }, + }, + "sha512-weP+BZ8MVNnlCm8c0Qdc1WSWq4Qn7I+9CJGm7Qali6g44e/PUzbjNqJX5NJ9ljlNMosfJvg1fKEGILklK9cwnw==", + ], + + "orval": [ + "orval@7.17.2", + "", + { + "dependencies": { + "@apidevtools/swagger-parser": "^12.1.0", + "@commander-js/extra-typings": "^14.0.0", + "@orval/angular": "7.17.2", + "@orval/axios": "7.17.2", + "@orval/core": "7.17.2", + "@orval/fetch": "7.17.2", + "@orval/hono": "7.17.2", + "@orval/mcp": "7.17.2", + "@orval/mock": "7.17.2", + "@orval/query": "7.17.2", + "@orval/swr": "7.17.2", + "@orval/zod": "7.17.2", + "chalk": "^4.1.2", + "chokidar": "^4.0.3", + "commander": "^14.0.1", + 
"enquirer": "^2.4.1", + "execa": "^5.1.1", + "find-up": "5.0.0", + "fs-extra": "^11.3.2", + "jiti": "^2.6.1", + "js-yaml": "4.1.1", + "lodash.uniq": "^4.5.0", + "openapi3-ts": "4.5.0", + "string-argv": "^0.3.2", + "tsconfck": "^2.1.2", + "typedoc": "^0.28.14", + "typedoc-plugin-coverage": "^4.0.2", + "typedoc-plugin-markdown": "^4.9.0", + }, + "bin": "./dist/bin/orval.js", + }, + "sha512-6+drCVVWNukdX+ytFPOC2UJ51gv0kGAPch7cTQYxO7VsGwdZ1DiZhYozdGehwu6QeVvAszPhPlh4uLq53k0x+w==", + ], + + "outvariant": [ + "outvariant@1.4.3", + "", + {}, + "sha512-+Sl2UErvtsoajRDKCE5/dBz4DIvHXQQnAxtQTF04OJxY0+DyZXSo5P5Bb7XYWOh81syohlYL24hbDwxedPUJCA==", + ], + + "own-keys": [ + "own-keys@1.0.1", + "", + { + "dependencies": { + "get-intrinsic": "^1.2.6", + "object-keys": "^1.1.1", + "safe-push-apply": "^1.0.0", + }, + }, + "sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==", + ], + + "p-limit": [ + "p-limit@3.1.0", + "", + { "dependencies": { "yocto-queue": "^0.1.0" } }, + "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + ], + + "p-locate": [ + "p-locate@5.0.0", + "", + { "dependencies": { "p-limit": "^3.0.2" } }, + "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + ], + + "package-manager-detector": [ + "package-manager-detector@1.6.0", + "", + {}, + "sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA==", + ], + + "parent-module": [ + "parent-module@1.0.1", + "", + { "dependencies": { "callsites": "^3.0.0" } }, + "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + ], + + "parse-entities": [ + "parse-entities@4.0.2", + "", + { + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": 
"^2.0.0", + "is-hexadecimal": "^2.0.0", + }, + }, + "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + ], + + "parse-json": [ + "parse-json@5.2.0", + "", + { + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6", + }, + }, + "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + ], + + "parse-ms": [ + "parse-ms@4.0.0", + "", + {}, + "sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==", + ], + + "parseurl": [ + "parseurl@1.3.3", + "", + {}, + "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + ], + + "path-browserify": [ + "path-browserify@1.0.1", + "", + {}, + "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==", + ], + + "path-exists": [ + "path-exists@4.0.0", + "", + {}, + "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + ], + + "path-key": [ + "path-key@3.1.1", + "", + {}, + "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + ], + + "path-to-regexp": [ + "path-to-regexp@6.3.0", + "", + {}, + "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==", + ], + + "path-type": [ + "path-type@4.0.0", + "", + {}, + "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + ], + + "pathe": [ + "pathe@2.0.3", + "", + {}, + "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + ], + + "picocolors": [ + "picocolors@1.1.1", + "", + {}, + "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + ], + + "picomatch": [ + "picomatch@4.0.3", + "", + {}, + 
"sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + ], + + "pkce-challenge": [ + "pkce-challenge@5.0.1", + "", + {}, + "sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==", + ], + + "pony-cause": [ + "pony-cause@1.1.1", + "", + {}, + "sha512-PxkIc/2ZpLiEzQXu5YRDOUgBlfGYBY8156HY5ZcRAwwonMk5W/MrJP2LLkG/hF7GEQzaHo2aS7ho6ZLCOvf+6g==", + ], + + "possible-typed-array-names": [ + "possible-typed-array-names@1.1.0", + "", + {}, + "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", + ], + + "postcss": [ + "postcss@8.5.6", + "", + { + "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" }, + }, + "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + ], + + "postcss-selector-parser": [ + "postcss-selector-parser@7.1.1", + "", + { "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" } }, + "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + ], + + "posthog-js": [ + "posthog-js@1.351.1", + "", + { + "dependencies": { + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/api-logs": "^0.208.0", + "@opentelemetry/exporter-logs-otlp-http": "^0.208.0", + "@opentelemetry/resources": "^2.2.0", + "@opentelemetry/sdk-logs": "^0.208.0", + "@posthog/core": "1.23.1", + "@posthog/types": "1.351.1", + "core-js": "^3.38.1", + "dompurify": "^3.3.1", + "fflate": "^0.4.8", + "preact": "^10.28.2", + "query-selector-shadow-dom": "^1.0.1", + "web-vitals": "^5.1.0", + }, + }, + "sha512-IbyVjhmQOgXR5UDflibXpqticWbfNnh2hM5+h+Mtz+MKbmw4vIu0AsHqSr6IcNMyrRM8NHoNiecjb00N6ROtmQ==", + ], + + "powershell-utils": [ + "powershell-utils@0.1.0", + "", + {}, + "sha512-dM0jVuXJPsDN6DvRpea484tCUaMiXWjuCn++HGTqUWzGDjv5tZkEZldAJ/UMlqRYGFrD/etByo4/xOuC/snX2A==", + ], + + "preact": [ + "preact@10.28.3", + "", + {}, + 
"sha512-tCmoRkPQLpBeWzpmbhryairGnhW9tKV6c6gr/w+RhoRoKEJwsjzipwp//1oCpGPOchvSLaAPlpcJi9MwMmoPyA==", + ], + + "prelude-ls": [ + "prelude-ls@1.2.1", + "", + {}, + "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + ], + + "prettier": [ + "prettier@3.7.4", + "", + { "bin": { "prettier": "bin/prettier.cjs" } }, + "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==", + ], + + "prettier-plugin-tailwindcss": [ + "prettier-plugin-tailwindcss@0.7.2", + "", + { + "peerDependencies": { + "@ianvs/prettier-plugin-sort-imports": "*", + "@prettier/plugin-hermes": "*", + "@prettier/plugin-oxc": "*", + "@prettier/plugin-pug": "*", + "@shopify/prettier-plugin-liquid": "*", + "@trivago/prettier-plugin-sort-imports": "*", + "@zackad/prettier-plugin-twig": "*", + "prettier": "^3.0", + "prettier-plugin-astro": "*", + "prettier-plugin-css-order": "*", + "prettier-plugin-jsdoc": "*", + "prettier-plugin-marko": "*", + "prettier-plugin-multiline-arrays": "*", + "prettier-plugin-organize-attributes": "*", + "prettier-plugin-organize-imports": "*", + "prettier-plugin-sort-imports": "*", + "prettier-plugin-svelte": "*", + }, + "optionalPeers": [ + "@ianvs/prettier-plugin-sort-imports", + "@prettier/plugin-hermes", + "@prettier/plugin-oxc", + "@prettier/plugin-pug", + "@shopify/prettier-plugin-liquid", + "@trivago/prettier-plugin-sort-imports", + "@zackad/prettier-plugin-twig", + "prettier-plugin-astro", + "prettier-plugin-css-order", + "prettier-plugin-jsdoc", + "prettier-plugin-marko", + "prettier-plugin-multiline-arrays", + "prettier-plugin-organize-attributes", + "prettier-plugin-organize-imports", + "prettier-plugin-sort-imports", + "prettier-plugin-svelte", + ], + }, + "sha512-LkphyK3Fw+q2HdMOoiEHWf93fNtYJwfamoKPl7UwtjFQdei/iIBoX11G6j706FzN3ymX9mPVi97qIY8328vdnA==", + ], + + "pretty-ms": [ + "pretty-ms@9.3.0", + "", + { "dependencies": { "parse-ms": "^4.0.0" } }, + 
"sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ==", + ], + + "prompts": [ + "prompts@2.4.2", + "", + { "dependencies": { "kleur": "^3.0.3", "sisteransi": "^1.0.5" } }, + "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + ], + + "property-information": [ + "property-information@7.1.0", + "", + {}, + "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + ], + + "protobufjs": [ + "protobufjs@7.5.4", + "", + { + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0", + }, + }, + "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==", + ], + + "proxy-addr": [ + "proxy-addr@2.0.7", + "", + { "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, + "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + ], + + "proxy-from-env": [ + "proxy-from-env@1.1.0", + "", + {}, + "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + ], + + "punycode": [ + "punycode@2.3.1", + "", + {}, + "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + ], + + "punycode.js": [ + "punycode.js@2.3.1", + "", + {}, + "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==", + ], + + "qs": [ + "qs@6.14.0", + "", + { "dependencies": { "side-channel": "^1.1.0" } }, + "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==", + ], + + 
"query-selector-shadow-dom": [ + "query-selector-shadow-dom@1.0.1", + "", + {}, + "sha512-lT5yCqEBgfoMYpf3F2xQRK7zEr1rhIIZuceDK6+xRkJQ4NMbHTwXqk4NkwDwQMNqXgG9r9fyHnzwNVs6zV5KRw==", + ], + + "queue-microtask": [ + "queue-microtask@1.2.3", + "", + {}, + "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + ], + + "radix-ui": [ + "radix-ui@1.4.3", + "", + { + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-accessible-icon": "1.1.7", + "@radix-ui/react-accordion": "1.2.12", + "@radix-ui/react-alert-dialog": "1.1.15", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-aspect-ratio": "1.1.7", + "@radix-ui/react-avatar": "1.1.10", + "@radix-ui/react-checkbox": "1.3.3", + "@radix-ui/react-collapsible": "1.1.12", + "@radix-ui/react-collection": "1.1.7", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-context-menu": "2.2.16", + "@radix-ui/react-dialog": "1.1.15", + "@radix-ui/react-direction": "1.1.1", + "@radix-ui/react-dismissable-layer": "1.1.11", + "@radix-ui/react-dropdown-menu": "2.1.16", + "@radix-ui/react-focus-guards": "1.1.3", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-form": "0.1.8", + "@radix-ui/react-hover-card": "1.1.15", + "@radix-ui/react-label": "2.1.7", + "@radix-ui/react-menu": "2.1.16", + "@radix-ui/react-menubar": "1.1.16", + "@radix-ui/react-navigation-menu": "1.2.14", + "@radix-ui/react-one-time-password-field": "0.1.8", + "@radix-ui/react-password-toggle-field": "0.1.3", + "@radix-ui/react-popover": "1.1.15", + "@radix-ui/react-popper": "1.2.8", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-progress": "1.1.7", + "@radix-ui/react-radio-group": "1.3.8", + "@radix-ui/react-roving-focus": "1.1.11", + "@radix-ui/react-scroll-area": "1.2.10", + "@radix-ui/react-select": "2.2.6", + "@radix-ui/react-separator": "1.1.7", + 
"@radix-ui/react-slider": "1.3.6", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-switch": "1.2.6", + "@radix-ui/react-tabs": "1.1.13", + "@radix-ui/react-toast": "1.2.15", + "@radix-ui/react-toggle": "1.1.10", + "@radix-ui/react-toggle-group": "1.1.11", + "@radix-ui/react-toolbar": "1.1.11", + "@radix-ui/react-tooltip": "1.2.8", + "@radix-ui/react-use-callback-ref": "1.1.1", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-escape-keydown": "1.1.1", + "@radix-ui/react-use-is-hydrated": "0.1.0", + "@radix-ui/react-use-layout-effect": "1.1.1", + "@radix-ui/react-use-size": "1.1.1", + "@radix-ui/react-visually-hidden": "1.2.3", + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react", "@types/react-dom"], + }, + "sha512-aWizCQiyeAenIdUbqEpXgRA1ya65P13NKn/W8rWkcN0OPkRDxdBVLWnIEDsS2RpwCK2nobI7oMUSmexzTDyAmA==", + ], + + "range-parser": [ + "range-parser@1.2.1", + "", + {}, + "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + ], + + "raw-body": [ + "raw-body@3.0.2", + "", + { + "dependencies": { + "bytes": "~3.1.2", + "http-errors": "~2.0.1", + "iconv-lite": "~0.7.0", + "unpipe": "~1.0.0", + }, + }, + "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==", + ], + + "react": [ + "react@19.2.3", + "", + {}, + "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==", + ], + + "react-dom": [ + "react-dom@19.2.3", + "", + { "dependencies": { "scheduler": "^0.27.0" }, "peerDependencies": { "react": "^19.2.3" } }, + "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==", + ], + + "react-intersection-observer": [ + 
"react-intersection-observer@10.0.2", + "", + { + "peerDependencies": { + "react": "^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0", + }, + "optionalPeers": ["react-dom"], + }, + "sha512-lAMzxVWrBko6SLd1jx6l84fVrzJu91hpxHlvD2as2Wec9mDCjdYXwc5xNOFBchpeBir0Y7AGBW+C/AYMa7CSFg==", + ], + + "react-refresh": [ + "react-refresh@0.18.0", + "", + {}, + "sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw==", + ], + + "react-remove-scroll": [ + "react-remove-scroll@2.7.2", + "", + { + "dependencies": { + "react-remove-scroll-bar": "^2.3.7", + "react-style-singleton": "^2.2.3", + "tslib": "^2.1.0", + "use-callback-ref": "^1.3.3", + "use-sidecar": "^1.1.3", + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-Iqb9NjCCTt6Hf+vOdNIZGdTiH1QSqr27H/Ek9sv/a97gfueI/5h1s3yRi1nngzMUaOOToin5dI1dXKdXiF+u0Q==", + ], + + "react-remove-scroll-bar": [ + "react-remove-scroll-bar@2.3.8", + "", + { + "dependencies": { "react-style-singleton": "^2.2.2", "tslib": "^2.0.0" }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==", + ], + + "react-style-singleton": [ + "react-style-singleton@2.2.3", + "", + { + "dependencies": { "get-nonce": "^1.0.0", "tslib": "^2.0.0" }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==", + ], + + "readdirp": [ + "readdirp@3.6.0", + "", + { "dependencies": { "picomatch": "^2.2.1" } }, + 
"sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + ], + + "recast": [ + "recast@0.23.11", + "", + { + "dependencies": { + "ast-types": "^0.16.1", + "esprima": "~4.0.0", + "source-map": "~0.6.1", + "tiny-invariant": "^1.3.3", + "tslib": "^2.0.1", + }, + }, + "sha512-YTUo+Flmw4ZXiWfQKGcwwc11KnoRAYgzAE2E7mXKCjSviTKShtxBsN6YUUBB2gtaBzKzeKunxhUwNHQuRryhWA==", + ], + + "recma-build-jsx": [ + "recma-build-jsx@1.0.0", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-build-jsx": "^3.0.0", + "vfile": "^6.0.0", + }, + }, + "sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==", + ], + + "recma-jsx": [ + "recma-jsx@1.0.1", + "", + { + "dependencies": { + "acorn-jsx": "^5.0.0", + "estree-util-to-js": "^2.0.0", + "recma-parse": "^1.0.0", + "recma-stringify": "^1.0.0", + "unified": "^11.0.0", + }, + "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" }, + }, + "sha512-huSIy7VU2Z5OLv6oFLosQGGDqPqdO1iq6bWNAdhzMxSJP7RAso4fCZ1cKu8j9YHCZf3TPrq4dw3okhrylgcd7w==", + ], + + "recma-parse": [ + "recma-parse@1.0.0", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "esast-util-from-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0", + }, + }, + "sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ==", + ], + + "recma-stringify": [ + "recma-stringify@1.0.0", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-to-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0", + }, + }, + "sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==", + ], + + "reflect.getprototypeof": [ + "reflect.getprototypeof@1.0.10", + "", + { + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.9", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.7", + "get-proto": "^1.0.1", + 
"which-builtin-type": "^1.2.1", + }, + }, + "sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw==", + ], + + "reftools": [ + "reftools@1.1.9", + "", + {}, + "sha512-OVede/NQE13xBQ+ob5CKd5KyeJYU2YInb1bmV4nRoOfquZPkAkxuOXicSe1PvqIuZZ4kD13sPKBbR7UFDmli6w==", + ], + + "regex": [ + "regex@6.1.0", + "", + { "dependencies": { "regex-utilities": "^2.3.0" } }, + "sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg==", + ], + + "regex-recursion": [ + "regex-recursion@6.0.2", + "", + { "dependencies": { "regex-utilities": "^2.3.0" } }, + "sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==", + ], + + "regex-utilities": [ + "regex-utilities@2.3.0", + "", + {}, + "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==", + ], + + "regexp.prototype.flags": [ + "regexp.prototype.flags@1.5.4", + "", + { + "dependencies": { + "call-bind": "^1.0.8", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "set-function-name": "^2.0.2", + }, + }, + "sha512-dYqgNSZbDwkaJ2ceRd9ojCGjBq+mOm9LmtXnAnEGyHhN/5R7iDW2TRw3h+o/jCFxus3P2LfWIIiwowAjANm7IA==", + ], + + "rehype-recma": [ + "rehype-recma@1.0.0", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "hast-util-to-estree": "^3.0.0", + }, + }, + "sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw==", + ], + + "remark-frontmatter": [ + "remark-frontmatter@5.0.0", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-frontmatter": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0", + "unified": "^11.0.0", + }, + }, + "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ==", + ], + + "remark-gfm": [ + "remark-gfm@4.0.1", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + 
"mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0", + }, + }, + "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + ], + + "remark-mdx": [ + "remark-mdx@3.1.1", + "", + { "dependencies": { "mdast-util-mdx": "^3.0.0", "micromark-extension-mdxjs": "^3.0.0" } }, + "sha512-Pjj2IYlUY3+D8x00UJsIOg5BEvfMyeI+2uLPn9VO9Wg4MEtN/VTIq2NEJQfde9PnX15KgtHyl9S0BcTnWrIuWg==", + ], + + "remark-mdx-frontmatter": [ + "remark-mdx-frontmatter@5.2.0", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "estree-util-value-to-estree": "^3.0.0", + "toml": "^3.0.0", + "unified": "^11.0.0", + "unist-util-mdx-define": "^1.0.0", + "yaml": "^2.0.0", + }, + }, + "sha512-U/hjUYTkQqNjjMRYyilJgLXSPF65qbLPdoESOkXyrwz2tVyhAnm4GUKhfXqOOS9W34M3545xEMq+aMpHgVjEeQ==", + ], + + "remark-parse": [ + "remark-parse@11.0.0", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0", + }, + }, + "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + ], + + "remark-rehype": [ + "remark-rehype@11.1.2", + "", + { + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0", + }, + }, + "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + ], + + "remark-stringify": [ + "remark-stringify@11.0.0", + "", + { + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0", + }, + }, + "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + ], + + "require-directory": [ + "require-directory@2.1.1", + "", + {}, + 
"sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + ], + + "require-from-string": [ + "require-from-string@2.0.2", + "", + {}, + "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + ], + + "reselect": [ + "reselect@5.1.1", + "", + {}, + "sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==", + ], + + "resolve-from": [ + "resolve-from@4.0.0", + "", + {}, + "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + ], + + "resolve-pkg-maps": [ + "resolve-pkg-maps@1.0.0", + "", + {}, + "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + ], + + "restore-cursor": [ + "restore-cursor@5.1.0", + "", + { "dependencies": { "onetime": "^7.0.0", "signal-exit": "^4.1.0" } }, + "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==", + ], + + "rettime": [ + "rettime@0.7.0", + "", + {}, + "sha512-LPRKoHnLKd/r3dVxcwO7vhCW+orkOGj9ViueosEBK6ie89CijnfRlhaDhHq/3Hxu4CkWQtxwlBG0mzTQY6uQjw==", + ], + + "reusify": [ + "reusify@1.1.0", + "", + {}, + "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + ], + + "rolldown": [ + "rolldown@1.0.0-beta.50", + "", + { + "dependencies": { + "@oxc-project/types": "=0.97.0", + "@rolldown/pluginutils": "1.0.0-beta.50", + }, + "optionalDependencies": { + "@rolldown/binding-android-arm64": "1.0.0-beta.50", + "@rolldown/binding-darwin-arm64": "1.0.0-beta.50", + "@rolldown/binding-darwin-x64": "1.0.0-beta.50", + "@rolldown/binding-freebsd-x64": "1.0.0-beta.50", + "@rolldown/binding-linux-arm-gnueabihf": "1.0.0-beta.50", + "@rolldown/binding-linux-arm64-gnu": "1.0.0-beta.50", + "@rolldown/binding-linux-arm64-musl": "1.0.0-beta.50", + "@rolldown/binding-linux-x64-gnu": "1.0.0-beta.50", + "@rolldown/binding-linux-x64-musl": "1.0.0-beta.50", + 
"@rolldown/binding-openharmony-arm64": "1.0.0-beta.50", + "@rolldown/binding-wasm32-wasi": "1.0.0-beta.50", + "@rolldown/binding-win32-arm64-msvc": "1.0.0-beta.50", + "@rolldown/binding-win32-ia32-msvc": "1.0.0-beta.50", + "@rolldown/binding-win32-x64-msvc": "1.0.0-beta.50", + }, + "bin": { "rolldown": "bin/cli.mjs" }, + }, + "sha512-JFULvCNl/anKn99eKjOSEubi0lLmNqQDAjyEMME2T4CwezUDL0i6t1O9xZsu2OMehPnV2caNefWpGF+8TnzB6A==", + ], + + "rollup": [ + "rollup@4.57.1", + "", + { + "dependencies": { "@types/estree": "1.0.8" }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.57.1", + "@rollup/rollup-android-arm64": "4.57.1", + "@rollup/rollup-darwin-arm64": "4.57.1", + "@rollup/rollup-darwin-x64": "4.57.1", + "@rollup/rollup-freebsd-arm64": "4.57.1", + "@rollup/rollup-freebsd-x64": "4.57.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", + "@rollup/rollup-linux-arm-musleabihf": "4.57.1", + "@rollup/rollup-linux-arm64-gnu": "4.57.1", + "@rollup/rollup-linux-arm64-musl": "4.57.1", + "@rollup/rollup-linux-loong64-gnu": "4.57.1", + "@rollup/rollup-linux-loong64-musl": "4.57.1", + "@rollup/rollup-linux-ppc64-gnu": "4.57.1", + "@rollup/rollup-linux-ppc64-musl": "4.57.1", + "@rollup/rollup-linux-riscv64-gnu": "4.57.1", + "@rollup/rollup-linux-riscv64-musl": "4.57.1", + "@rollup/rollup-linux-s390x-gnu": "4.57.1", + "@rollup/rollup-linux-x64-gnu": "4.57.1", + "@rollup/rollup-linux-x64-musl": "4.57.1", + "@rollup/rollup-openbsd-x64": "4.57.1", + "@rollup/rollup-openharmony-arm64": "4.57.1", + "@rollup/rollup-win32-arm64-msvc": "4.57.1", + "@rollup/rollup-win32-ia32-msvc": "4.57.1", + "@rollup/rollup-win32-x64-gnu": "4.57.1", + "@rollup/rollup-win32-x64-msvc": "4.57.1", + "fsevents": "~2.3.2", + }, + "bin": { "rollup": "dist/bin/rollup" }, + }, + "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==", + ], + + "router": [ + "router@2.2.0", + "", + { + "dependencies": { + "debug": "^4.4.0", + "depd": "^2.0.0", + 
"is-promise": "^4.0.0", + "parseurl": "^1.3.3", + "path-to-regexp": "^8.0.0", + }, + }, + "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==", + ], + + "run-applescript": [ + "run-applescript@7.1.0", + "", + {}, + "sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==", + ], + + "run-parallel": [ + "run-parallel@1.2.0", + "", + { "dependencies": { "queue-microtask": "^1.2.2" } }, + "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + ], + + "safe-array-concat": [ + "safe-array-concat@1.1.3", + "", + { + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "get-intrinsic": "^1.2.6", + "has-symbols": "^1.1.0", + "isarray": "^2.0.5", + }, + }, + "sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==", + ], + + "safe-push-apply": [ + "safe-push-apply@1.0.0", + "", + { "dependencies": { "es-errors": "^1.3.0", "isarray": "^2.0.5" } }, + "sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==", + ], + + "safe-regex-test": [ + "safe-regex-test@1.1.0", + "", + { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "is-regex": "^1.2.1" } }, + "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", + ], + + "safe-stable-stringify": [ + "safe-stable-stringify@1.1.1", + "", + {}, + "sha512-ERq4hUjKDbJfE4+XtZLFPCDi8Vb1JqaxAPTxWFLBx8XcAlf9Bda/ZJdVezs/NAfsMQScyIlUMx+Yeu7P7rx5jw==", + ], + + "safer-buffer": [ + "safer-buffer@2.1.2", + "", + {}, + "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + ], + + "scheduler": [ + "scheduler@0.27.0", + "", + {}, + "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + ], + + "semver": [ + "semver@6.3.1", + "", + { "bin": { "semver": "bin/semver.js" } }, + 
"sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + ], + + "send": [ + "send@1.2.1", + "", + { + "dependencies": { + "debug": "^4.4.3", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "fresh": "^2.0.0", + "http-errors": "^2.0.1", + "mime-types": "^3.0.2", + "ms": "^2.1.3", + "on-finished": "^2.4.1", + "range-parser": "^1.2.1", + "statuses": "^2.0.2", + }, + }, + "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==", + ], + + "seroval": [ + "seroval@1.4.1", + "", + {}, + "sha512-9GOc+8T6LN4aByLN75uRvMbrwY5RDBW6lSlknsY4LEa9ZmWcxKcRe1G/Q3HZXjltxMHTrStnvrwAICxZrhldtg==", + ], + + "seroval-plugins": [ + "seroval-plugins@1.4.0", + "", + { "peerDependencies": { "seroval": "^1.0" } }, + "sha512-zir1aWzoiax6pbBVjoYVd0O1QQXgIL3eVGBMsBsNmM8Ukq90yGaWlfx0AB9dTS8GPqrOrbXn79vmItCUP9U3BQ==", + ], + + "serve-static": [ + "serve-static@2.2.1", + "", + { + "dependencies": { + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "parseurl": "^1.3.3", + "send": "^1.2.0", + }, + }, + "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==", + ], + + "set-function-length": [ + "set-function-length@1.2.2", + "", + { + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2", + }, + }, + "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + ], + + "set-function-name": [ + "set-function-name@2.0.2", + "", + { + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2", + }, + }, + "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", + ], + + "set-proto": [ + "set-proto@1.0.0", + "", + { + "dependencies": { + 
"dunder-proto": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + }, + }, + "sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==", + ], + + "setprototypeof": [ + "setprototypeof@1.2.0", + "", + {}, + "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + ], + + "shadcn": [ + "shadcn@3.6.2", + "", + { + "dependencies": { + "@antfu/ni": "^25.0.0", + "@babel/core": "^7.28.0", + "@babel/parser": "^7.28.0", + "@babel/plugin-transform-typescript": "^7.28.0", + "@babel/preset-typescript": "^7.27.1", + "@dotenvx/dotenvx": "^1.48.4", + "@modelcontextprotocol/sdk": "^1.17.2", + "browserslist": "^4.26.2", + "commander": "^14.0.0", + "cosmiconfig": "^9.0.0", + "dedent": "^1.6.0", + "deepmerge": "^4.3.1", + "diff": "^8.0.2", + "execa": "^9.6.0", + "fast-glob": "^3.3.3", + "fs-extra": "^11.3.1", + "fuzzysort": "^3.1.0", + "https-proxy-agent": "^7.0.6", + "kleur": "^4.1.5", + "msw": "^2.10.4", + "node-fetch": "^3.3.2", + "open": "^11.0.0", + "ora": "^8.2.0", + "postcss": "^8.5.6", + "postcss-selector-parser": "^7.1.0", + "prompts": "^2.4.2", + "recast": "^0.23.11", + "stringify-object": "^5.0.0", + "ts-morph": "^26.0.0", + "tsconfig-paths": "^4.2.0", + "zod": "^3.24.1", + "zod-to-json-schema": "^3.24.6", + }, + "bin": { "shadcn": "dist/index.js" }, + }, + "sha512-2g48/7UsXTSWMFU9GYww85AN5iVTkErbeycrcleI55R+atqW8HE1M/YDFyQ+0T3Bwsd4e8vycPu9gmwODunDpw==", + ], + + "shebang-command": [ + "shebang-command@2.0.0", + "", + { "dependencies": { "shebang-regex": "^3.0.0" } }, + "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + ], + + "shebang-regex": [ + "shebang-regex@3.0.0", + "", + {}, + "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + ], + + "shiki": [ + "shiki@3.22.0", + "", + { + "dependencies": { + "@shikijs/core": "3.22.0", + "@shikijs/engine-javascript": "3.22.0", + 
"@shikijs/engine-oniguruma": "3.22.0", + "@shikijs/langs": "3.22.0", + "@shikijs/themes": "3.22.0", + "@shikijs/types": "3.22.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4", + }, + }, + "sha512-LBnhsoYEe0Eou4e1VgJACes+O6S6QC0w71fCSp5Oya79inkwkm15gQ1UF6VtQ8j/taMDh79hAB49WUk8ALQW3g==", + ], + + "should": [ + "should@13.2.3", + "", + { + "dependencies": { + "should-equal": "^2.0.0", + "should-format": "^3.0.3", + "should-type": "^1.4.0", + "should-type-adaptors": "^1.0.1", + "should-util": "^1.0.0", + }, + }, + "sha512-ggLesLtu2xp+ZxI+ysJTmNjh2U0TsC+rQ/pfED9bUZZ4DKefP27D+7YJVVTvKsmjLpIi9jAa7itwDGkDDmt1GQ==", + ], + + "should-equal": [ + "should-equal@2.0.0", + "", + { "dependencies": { "should-type": "^1.4.0" } }, + "sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA==", + ], + + "should-format": [ + "should-format@3.0.3", + "", + { "dependencies": { "should-type": "^1.3.0", "should-type-adaptors": "^1.0.1" } }, + "sha512-hZ58adtulAk0gKtua7QxevgUaXTTXxIi8t41L3zo9AHvjXO1/7sdLECuHeIN2SRtYXpNkmhoUP2pdeWgricQ+Q==", + ], + + "should-type": [ + "should-type@1.4.0", + "", + {}, + "sha512-MdAsTu3n25yDbIe1NeN69G4n6mUnJGtSJHygX3+oN0ZbO3DTiATnf7XnYJdGT42JCXurTb1JI0qOBR65shvhPQ==", + ], + + "should-type-adaptors": [ + "should-type-adaptors@1.1.0", + "", + { "dependencies": { "should-type": "^1.3.0", "should-util": "^1.0.0" } }, + "sha512-JA4hdoLnN+kebEp2Vs8eBe9g7uy0zbRo+RMcU0EsNy+R+k049Ki+N5tT5Jagst2g7EAja+euFuoXFCa8vIklfA==", + ], + + "should-util": [ + "should-util@1.0.1", + "", + {}, + "sha512-oXF8tfxx5cDk8r2kYqlkUJzZpDBqVY/II2WhvU0n9Y3XYvAYRmeaf1PvvIvTgPnv4KJ+ES5M0PyDq5Jp+Ygy2g==", + ], + + "side-channel": [ + "side-channel@1.1.0", + "", + { + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2", + }, + }, + 
"sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + ], + + "side-channel-list": [ + "side-channel-list@1.0.0", + "", + { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3" } }, + "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + ], + + "side-channel-map": [ + "side-channel-map@1.0.1", + "", + { + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + }, + }, + "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + ], + + "side-channel-weakmap": [ + "side-channel-weakmap@1.0.2", + "", + { + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1", + }, + }, + "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + ], + + "signal-exit": [ + "signal-exit@3.0.7", + "", + {}, + "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + ], + + "simple-eval": [ + "simple-eval@1.0.1", + "", + { "dependencies": { "jsep": "^1.3.6" } }, + "sha512-LH7FpTAkeD+y5xQC4fzS+tFtaNlvt3Ib1zKzvhjv/Y+cioV4zIuw4IZr2yhRLu67CWL7FR9/6KXKnjRoZTvGGQ==", + ], + + "sisteransi": [ + "sisteransi@1.0.5", + "", + {}, + "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + ], + + "slash": [ + "slash@3.0.0", + "", + {}, + "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + ], + + "solid-js": [ + "solid-js@1.9.10", + "", + { "dependencies": { "csstype": "^3.1.0", "seroval": "~1.3.0", "seroval-plugins": "~1.3.0" } }, + "sha512-Coz956cos/EPDlhs6+jsdTxKuJDPT7B5SVIWgABwROyxjY7Xbr8wkzD68Et+NxnV7DLJ3nJdAC2r9InuV/4Jew==", + ], + + "sonner": [ + "sonner@2.0.7", + "", + { + "peerDependencies": { 
+ "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", + "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", + }, + }, + "sha512-W6ZN4p58k8aDKA4XPcx2hpIQXBRAgyiWVkYhT7CvK6D3iAu7xjvVyhQHg2/iaKJZ1XVJ4r7XuwGL+WGEK37i9w==", + ], + + "source-map": [ + "source-map@0.7.6", + "", + {}, + "sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==", + ], + + "source-map-js": [ + "source-map-js@1.2.1", + "", + {}, + "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + ], + + "space-separated-tokens": [ + "space-separated-tokens@2.0.2", + "", + {}, + "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + ], + + "statuses": [ + "statuses@2.0.2", + "", + {}, + "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + ], + + "stdin-discarder": [ + "stdin-discarder@0.2.2", + "", + {}, + "sha512-UhDfHmA92YAlNnCfhmq0VeNL5bDbiZGg7sZ2IvPsXubGkiNa9EC+tUTsjBRsYUAz87btI6/1wf4XoVvQ3uRnmQ==", + ], + + "stop-iteration-iterator": [ + "stop-iteration-iterator@1.1.0", + "", + { "dependencies": { "es-errors": "^1.3.0", "internal-slot": "^1.1.0" } }, + "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==", + ], + + "strict-event-emitter": [ + "strict-event-emitter@0.5.1", + "", + {}, + "sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==", + ], + + "string-argv": [ + "string-argv@0.3.2", + "", + {}, + "sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==", + ], + + "string-width": [ + "string-width@7.2.0", + "", + { + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0", + }, + }, + "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + ], + + "string.prototype.trim": [ + "string.prototype.trim@1.2.10", + 
"", + { + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-data-property": "^1.1.4", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.5", + "es-object-atoms": "^1.0.0", + "has-property-descriptors": "^1.0.2", + }, + }, + "sha512-Rs66F0P/1kedk5lyYyH9uBzuiI/kNRmwJAR9quK6VOtIpZ2G+hMZd+HQbbv25MgCA6gEffoMZYxlTod4WcdrKA==", + ], + + "string.prototype.trimend": [ + "string.prototype.trimend@1.0.9", + "", + { + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.2", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + }, + }, + "sha512-G7Ok5C6E/j4SGfyLCloXTrngQIQU3PWtXGst3yM7Bea9FRURf1S42ZHlZZtsNque2FN2PoUhfZXYLNWwEr4dLQ==", + ], + + "string.prototype.trimstart": [ + "string.prototype.trimstart@1.0.8", + "", + { + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + }, + }, + "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==", + ], + + "stringify-entities": [ + "stringify-entities@4.0.4", + "", + { + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0", + }, + }, + "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + ], + + "stringify-object": [ + "stringify-object@5.0.0", + "", + { + "dependencies": { + "get-own-enumerable-keys": "^1.0.0", + "is-obj": "^3.0.0", + "is-regexp": "^3.1.0", + }, + }, + "sha512-zaJYxz2FtcMb4f+g60KsRNFOpVMUyuJgA51Zi5Z1DOTC3S59+OQiVOzE9GZt0x72uBGWKsQIuBKeF9iusmKFsg==", + ], + + "strip-ansi": [ + "strip-ansi@6.0.1", + "", + { "dependencies": { "ansi-regex": "^5.0.1" } }, + "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + ], + + "strip-bom": [ + "strip-bom@3.0.0", + "", + {}, + "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + ], + + "strip-final-newline": [ + "strip-final-newline@2.0.0", + 
"", + {}, + "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + ], + + "strip-json-comments": [ + "strip-json-comments@3.1.1", + "", + {}, + "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + ], + + "style-to-js": [ + "style-to-js@1.1.21", + "", + { "dependencies": { "style-to-object": "1.0.14" } }, + "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==", + ], + + "style-to-object": [ + "style-to-object@1.0.14", + "", + { "dependencies": { "inline-style-parser": "0.2.7" } }, + "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==", + ], + + "supports-color": [ + "supports-color@7.2.0", + "", + { "dependencies": { "has-flag": "^4.0.0" } }, + "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + ], + + "swagger2openapi": [ + "swagger2openapi@7.0.8", + "", + { + "dependencies": { + "call-me-maybe": "^1.0.1", + "node-fetch": "^2.6.1", + "node-fetch-h2": "^2.3.0", + "node-readfiles": "^0.2.0", + "oas-kit-common": "^1.0.8", + "oas-resolver": "^2.5.6", + "oas-schema-walker": "^1.1.5", + "oas-validator": "^5.0.8", + "reftools": "^1.1.9", + "yaml": "^1.10.0", + "yargs": "^17.0.1", + }, + "bin": { + "swagger2openapi": "swagger2openapi.js", + "oas-validate": "oas-validate.js", + "boast": "boast.js", + }, + }, + "sha512-upi/0ZGkYgEcLeGieoz8gT74oWHA0E7JivX7aN9mAf+Tc7BQoRBvnIGHoPDw+f9TXTW4s6kGYCZJtauP6OYp7g==", + ], + + "tabbable": [ + "tabbable@6.3.0", + "", + {}, + "sha512-EIHvdY5bPLuWForiR/AN2Bxngzpuwn1is4asboytXtpTgsArc+WmSJKVLlhdh71u7jFcryDqB2A8lQvj78MkyQ==", + ], + + "tagged-tag": [ + "tagged-tag@1.0.0", + "", + {}, + "sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng==", + ], + + "tailwind-merge": [ + "tailwind-merge@3.4.0", + "", + {}, + 
"sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==", + ], + + "tailwindcss": [ + "tailwindcss@4.1.18", + "", + {}, + "sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==", + ], + + "tapable": [ + "tapable@2.3.0", + "", + {}, + "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==", + ], + + "tiny-invariant": [ + "tiny-invariant@1.3.3", + "", + {}, + "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + ], + + "tiny-warning": [ + "tiny-warning@1.0.3", + "", + {}, + "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==", + ], + + "tinyexec": [ + "tinyexec@1.0.2", + "", + {}, + "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==", + ], + + "tinyglobby": [ + "tinyglobby@0.2.15", + "", + { "dependencies": { "fdir": "^6.5.0", "picomatch": "^4.0.3" } }, + "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + ], + + "tldts": [ + "tldts@7.0.19", + "", + { "dependencies": { "tldts-core": "^7.0.19" }, "bin": { "tldts": "bin/cli.js" } }, + "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA==", + ], + + "tldts-core": [ + "tldts-core@7.0.19", + "", + {}, + "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A==", + ], + + "to-regex-range": [ + "to-regex-range@5.0.1", + "", + { "dependencies": { "is-number": "^7.0.0" } }, + "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + ], + + "toidentifier": [ + "toidentifier@1.0.1", + "", + {}, + "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + ], + + "toml": [ + "toml@3.0.0", + "", + {}, + 
"sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w==", + ], + + "tough-cookie": [ + "tough-cookie@6.0.0", + "", + { "dependencies": { "tldts": "^7.0.5" } }, + "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==", + ], + + "tr46": [ + "tr46@0.0.3", + "", + {}, + "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + ], + + "trim-lines": [ + "trim-lines@3.0.1", + "", + {}, + "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + ], + + "trough": [ + "trough@2.2.0", + "", + {}, + "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + ], + + "ts-api-utils": [ + "ts-api-utils@2.1.0", + "", + { "peerDependencies": { "typescript": ">=4.8.4" } }, + "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", + ], + + "ts-morph": [ + "ts-morph@26.0.0", + "", + { "dependencies": { "@ts-morph/common": "~0.27.0", "code-block-writer": "^13.0.3" } }, + "sha512-ztMO++owQnz8c/gIENcM9XfCEzgoGphTv+nKpYNM1bgsdOVC/jRZuEBf6N+mLLDNg68Kl+GgUZfOySaRiG1/Ug==", + ], + + "tsconfck": [ + "tsconfck@2.1.2", + "", + { + "peerDependencies": { "typescript": "^4.3.5 || ^5.0.0" }, + "optionalPeers": ["typescript"], + "bin": { "tsconfck": "bin/tsconfck.js" }, + }, + "sha512-ghqN1b0puy3MhhviwO2kGF8SeMDNhEbnKxjK7h6+fvY9JAxqvXi8y5NAHSQv687OVboS2uZIByzGd45/YxrRHg==", + ], + + "tsconfig-paths": [ + "tsconfig-paths@4.2.0", + "", + { "dependencies": { "json5": "^2.2.2", "minimist": "^1.2.6", "strip-bom": "^3.0.0" } }, + "sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==", + ], + + "tslib": [ + "tslib@2.8.1", + "", + {}, + "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + ], + + "tsx": [ + "tsx@4.21.0", + "", + { + "dependencies": { "esbuild": 
"~0.27.0", "get-tsconfig": "^4.7.5" }, + "optionalDependencies": { "fsevents": "~2.3.3" }, + "bin": { "tsx": "dist/cli.mjs" }, + }, + "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", + ], + + "tw-animate-css": [ + "tw-animate-css@1.4.0", + "", + {}, + "sha512-7bziOlRqH0hJx80h/3mbicLW7o8qLsH5+RaLR2t+OHM3D0JlWGODQKQ4cxbK7WlvmUxpcj6Kgu6EKqjrGFe3QQ==", + ], + + "type-check": [ + "type-check@0.4.0", + "", + { "dependencies": { "prelude-ls": "^1.2.1" } }, + "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + ], + + "type-fest": [ + "type-fest@5.3.1", + "", + { "dependencies": { "tagged-tag": "^1.0.0" } }, + "sha512-VCn+LMHbd4t6sF3wfU/+HKT63C9OoyrSIf4b+vtWHpt2U7/4InZG467YDNMFMR70DdHjAdpPWmw2lzRdg0Xqqg==", + ], + + "type-is": [ + "type-is@2.0.1", + "", + { + "dependencies": { + "content-type": "^1.0.5", + "media-typer": "^1.1.0", + "mime-types": "^3.0.0", + }, + }, + "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==", + ], + + "typed-array-buffer": [ + "typed-array-buffer@1.0.3", + "", + { + "dependencies": { + "call-bound": "^1.0.3", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.14", + }, + }, + "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw==", + ], + + "typed-array-byte-length": [ + "typed-array-byte-length@1.0.3", + "", + { + "dependencies": { + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.14", + }, + }, + "sha512-BaXgOuIxz8n8pIq3e7Atg/7s+DpiYrxn4vdot3w9KbnBhcRQq6o3xemQdIfynqSeXeDrF32x+WvfzmOjPiY9lg==", + ], + + "typed-array-byte-offset": [ + "typed-array-byte-offset@1.0.4", + "", + { + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "for-each": "^0.3.3", + "gopd": "^1.2.0", + "has-proto": "^1.2.0", + "is-typed-array": "^1.1.15", + "reflect.getprototypeof": "^1.0.9", 
+ }, + }, + "sha512-bTlAFB/FBYMcuX81gbL4OcpH5PmlFHqlCCpAl8AlEzMz5k53oNDvN8p1PNOWLEmI2x4orp3raOFB51tv9X+MFQ==", + ], + + "typed-array-length": [ + "typed-array-length@1.0.7", + "", + { + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0", + "reflect.getprototypeof": "^1.0.6", + }, + }, + "sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg==", + ], + + "typedoc": [ + "typedoc@0.28.15", + "", + { + "dependencies": { + "@gerrit0/mini-shiki": "^3.17.0", + "lunr": "^2.3.9", + "markdown-it": "^14.1.0", + "minimatch": "^9.0.5", + "yaml": "^2.8.1", + }, + "peerDependencies": { + "typescript": "5.0.x || 5.1.x || 5.2.x || 5.3.x || 5.4.x || 5.5.x || 5.6.x || 5.7.x || 5.8.x || 5.9.x", + }, + "bin": { "typedoc": "bin/typedoc" }, + }, + "sha512-mw2/2vTL7MlT+BVo43lOsufkkd2CJO4zeOSuWQQsiXoV2VuEn7f6IZp2jsUDPmBMABpgR0R5jlcJ2OGEFYmkyg==", + ], + + "typedoc-plugin-coverage": [ + "typedoc-plugin-coverage@4.0.2", + "", + { "peerDependencies": { "typedoc": "0.28.x" } }, + "sha512-mfn0e7NCqB8x2PfvhXrtmd7KWlsNf1+B2N9y8gR/jexXBLrXl/0e+b2HdG5HaTXGi7i0t2pyQY2VRmq7gtdEHQ==", + ], + + "typedoc-plugin-markdown": [ + "typedoc-plugin-markdown@4.9.0", + "", + { "peerDependencies": { "typedoc": "0.28.x" } }, + "sha512-9Uu4WR9L7ZBgAl60N/h+jqmPxxvnC9nQAlnnO/OujtG2ubjnKTVUFY1XDhcMY+pCqlX3N2HsQM2QTYZIU9tJuw==", + ], + + "typescript": [ + "typescript@5.9.3", + "", + { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, + "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + ], + + "typescript-eslint": [ + "typescript-eslint@8.50.1", + "", + { + "dependencies": { + "@typescript-eslint/eslint-plugin": "8.50.1", + "@typescript-eslint/parser": "8.50.1", + "@typescript-eslint/typescript-estree": "8.50.1", + "@typescript-eslint/utils": "8.50.1", + }, + "peerDependencies": { "eslint": "^8.57.0 || ^9.0.0", 
"typescript": ">=4.8.4 <6.0.0" }, + }, + "sha512-ytTHO+SoYSbhAH9CrYnMhiLx8To6PSSvqnvXyPUgPETCvB6eBKmTI9w6XMPS3HsBRGkwTVBX+urA8dYQx6bHfQ==", + ], + + "uc.micro": [ + "uc.micro@2.1.0", + "", + {}, + "sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==", + ], + + "unbox-primitive": [ + "unbox-primitive@1.1.0", + "", + { + "dependencies": { + "call-bound": "^1.0.3", + "has-bigints": "^1.0.2", + "has-symbols": "^1.1.0", + "which-boxed-primitive": "^1.1.1", + }, + }, + "sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==", + ], + + "undici-types": [ + "undici-types@7.16.0", + "", + {}, + "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", + ], + + "unicorn-magic": [ + "unicorn-magic@0.3.0", + "", + {}, + "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", + ], + + "unified": [ + "unified@11.0.5", + "", + { + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0", + }, + }, + "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + ], + + "unist-util-is": [ + "unist-util-is@6.0.1", + "", + { "dependencies": { "@types/unist": "^3.0.0" } }, + "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==", + ], + + "unist-util-mdx-define": [ + "unist-util-mdx-define@1.1.2", + "", + { + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-util-scope": "^1.0.0", + "estree-walker": "^3.0.0", + "vfile": "^6.0.0", + }, + }, + "sha512-9ncH7i7TN5Xn7/tzX5bE3rXgz1X/u877gYVAUB3mLeTKYJmQHmqKTDBi6BTGXV7AeolBCI9ErcVsOt2qryoD0g==", + ], + + "unist-util-position": [ + "unist-util-position@5.0.0", + "", + { 
"dependencies": { "@types/unist": "^3.0.0" } }, + "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + ], + + "unist-util-position-from-estree": [ + "unist-util-position-from-estree@2.0.0", + "", + { "dependencies": { "@types/unist": "^3.0.0" } }, + "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", + ], + + "unist-util-stringify-position": [ + "unist-util-stringify-position@4.0.0", + "", + { "dependencies": { "@types/unist": "^3.0.0" } }, + "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + ], + + "unist-util-visit": [ + "unist-util-visit@5.1.0", + "", + { + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0", + }, + }, + "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==", + ], + + "unist-util-visit-parents": [ + "unist-util-visit-parents@6.0.2", + "", + { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" } }, + "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==", + ], + + "universalify": [ + "universalify@2.0.1", + "", + {}, + "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + ], + + "unpipe": [ + "unpipe@1.0.0", + "", + {}, + "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + ], + + "unplugin": [ + "unplugin@2.3.11", + "", + { + "dependencies": { + "@jridgewell/remapping": "^2.3.5", + "acorn": "^8.15.0", + "picomatch": "^4.0.3", + "webpack-virtual-modules": "^0.6.2", + }, + }, + "sha512-5uKD0nqiYVzlmCRs01Fhs2BdkEgBS3SAVP6ndrBsuK42iC2+JHyxM05Rm9G8+5mkmRtzMZGY8Ct5+mliZxU/Ww==", + ], + + "until-async": [ + "until-async@3.0.2", + "", + {}, + "sha512-IiSk4HlzAMqTUseHHe3VhIGyuFmN90zMTpD3Z3y8jeQbzLIq500MVM7Jq2vUAnTKAFPJrqwkzr6PoTcPhGcOiw==", + ], + + 
"update-browserslist-db": [ + "update-browserslist-db@1.2.3", + "", + { + "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" }, + "peerDependencies": { "browserslist": ">= 4.21.0" }, + "bin": { "update-browserslist-db": "cli.js" }, + }, + "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + ], + + "uri-js": [ + "uri-js@4.4.1", + "", + { "dependencies": { "punycode": "^2.1.0" } }, + "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + ], + + "urijs": [ + "urijs@1.19.11", + "", + {}, + "sha512-HXgFDgDommxn5/bIv0cnQZsPhHDA90NPHD6+c/v21U5+Sx5hoP8+dP9IZXBU1gIfvdRfhG8cel9QNPeionfcCQ==", + ], + + "use-callback-ref": [ + "use-callback-ref@1.3.3", + "", + { + "dependencies": { "tslib": "^2.0.0" }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg==", + ], + + "use-sidecar": [ + "use-sidecar@1.1.3", + "", + { + "dependencies": { "detect-node-es": "^1.1.0", "tslib": "^2.0.0" }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc", + }, + "optionalPeers": ["@types/react"], + }, + "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ==", + ], + + "use-sync-external-store": [ + "use-sync-external-store@1.6.0", + "", + { "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, + "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + ], + + "util-deprecate": [ + "util-deprecate@1.0.2", + "", + {}, + "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + ], + + "utility-types": [ + "utility-types@3.11.0", + "", + {}, + 
"sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==", + ], + + "validator": [ + "validator@13.15.26", + "", + {}, + "sha512-spH26xU080ydGggxRyR1Yhcbgx+j3y5jbNXk/8L+iRvdIEQ4uTRH2Sgf2dokud6Q4oAtsbNvJ1Ft+9xmm6IZcA==", + ], + + "vary": [ + "vary@1.1.2", + "", + {}, + "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + ], + + "vfile": [ + "vfile@6.0.3", + "", + { "dependencies": { "@types/unist": "^3.0.0", "vfile-message": "^4.0.0" } }, + "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + ], + + "vfile-message": [ + "vfile-message@4.0.3", + "", + { "dependencies": { "@types/unist": "^3.0.0", "unist-util-stringify-position": "^4.0.0" } }, + "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + ], + + "vite": [ + "rolldown-vite@7.2.5", + "", + { + "dependencies": { + "@oxc-project/runtime": "0.97.0", + "fdir": "^6.5.0", + "lightningcss": "^1.30.2", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rolldown": "1.0.0-beta.50", + "tinyglobby": "^0.2.15", + }, + "optionalDependencies": { "fsevents": "~2.3.3" }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "esbuild": "^0.25.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2", + }, + "optionalPeers": [ + "@types/node", + "esbuild", + "jiti", + "less", + "sass", + "sass-embedded", + "stylus", + "sugarss", + "terser", + "tsx", + "yaml", + ], + "bin": { "vite": "bin/vite.js" }, + }, + "sha512-u09tdk/huMiN8xwoiBbig197jKdCamQTtOruSalOzbqGje3jdHiV0njQlAW0YvzoahkirFePNQ4RYlfnRQpXZA==", + ], + + "web-streams-polyfill": [ + "web-streams-polyfill@3.3.3", + "", + {}, + "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + ], + + 
"web-vitals": [ + "web-vitals@5.1.0", + "", + {}, + "sha512-ArI3kx5jI0atlTtmV0fWU3fjpLmq/nD3Zr1iFFlJLaqa5wLBkUSzINwBPySCX/8jRyjlmy1Volw1kz1g9XE4Jg==", + ], + + "webidl-conversions": [ + "webidl-conversions@3.0.1", + "", + {}, + "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + ], + + "webpack-virtual-modules": [ + "webpack-virtual-modules@0.6.2", + "", + {}, + "sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ==", + ], + + "whatwg-url": [ + "whatwg-url@5.0.0", + "", + { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, + "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + ], + + "which": [ + "which@2.0.2", + "", + { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, + "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + ], + + "which-boxed-primitive": [ + "which-boxed-primitive@1.1.1", + "", + { + "dependencies": { + "is-bigint": "^1.1.0", + "is-boolean-object": "^1.2.1", + "is-number-object": "^1.1.1", + "is-string": "^1.1.1", + "is-symbol": "^1.1.1", + }, + }, + "sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==", + ], + + "which-builtin-type": [ + "which-builtin-type@1.2.1", + "", + { + "dependencies": { + "call-bound": "^1.0.2", + "function.prototype.name": "^1.1.6", + "has-tostringtag": "^1.0.2", + "is-async-function": "^2.0.0", + "is-date-object": "^1.1.0", + "is-finalizationregistry": "^1.1.0", + "is-generator-function": "^1.0.10", + "is-regex": "^1.2.1", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.1.0", + "which-collection": "^1.0.2", + "which-typed-array": "^1.1.16", + }, + }, + "sha512-6iBczoX+kDQ7a3+YJBnh3T+KZRxM/iYNPXicqk66/Qfm1b93iu+yOImkg0zHbj5LNOcNv1TEADiZ0xa34B4q6Q==", + ], + + "which-collection": [ + 
"which-collection@1.0.2", + "", + { + "dependencies": { + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3", + }, + }, + "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", + ], + + "which-typed-array": [ + "which-typed-array@1.1.19", + "", + { + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + }, + }, + "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==", + ], + + "word-wrap": [ + "word-wrap@1.2.5", + "", + {}, + "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + ], + + "wrap-ansi": [ + "wrap-ansi@6.2.0", + "", + { + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0", + }, + }, + "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + ], + + "wrappy": [ + "wrappy@1.0.2", + "", + {}, + "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + ], + + "wsl-utils": [ + "wsl-utils@0.3.0", + "", + { "dependencies": { "is-wsl": "^3.1.0", "powershell-utils": "^0.1.0" } }, + "sha512-3sFIGLiaDP7rTO4xh3g+b3AzhYDIUGGywE/WsmqzJWDxus5aJXVnPTNC/6L+r2WzrwXqVOdD262OaO+cEyPMSQ==", + ], + + "y18n": [ + "y18n@5.0.8", + "", + {}, + "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + ], + + "yallist": [ + "yallist@3.1.1", + "", + {}, + "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + ], + + "yaml": [ + "yaml@2.8.2", + "", + { "bin": { "yaml": "bin.mjs" } }, + "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==", + ], + + "yargs": [ + "yargs@17.7.2", + "", + { + 
"dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1", + }, + }, + "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + ], + + "yargs-parser": [ + "yargs-parser@21.1.1", + "", + {}, + "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + ], + + "yocto-queue": [ + "yocto-queue@0.1.0", + "", + {}, + "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + ], + + "yoctocolors": [ + "yoctocolors@2.1.2", + "", + {}, + "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug==", + ], + + "yoctocolors-cjs": [ + "yoctocolors-cjs@2.1.3", + "", + {}, + "sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw==", + ], + + "zod": [ + "zod@4.2.1", + "", + {}, + "sha512-0wZ1IRqGGhMP76gLqz8EyfBXKk0J2qo2+H3fi4mcUP/KtTocoX08nmIAHl1Z2kJIZbZee8KOpBCSNPRgauucjw==", + ], + + "zod-to-json-schema": [ + "zod-to-json-schema@3.25.0", + "", + { "peerDependencies": { "zod": "^3.25 || ^4" } }, + "sha512-HvWtU2UG41LALjajJrML6uQejQhNJx+JBO9IflpSja4R03iNWfKXrj6W2h7ljuLyc1nKS+9yDyL/9tD1U/yBnQ==", + ], + + "zod-validation-error": [ + "zod-validation-error@4.0.2", + "", + { "peerDependencies": { "zod": "^3.25.0 || ^4.0.0" } }, + "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ==", + ], + + "zwitch": [ + "zwitch@2.0.4", + "", + {}, + "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + ], + + "@apidevtools/swagger-parser/ajv": [ + "ajv@8.17.1", + "", + { + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + }, + }, + 
"sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + ], + + "@dotenvx/dotenvx/commander": [ + "commander@11.1.0", + "", + {}, + "sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==", + ], + + "@dotenvx/dotenvx/which": [ + "which@4.0.0", + "", + { "dependencies": { "isexe": "^3.1.1" }, "bin": { "node-which": "bin/which.js" } }, + "sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg==", + ], + + "@eslint-community/eslint-utils/eslint-visitor-keys": [ + "eslint-visitor-keys@3.4.3", + "", + {}, + "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + ], + + "@eslint/eslintrc/globals": [ + "globals@14.0.0", + "", + {}, + "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + ], + + "@gerrit0/mini-shiki/@shikijs/engine-oniguruma": [ + "@shikijs/engine-oniguruma@3.20.0", + "", + { "dependencies": { "@shikijs/types": "3.20.0", "@shikijs/vscode-textmate": "^10.0.2" } }, + "sha512-Yx3gy7xLzM0ZOjqoxciHjA7dAt5tyzJE3L4uQoM83agahy+PlW244XJSrmJRSBvGYELDhYXPacD4R/cauV5bzQ==", + ], + + "@gerrit0/mini-shiki/@shikijs/langs": [ + "@shikijs/langs@3.20.0", + "", + { "dependencies": { "@shikijs/types": "3.20.0" } }, + "sha512-le+bssCxcSHrygCWuOrYJHvjus6zhQ2K7q/0mgjiffRbkhM4o1EWu2m+29l0yEsHDbWaWPNnDUTRVVBvBBeKaA==", + ], + + "@gerrit0/mini-shiki/@shikijs/themes": [ + "@shikijs/themes@3.20.0", + "", + { "dependencies": { "@shikijs/types": "3.20.0" } }, + "sha512-U1NSU7Sl26Q7ErRvJUouArxfM2euWqq1xaSrbqMu2iqa+tSp0D1Yah8216sDYbdDHw4C8b75UpE65eWorm2erQ==", + ], + + "@gerrit0/mini-shiki/@shikijs/types": [ + "@shikijs/types@3.20.0", + "", + { "dependencies": { "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, + "sha512-lhYAATn10nkZcBQ0BlzSbJA3wcmL5MXUUF8d2Zzon6saZDlToKaiRX60n2+ZaHJCmXEcZRWNzn+k9vplr8Jhsw==", + ], + + "@ibm-cloud/openapi-ruleset/minimatch": 
[ + "minimatch@6.2.0", + "", + { "dependencies": { "brace-expansion": "^2.0.1" } }, + "sha512-sauLxniAmvnhhRjFwPNnJKaPFYyddAgbYdeUpHULtCT/GhzdCx/MDNy+Y40lBxTQUrMzDE8e0S43Z5uqfO0REg==", + ], + + "@inquirer/core/signal-exit": [ + "signal-exit@4.1.0", + "", + {}, + "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + ], + + "@modelcontextprotocol/sdk/ajv": [ + "ajv@8.17.1", + "", + { + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + }, + }, + "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + ], + + "@opentelemetry/otlp-transformer/@opentelemetry/resources": [ + "@opentelemetry/resources@2.2.0", + "", + { + "dependencies": { + "@opentelemetry/core": "2.2.0", + "@opentelemetry/semantic-conventions": "^1.29.0", + }, + "peerDependencies": { "@opentelemetry/api": ">=1.3.0 <1.10.0" }, + }, + "sha512-1pNQf/JazQTMA0BiO5NINUzH0cbLbbl7mntLa4aJNmCCXSj0q03T5ZXXL0zw4G55TjdL9Tz32cznGClf+8zr5A==", + ], + + "@opentelemetry/resources/@opentelemetry/core": [ + "@opentelemetry/core@2.5.1", + "", + { + "dependencies": { "@opentelemetry/semantic-conventions": "^1.29.0" }, + "peerDependencies": { "@opentelemetry/api": ">=1.0.0 <1.10.0" }, + }, + "sha512-Dwlc+3HAZqpgTYq0MUyZABjFkcrKTePwuiFVLjahGD8cx3enqihmpAmdgNFO1R4m/sIe5afjJrA25Prqy4NXlA==", + ], + + "@opentelemetry/sdk-logs/@opentelemetry/resources": [ + "@opentelemetry/resources@2.2.0", + "", + { + "dependencies": { + "@opentelemetry/core": "2.2.0", + "@opentelemetry/semantic-conventions": "^1.29.0", + }, + "peerDependencies": { "@opentelemetry/api": ">=1.3.0 <1.10.0" }, + }, + "sha512-1pNQf/JazQTMA0BiO5NINUzH0cbLbbl7mntLa4aJNmCCXSj0q03T5ZXXL0zw4G55TjdL9Tz32cznGClf+8zr5A==", + ], + + "@opentelemetry/sdk-metrics/@opentelemetry/resources": [ + "@opentelemetry/resources@2.2.0", + "", + { + "dependencies": { + "@opentelemetry/core": "2.2.0", + 
"@opentelemetry/semantic-conventions": "^1.29.0", + }, + "peerDependencies": { "@opentelemetry/api": ">=1.3.0 <1.10.0" }, + }, + "sha512-1pNQf/JazQTMA0BiO5NINUzH0cbLbbl7mntLa4aJNmCCXSj0q03T5ZXXL0zw4G55TjdL9Tz32cznGClf+8zr5A==", + ], + + "@opentelemetry/sdk-trace-base/@opentelemetry/resources": [ + "@opentelemetry/resources@2.2.0", + "", + { + "dependencies": { + "@opentelemetry/core": "2.2.0", + "@opentelemetry/semantic-conventions": "^1.29.0", + }, + "peerDependencies": { "@opentelemetry/api": ">=1.3.0 <1.10.0" }, + }, + "sha512-1pNQf/JazQTMA0BiO5NINUzH0cbLbbl7mntLa4aJNmCCXSj0q03T5ZXXL0zw4G55TjdL9Tz32cznGClf+8zr5A==", + ], + + "@rollup/pluginutils/estree-walker": [ + "estree-walker@2.0.2", + "", + {}, + "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + ], + + "@stoplight/better-ajv-errors/ajv": [ + "ajv@8.17.1", + "", + { + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + }, + }, + "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + ], + + "@stoplight/json-ref-readers/node-fetch": [ + "node-fetch@2.7.0", + "", + { + "dependencies": { "whatwg-url": "^5.0.0" }, + "peerDependencies": { "encoding": "^0.1.0" }, + "optionalPeers": ["encoding"], + }, + "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + ], + + "@stoplight/json-ref-readers/tslib": [ + "tslib@1.14.1", + "", + {}, + "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", + ], + + "@stoplight/spectral-core/ajv": [ + "ajv@8.17.1", + "", + { + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + }, + }, + "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + ], + + 
"@stoplight/spectral-core/ajv-formats": [ + "ajv-formats@2.1.1", + "", + { "dependencies": { "ajv": "^8.0.0" } }, + "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + ], + + "@stoplight/spectral-functions/ajv": [ + "ajv@8.17.1", + "", + { + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + }, + }, + "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + ], + + "@stoplight/spectral-functions/ajv-formats": [ + "ajv-formats@2.1.1", + "", + { "dependencies": { "ajv": "^8.0.0" } }, + "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + ], + + "@stoplight/spectral-parsers/@stoplight/types": [ + "@stoplight/types@14.1.1", + "", + { "dependencies": { "@types/json-schema": "^7.0.4", "utility-types": "^3.10.0" } }, + "sha512-/kjtr+0t0tjKr+heVfviO9FrU/uGLc+QNX3fHJc19xsCNYqU7lVhaXxDmEID9BZTjG+/r9pK9xP/xU02XGg65g==", + ], + + "@stoplight/spectral-rulesets/ajv": [ + "ajv@8.17.1", + "", + { + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + }, + }, + "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + ], + + "@stoplight/spectral-rulesets/ajv-formats": [ + "ajv-formats@2.1.1", + "", + { "dependencies": { "ajv": "^8.0.0" } }, + "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + ], + + "@stoplight/spectral-rulesets/json-schema-traverse": [ + "json-schema-traverse@1.0.0", + "", + {}, + "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + ], + + "@stoplight/spectral-runtime/node-fetch": [ + "node-fetch@2.7.0", + "", + { + "dependencies": { "whatwg-url": "^5.0.0" }, + "peerDependencies": { "encoding": "^0.1.0" 
}, + "optionalPeers": ["encoding"], + }, + "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + ], + + "@stoplight/yaml/@stoplight/types": [ + "@stoplight/types@14.1.1", + "", + { "dependencies": { "@types/json-schema": "^7.0.4", "utility-types": "^3.10.0" } }, + "sha512-/kjtr+0t0tjKr+heVfviO9FrU/uGLc+QNX3fHJc19xsCNYqU7lVhaXxDmEID9BZTjG+/r9pK9xP/xU02XGg65g==", + ], + + "@tailwindcss/oxide-wasm32-wasi/@emnapi/core": [ + "@emnapi/core@1.7.1", + "", + { "dependencies": { "@emnapi/wasi-threads": "1.1.0", "tslib": "^2.4.0" }, "bundled": true }, + "sha512-o1uhUASyo921r2XtHYOHy7gdkGLge8ghBEQHMWmyJFoXlpU58kIrhhN3w26lpQb6dspetweapMn2CSNwQ8I4wg==", + ], + + "@tailwindcss/oxide-wasm32-wasi/@emnapi/runtime": [ + "@emnapi/runtime@1.7.1", + "", + { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, + "sha512-PVtJr5CmLwYAU9PZDMITZoR5iAOShYREoR45EyyLrbntV50mdePTgUn4AmOw90Ifcj+x2kRjdzr1HP3RrNiHGA==", + ], + + "@tailwindcss/oxide-wasm32-wasi/@emnapi/wasi-threads": [ + "@emnapi/wasi-threads@1.1.0", + "", + { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, + "sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==", + ], + + "@tailwindcss/oxide-wasm32-wasi/@napi-rs/wasm-runtime": [ + "@napi-rs/wasm-runtime@1.1.0", + "", + { + "dependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1", + "@tybys/wasm-util": "^0.10.1", + }, + "bundled": true, + }, + "sha512-Fq6DJW+Bb5jaWE69/qOE0D1TUN9+6uWhCeZpdnSBk14pjLcCWR7Q8n49PTSPHazM37JqrsdpEthXy2xn6jWWiA==", + ], + + "@tailwindcss/oxide-wasm32-wasi/@tybys/wasm-util": [ + "@tybys/wasm-util@0.10.1", + "", + { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, + "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==", + ], + + "@tailwindcss/oxide-wasm32-wasi/tslib": [ + "tslib@2.8.1", + "", + { "bundled": true }, + 
"sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + ], + + "@tanstack/form-core/@tanstack/store": [ + "@tanstack/store@0.7.7", + "", + {}, + "sha512-xa6pTan1bcaqYDS9BDpSiS63qa6EoDkPN9RsRaxHuDdVDNntzq3xNwR5YKTU/V3SkSyC9T4YVOPh2zRQN0nhIQ==", + ], + + "@tanstack/router-generator/zod": [ + "zod@3.25.76", + "", + {}, + "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + ], + + "@tanstack/router-plugin/zod": [ + "zod@3.25.76", + "", + {}, + "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + ], + + "@ts-morph/common/minimatch": [ + "minimatch@10.1.1", + "", + { "dependencies": { "@isaacs/brace-expansion": "^5.0.0" } }, + "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==", + ], + + "@typescript-eslint/eslint-plugin/ignore": [ + "ignore@7.0.5", + "", + {}, + "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + ], + + "@typescript-eslint/typescript-estree/minimatch": [ + "minimatch@9.0.5", + "", + { "dependencies": { "brace-expansion": "^2.0.1" } }, + "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + ], + + "@typescript-eslint/typescript-estree/semver": [ + "semver@7.7.3", + "", + { "bin": { "semver": "bin/semver.js" } }, + "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + ], + + "accepts/mime-types": [ + "mime-types@3.0.2", + "", + { "dependencies": { "mime-db": "^1.54.0" } }, + "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + ], + + "ajv-errors/ajv": [ + "ajv@8.17.1", + "", + { + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + }, + }, + 
"sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + ], + + "ajv-formats/ajv": [ + "ajv@8.17.1", + "", + { + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + }, + }, + "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + ], + + "anymatch/picomatch": [ + "picomatch@2.3.1", + "", + {}, + "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + ], + + "chokidar/glob-parent": [ + "glob-parent@5.1.2", + "", + { "dependencies": { "is-glob": "^4.0.1" } }, + "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + ], + + "cliui/string-width": [ + "string-width@4.2.3", + "", + { + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1", + }, + }, + "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + ], + + "cliui/wrap-ansi": [ + "wrap-ansi@7.0.0", + "", + { + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0", + }, + }, + "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + ], + + "express/cookie": [ + "cookie@0.7.2", + "", + {}, + "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", + ], + + "express/mime-types": [ + "mime-types@3.0.2", + "", + { "dependencies": { "mime-db": "^1.54.0" } }, + "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + ], + + "fast-glob/glob-parent": [ + "glob-parent@5.1.2", + "", + { "dependencies": { "is-glob": "^4.0.1" } }, + "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + ], + + "log-symbols/chalk": [ + "chalk@5.6.2", + "", + {}, + 
"sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", + ], + + "log-symbols/is-unicode-supported": [ + "is-unicode-supported@1.3.0", + "", + {}, + "sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==", + ], + + "mdast-util-find-and-replace/escape-string-regexp": [ + "escape-string-regexp@5.0.0", + "", + {}, + "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + ], + + "mdast-util-frontmatter/escape-string-regexp": [ + "escape-string-regexp@5.0.0", + "", + {}, + "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + ], + + "micromatch/picomatch": [ + "picomatch@2.3.1", + "", + {}, + "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + ], + + "oas-linter/yaml": [ + "yaml@1.10.2", + "", + {}, + "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + ], + + "oas-resolver/yaml": [ + "yaml@1.10.2", + "", + {}, + "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + ], + + "oas-validator/yaml": [ + "yaml@1.10.2", + "", + {}, + "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + ], + + "ora/chalk": [ + "chalk@5.6.2", + "", + {}, + "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", + ], + + "ora/strip-ansi": [ + "strip-ansi@7.1.2", + "", + { "dependencies": { "ansi-regex": "^6.0.1" } }, + "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + ], + + "orval/chokidar": [ + "chokidar@4.0.3", + "", + { "dependencies": { "readdirp": "^4.0.1" } }, + "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + ], + + "parse-entities/@types/unist": [ + "@types/unist@2.0.11", + "", + 
{}, + "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + ], + + "prompts/kleur": [ + "kleur@3.0.3", + "", + {}, + "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + ], + + "readdirp/picomatch": [ + "picomatch@2.3.1", + "", + {}, + "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + ], + + "recast/source-map": [ + "source-map@0.6.1", + "", + {}, + "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + ], + + "restore-cursor/onetime": [ + "onetime@7.0.0", + "", + { "dependencies": { "mimic-function": "^5.0.0" } }, + "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==", + ], + + "restore-cursor/signal-exit": [ + "signal-exit@4.1.0", + "", + {}, + "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + ], + + "rolldown/@rolldown/pluginutils": [ + "@rolldown/pluginutils@1.0.0-beta.50", + "", + {}, + "sha512-5e76wQiQVeL1ICOZVUg4LSOVYg9jyhGCin+icYozhsUzM+fHE7kddi1bdiE0jwVqTfkjba3jUFbEkoC9WkdvyA==", + ], + + "router/path-to-regexp": [ + "path-to-regexp@8.3.0", + "", + {}, + "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==", + ], + + "send/mime-types": [ + "mime-types@3.0.2", + "", + { "dependencies": { "mime-db": "^1.54.0" } }, + "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + ], + + "shadcn/execa": [ + "execa@9.6.1", + "", + { + "dependencies": { + "@sindresorhus/merge-streams": "^4.0.0", + "cross-spawn": "^7.0.6", + "figures": "^6.1.0", + "get-stream": "^9.0.0", + "human-signals": "^8.0.1", + "is-plain-obj": "^4.1.0", + "is-stream": "^4.0.1", + "npm-run-path": "^6.0.0", + "pretty-ms": "^9.2.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^4.0.0", + "yoctocolors": "^2.1.1", + }, + }, 
+ "sha512-9Be3ZoN4LmYR90tUoVu2te2BsbzHfhJyfEiAVfz7N5/zv+jduIfLrV2xdQXOHbaD6KgpGdO9PRPM1Y4Q9QkPkA==", + ], + + "shadcn/zod": [ + "zod@3.25.76", + "", + {}, + "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + ], + + "solid-js/seroval": [ + "seroval@1.3.2", + "", + {}, + "sha512-RbcPH1n5cfwKrru7v7+zrZvjLurgHhGyso3HTyGtRivGWgYjbOmGuivCQaORNELjNONoK35nj28EoWul9sb1zQ==", + ], + + "solid-js/seroval-plugins": [ + "seroval-plugins@1.3.3", + "", + { "peerDependencies": { "seroval": "^1.0" } }, + "sha512-16OL3NnUBw8JG1jBLUoZJsLnQq0n5Ua6aHalhJK4fMQkz1lqR7Osz1sA30trBtd9VUDc2NgkuRCn8+/pBwqZ+w==", + ], + + "string-width/strip-ansi": [ + "strip-ansi@7.1.2", + "", + { "dependencies": { "ansi-regex": "^6.0.1" } }, + "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + ], + + "swagger2openapi/node-fetch": [ + "node-fetch@2.7.0", + "", + { + "dependencies": { "whatwg-url": "^5.0.0" }, + "peerDependencies": { "encoding": "^0.1.0" }, + "optionalPeers": ["encoding"], + }, + "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + ], + + "swagger2openapi/yaml": [ + "yaml@1.10.2", + "", + {}, + "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + ], + + "tsx/esbuild": [ + "esbuild@0.27.2", + "", + { + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.2", + "@esbuild/android-arm": "0.27.2", + "@esbuild/android-arm64": "0.27.2", + "@esbuild/android-x64": "0.27.2", + "@esbuild/darwin-arm64": "0.27.2", + "@esbuild/darwin-x64": "0.27.2", + "@esbuild/freebsd-arm64": "0.27.2", + "@esbuild/freebsd-x64": "0.27.2", + "@esbuild/linux-arm": "0.27.2", + "@esbuild/linux-arm64": "0.27.2", + "@esbuild/linux-ia32": "0.27.2", + "@esbuild/linux-loong64": "0.27.2", + "@esbuild/linux-mips64el": "0.27.2", + "@esbuild/linux-ppc64": "0.27.2", + "@esbuild/linux-riscv64": "0.27.2", + "@esbuild/linux-s390x": "0.27.2", + 
"@esbuild/linux-x64": "0.27.2", + "@esbuild/netbsd-arm64": "0.27.2", + "@esbuild/netbsd-x64": "0.27.2", + "@esbuild/openbsd-arm64": "0.27.2", + "@esbuild/openbsd-x64": "0.27.2", + "@esbuild/openharmony-arm64": "0.27.2", + "@esbuild/sunos-x64": "0.27.2", + "@esbuild/win32-arm64": "0.27.2", + "@esbuild/win32-ia32": "0.27.2", + "@esbuild/win32-x64": "0.27.2", + }, + "bin": { "esbuild": "bin/esbuild" }, + }, + "sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==", + ], + + "type-is/mime-types": [ + "mime-types@3.0.2", + "", + { "dependencies": { "mime-db": "^1.54.0" } }, + "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + ], + + "typedoc/minimatch": [ + "minimatch@9.0.5", + "", + { "dependencies": { "brace-expansion": "^2.0.1" } }, + "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + ], + + "wrap-ansi/string-width": [ + "string-width@4.2.3", + "", + { + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1", + }, + }, + "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + ], + + "yargs/string-width": [ + "string-width@4.2.3", + "", + { + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1", + }, + }, + "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + ], + + "@apidevtools/swagger-parser/ajv/json-schema-traverse": [ + "json-schema-traverse@1.0.0", + "", + {}, + "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + ], + + "@dotenvx/dotenvx/which/isexe": [ + "isexe@3.1.1", + "", + {}, + "sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==", + ], + + "@ibm-cloud/openapi-ruleset/minimatch/brace-expansion": [ + "brace-expansion@2.0.2", + 
"", + { "dependencies": { "balanced-match": "^1.0.0" } }, + "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + ], + + "@modelcontextprotocol/sdk/ajv/json-schema-traverse": [ + "json-schema-traverse@1.0.0", + "", + {}, + "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + ], + + "@stoplight/better-ajv-errors/ajv/json-schema-traverse": [ + "json-schema-traverse@1.0.0", + "", + {}, + "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + ], + + "@stoplight/spectral-core/ajv/json-schema-traverse": [ + "json-schema-traverse@1.0.0", + "", + {}, + "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + ], + + "@stoplight/spectral-functions/ajv/json-schema-traverse": [ + "json-schema-traverse@1.0.0", + "", + {}, + "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + ], + + "@typescript-eslint/typescript-estree/minimatch/brace-expansion": [ + "brace-expansion@2.0.2", + "", + { "dependencies": { "balanced-match": "^1.0.0" } }, + "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + ], + + "accepts/mime-types/mime-db": [ + "mime-db@1.54.0", + "", + {}, + "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + ], + + "ajv-errors/ajv/json-schema-traverse": [ + "json-schema-traverse@1.0.0", + "", + {}, + "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + ], + + "ajv-formats/ajv/json-schema-traverse": [ + "json-schema-traverse@1.0.0", + "", + {}, + "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + ], + + "cliui/string-width/emoji-regex": [ + "emoji-regex@8.0.0", + "", + {}, + 
"sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + ], + + "express/mime-types/mime-db": [ + "mime-db@1.54.0", + "", + {}, + "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + ], + + "ora/strip-ansi/ansi-regex": [ + "ansi-regex@6.2.2", + "", + {}, + "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + ], + + "orval/chokidar/readdirp": [ + "readdirp@4.1.2", + "", + {}, + "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + ], + + "send/mime-types/mime-db": [ + "mime-db@1.54.0", + "", + {}, + "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + ], + + "shadcn/execa/get-stream": [ + "get-stream@9.0.1", + "", + { "dependencies": { "@sec-ant/readable-stream": "^0.4.1", "is-stream": "^4.0.1" } }, + "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA==", + ], + + "shadcn/execa/human-signals": [ + "human-signals@8.0.1", + "", + {}, + "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ==", + ], + + "shadcn/execa/is-stream": [ + "is-stream@4.0.1", + "", + {}, + "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==", + ], + + "shadcn/execa/npm-run-path": [ + "npm-run-path@6.0.0", + "", + { "dependencies": { "path-key": "^4.0.0", "unicorn-magic": "^0.3.0" } }, + "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==", + ], + + "shadcn/execa/signal-exit": [ + "signal-exit@4.1.0", + "", + {}, + "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + ], + + "shadcn/execa/strip-final-newline": [ + "strip-final-newline@4.0.0", + "", + {}, + 
"sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw==", + ], + + "string-width/strip-ansi/ansi-regex": [ + "ansi-regex@6.2.2", + "", + {}, + "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + ], + + "tsx/esbuild/@esbuild/aix-ppc64": [ + "@esbuild/aix-ppc64@0.27.2", + "", + { "os": "aix", "cpu": "ppc64" }, + "sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==", + ], + + "tsx/esbuild/@esbuild/android-arm": [ + "@esbuild/android-arm@0.27.2", + "", + { "os": "android", "cpu": "arm" }, + "sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==", + ], + + "tsx/esbuild/@esbuild/android-arm64": [ + "@esbuild/android-arm64@0.27.2", + "", + { "os": "android", "cpu": "arm64" }, + "sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==", + ], + + "tsx/esbuild/@esbuild/android-x64": [ + "@esbuild/android-x64@0.27.2", + "", + { "os": "android", "cpu": "x64" }, + "sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==", + ], + + "tsx/esbuild/@esbuild/darwin-arm64": [ + "@esbuild/darwin-arm64@0.27.2", + "", + { "os": "darwin", "cpu": "arm64" }, + "sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==", + ], + + "tsx/esbuild/@esbuild/darwin-x64": [ + "@esbuild/darwin-x64@0.27.2", + "", + { "os": "darwin", "cpu": "x64" }, + "sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==", + ], + + "tsx/esbuild/@esbuild/freebsd-arm64": [ + "@esbuild/freebsd-arm64@0.27.2", + "", + { "os": "freebsd", "cpu": "arm64" }, + "sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==", + ], + + "tsx/esbuild/@esbuild/freebsd-x64": [ + "@esbuild/freebsd-x64@0.27.2", + "", + { "os": "freebsd", "cpu": "x64" }, + 
"sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==", + ], + + "tsx/esbuild/@esbuild/linux-arm": [ + "@esbuild/linux-arm@0.27.2", + "", + { "os": "linux", "cpu": "arm" }, + "sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==", + ], + + "tsx/esbuild/@esbuild/linux-arm64": [ + "@esbuild/linux-arm64@0.27.2", + "", + { "os": "linux", "cpu": "arm64" }, + "sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==", + ], + + "tsx/esbuild/@esbuild/linux-ia32": [ + "@esbuild/linux-ia32@0.27.2", + "", + { "os": "linux", "cpu": "ia32" }, + "sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==", + ], + + "tsx/esbuild/@esbuild/linux-loong64": [ + "@esbuild/linux-loong64@0.27.2", + "", + { "os": "linux", "cpu": "none" }, + "sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==", + ], + + "tsx/esbuild/@esbuild/linux-mips64el": [ + "@esbuild/linux-mips64el@0.27.2", + "", + { "os": "linux", "cpu": "none" }, + "sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==", + ], + + "tsx/esbuild/@esbuild/linux-ppc64": [ + "@esbuild/linux-ppc64@0.27.2", + "", + { "os": "linux", "cpu": "ppc64" }, + "sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==", + ], + + "tsx/esbuild/@esbuild/linux-riscv64": [ + "@esbuild/linux-riscv64@0.27.2", + "", + { "os": "linux", "cpu": "none" }, + "sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==", + ], + + "tsx/esbuild/@esbuild/linux-s390x": [ + "@esbuild/linux-s390x@0.27.2", + "", + { "os": "linux", "cpu": "s390x" }, + "sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==", + ], + + "tsx/esbuild/@esbuild/linux-x64": [ + "@esbuild/linux-x64@0.27.2", + "", + { "os": "linux", 
"cpu": "x64" }, + "sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==", + ], + + "tsx/esbuild/@esbuild/netbsd-arm64": [ + "@esbuild/netbsd-arm64@0.27.2", + "", + { "os": "none", "cpu": "arm64" }, + "sha512-Kj6DiBlwXrPsCRDeRvGAUb/LNrBASrfqAIok+xB0LxK8CHqxZ037viF13ugfsIpePH93mX7xfJp97cyDuTZ3cw==", + ], + + "tsx/esbuild/@esbuild/netbsd-x64": [ + "@esbuild/netbsd-x64@0.27.2", + "", + { "os": "none", "cpu": "x64" }, + "sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==", + ], + + "tsx/esbuild/@esbuild/openbsd-arm64": [ + "@esbuild/openbsd-arm64@0.27.2", + "", + { "os": "openbsd", "cpu": "arm64" }, + "sha512-DNIHH2BPQ5551A7oSHD0CKbwIA/Ox7+78/AWkbS5QoRzaqlev2uFayfSxq68EkonB+IKjiuxBFoV8ESJy8bOHA==", + ], + + "tsx/esbuild/@esbuild/openbsd-x64": [ + "@esbuild/openbsd-x64@0.27.2", + "", + { "os": "openbsd", "cpu": "x64" }, + "sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==", + ], + + "tsx/esbuild/@esbuild/openharmony-arm64": [ + "@esbuild/openharmony-arm64@0.27.2", + "", + { "os": "none", "cpu": "arm64" }, + "sha512-LRBbCmiU51IXfeXk59csuX/aSaToeG7w48nMwA6049Y4J4+VbWALAuXcs+qcD04rHDuSCSRKdmY63sruDS5qag==", + ], + + "tsx/esbuild/@esbuild/sunos-x64": [ + "@esbuild/sunos-x64@0.27.2", + "", + { "os": "sunos", "cpu": "x64" }, + "sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==", + ], + + "tsx/esbuild/@esbuild/win32-arm64": [ + "@esbuild/win32-arm64@0.27.2", + "", + { "os": "win32", "cpu": "arm64" }, + "sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==", + ], + + "tsx/esbuild/@esbuild/win32-ia32": [ + "@esbuild/win32-ia32@0.27.2", + "", + { "os": "win32", "cpu": "ia32" }, + "sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==", + ], + + "tsx/esbuild/@esbuild/win32-x64": [ + "@esbuild/win32-x64@0.27.2", + "", + { 
"os": "win32", "cpu": "x64" }, + "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==", + ], + + "type-is/mime-types/mime-db": [ + "mime-db@1.54.0", + "", + {}, + "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + ], + + "typedoc/minimatch/brace-expansion": [ + "brace-expansion@2.0.2", + "", + { "dependencies": { "balanced-match": "^1.0.0" } }, + "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + ], + + "wrap-ansi/string-width/emoji-regex": [ + "emoji-regex@8.0.0", + "", + {}, + "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + ], + + "yargs/string-width/emoji-regex": [ + "emoji-regex@8.0.0", + "", + {}, + "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + ], + + "shadcn/execa/npm-run-path/path-key": [ + "path-key@4.0.0", + "", + {}, + "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + ], + }, } diff --git a/web/index.html b/web/index.html index a0e51683..9cef970f 100644 --- a/web/index.html +++ b/web/index.html @@ -2,9 +2,9 @@ - + - virsh-sandbox-frontend + fluid.sh
diff --git a/web/nginx.conf b/web/nginx.conf new file mode 100644 index 00000000..d6b7afd3 --- /dev/null +++ b/web/nginx.conf @@ -0,0 +1,23 @@ +server { + listen 80; + server_name _; + root /usr/share/nginx/html; + index index.html; + + location / { + try_files $uri $uri/ /index.html; + } + + location /assets/ { + expires 1y; + add_header Cache-Control "public, immutable"; + } + + location /v1/ { + proxy_pass ${API_URL}; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } +} diff --git a/web/orval.config.ts b/web/orval.config.ts index 272e45dc..91ccac67 100644 --- a/web/orval.config.ts +++ b/web/orval.config.ts @@ -1,22 +1,26 @@ import { defineConfig } from 'orval' export default defineConfig({ - 'virsh-sandbox-api': { + 'fluid-api': { output: { client: 'react-query', mode: 'tags-split', clean: true, prettier: true, - target: 'src/virsh-sandbox', - schemas: 'src/virsh-sandbox/model', + target: 'src/api', + schemas: 'src/api/model', override: { operationName: (operation) => { return operation.operationId || '' }, + mutator: { + path: './src/lib/axios.ts', + name: 'customInstance', + }, }, }, input: { - target: '../virsh-sandbox/docs/openapi.yaml', + target: '../api/docs/openapi.yaml', }, }, }) diff --git a/web/package.json b/web/package.json index 9e341fca..d2c5ce65 100644 --- a/web/package.json +++ b/web/package.json @@ -1,5 +1,5 @@ { - "name": "virsh-sandbox-frontend", + "name": "fluid-web", "private": true, "version": "0.0.0", "type": "module", @@ -17,30 +17,39 @@ "@base-ui/react": "^1.0.0", "@faker-js/faker": "^10.1.0", "@fontsource-variable/jetbrains-mono": "^5.2.8", + "@mdx-js/rollup": "^3.1.1", "@tailwindcss/vite": "^4.1.18", "@tanstack/react-form": "^1.27.6", "@tanstack/react-query": "^5.90.12", "@tanstack/react-router": "^1.143.4", "@tanstack/react-table": "^8.21.3", "@tanstack/router-devtools": "^1.143.4", + 
"@xterm/addon-fit": "^0.11.0", + "@xterm/xterm": "^6.0.0", "axios": "^1.13.2", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", + "framer-motion": "^12.34.0", "lucide-react": "^0.562.0", "next-themes": "^0.4.6", + "posthog-js": "^1.351.1", "radix-ui": "^1.4.3", "react": "^19.2.0", "react-dom": "^19.2.0", + "react-intersection-observer": "^10.0.2", "shadcn": "^3.6.2", + "shiki": "^3.22.0", "sonner": "^2.0.7", "tailwind-merge": "^3.4.0", "tailwindcss": "^4.1.18", "tw-animate-css": "^1.4.0", + "yaml": "^2.8.2", "zod": "^4.2.1" }, "devDependencies": { "@eslint/js": "^9.39.1", "@tanstack/router-plugin": "^1.143.4", + "@types/mdx": "^2.0.13", "@types/node": "^25.0.3", "@types/react": "^19.2.5", "@types/react-dom": "^19.2.3", @@ -52,6 +61,9 @@ "orval": "^7.17.2", "prettier": "^3.7.4", "prettier-plugin-tailwindcss": "^0.7.2", + "remark-frontmatter": "^5.0.0", + "remark-gfm": "^4.0.1", + "remark-mdx-frontmatter": "^5.2.0", "typescript": "~5.9.3", "typescript-eslint": "^8.46.4", "vite": "npm:rolldown-vite@7.2.5" diff --git a/web/public/favicon.png b/web/public/favicon.png new file mode 100644 index 00000000..99832d47 Binary files /dev/null and b/web/public/favicon.png differ diff --git a/landing-page/public/fonts/NeueMachina-Light.otf b/web/public/fonts/NeueMachina-Light.otf similarity index 100% rename from landing-page/public/fonts/NeueMachina-Light.otf rename to web/public/fonts/NeueMachina-Light.otf diff --git a/landing-page/public/fonts/NeueMachina-Regular.otf b/web/public/fonts/NeueMachina-Regular.otf similarity index 100% rename from landing-page/public/fonts/NeueMachina-Regular.otf rename to web/public/fonts/NeueMachina-Regular.otf diff --git a/landing-page/public/fonts/NeueMachina-Ultrabold.otf b/web/public/fonts/NeueMachina-Ultrabold.otf similarity index 100% rename from landing-page/public/fonts/NeueMachina-Ultrabold.otf rename to web/public/fonts/NeueMachina-Ultrabold.otf diff --git a/edit_mode.png b/web/public/images/edit_mode.png similarity index 100% 
rename from edit_mode.png rename to web/public/images/edit_mode.png diff --git a/read_only_mode.png b/web/public/images/read_only_mode.png similarity index 100% rename from read_only_mode.png rename to web/public/images/read_only_mode.png diff --git a/landing-page/public/images/skeleton_smoking_cigarette.jpg b/web/public/images/skeleton_smoking_cigarette.jpg similarity index 100% rename from landing-page/public/images/skeleton_smoking_cigarette.jpg rename to web/public/images/skeleton_smoking_cigarette.jpg diff --git a/web/public/install.sh b/web/public/install.sh new file mode 100644 index 00000000..12429a1f --- /dev/null +++ b/web/public/install.sh @@ -0,0 +1,17 @@ +#!/bin/sh +set -e + +echo "Installing Fluid..." + +if ! command -v go >/dev/null 2>&1; then + echo "Error: 'go' is not installed. Please install Go first: https://go.dev/doc/install" + exit 1 +fi + +echo "Running: go install github.com/aspectrr/fluid.sh/fluid/cmd/fluid@latest" +go install github.com/aspectrr/fluid.sh/fluid/cmd/fluid@latest + +echo "" +echo "Fluid installed successfully!" +echo "Ensure that $(go env GOPATH)/bin is in your PATH." +echo "Run 'fluid --help' to get started." 
diff --git a/web/public/vite.svg b/web/public/vite.svg deleted file mode 100644 index e7b8dfb1..00000000 --- a/web/public/vite.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/web/src/App.css b/web/src/App.css deleted file mode 100644 index b9d355df..00000000 --- a/web/src/App.css +++ /dev/null @@ -1,42 +0,0 @@ -#root { - max-width: 1280px; - margin: 0 auto; - padding: 2rem; - text-align: center; -} - -.logo { - height: 6em; - padding: 1.5em; - will-change: filter; - transition: filter 300ms; -} -.logo:hover { - filter: drop-shadow(0 0 2em #646cffaa); -} -.logo.react:hover { - filter: drop-shadow(0 0 2em #61dafbaa); -} - -@keyframes logo-spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} - -@media (prefers-reduced-motion: no-preference) { - a:nth-of-type(2) .logo { - animation: logo-spin infinite 20s linear; - } -} - -.card { - padding: 2em; -} - -.read-the-docs { - color: #888; -} diff --git a/web/src/components/blog/tunneling/PacketEncapsulation.tsx b/web/src/components/blog/tunneling/PacketEncapsulation.tsx new file mode 100644 index 00000000..14300e94 --- /dev/null +++ b/web/src/components/blog/tunneling/PacketEncapsulation.tsx @@ -0,0 +1,305 @@ +import { useState } from 'react' +import { motion, AnimatePresence } from 'framer-motion' +import { ChevronLeft, ChevronRight } from 'lucide-react' + +const REDUCED_MOTION = + typeof window !== 'undefined' && window.matchMedia('(prefers-reduced-motion: reduce)').matches + +const BLUE = '#60a5fa' +const TEXT_MUTED = '#737373' +const BORDER = '#262626' +const CARD_BG = '#171717' + +interface Layer { + id: string + label: string + fill: string + stroke: string + detail: string +} + +const LAYERS: Layer[] = [ + { id: 'data', label: 'Data', fill: '#1e3a5f', stroke: '#60a5fa', detail: '"GET /watch?v=..."' }, + { id: 'tcp', label: 'TCP :443', fill: '#2a1a4f', stroke: '#a855f7', detail: 'src:49152 dst:443' }, + { + id: 'ip', + label: 'IP (inner)', + fill: 
'#1a2e1a', + stroke: '#4ade80', + detail: 'dst: 142.250.80.46', + }, + { + id: 'encrypted', + label: 'Encrypted', + fill: '#2a1a1a', + stroke: '#f87171', + detail: 'AES-256-GCM ciphertext', + }, + { + id: 'outer-ip', + label: 'Outer IP', + fill: '#2a2000', + stroke: '#fbbf24', + detail: 'dst: 198.51.100.1', + }, +] + +interface Step { + title: string + description: string + visibleLayers: string[] + direction: 'encapsulate' | 'decapsulate' +} + +const STEPS: Step[] = [ + { + title: 'Original Data', + description: + 'You type youtube.com in your browser. This generates an HTTP request - just bytes of data that need to get to YouTube.', + visibleLayers: ['data'], + direction: 'encapsulate', + }, + { + title: 'Add TCP Header', + description: + 'TCP wraps the data with port numbers: your random source port and destination port 443 (HTTPS). This tells the receiving machine which application should handle the data.', + visibleLayers: ['data', 'tcp'], + direction: 'encapsulate', + }, + { + title: 'Add IP Header', + description: + 'IP wraps everything with addresses: your laptop\'s IP and YouTube\'s IP (142.250.80.46). This is the "envelope" that routers read to forward the packet.', + visibleLayers: ['data', 'tcp', 'ip'], + direction: 'encapsulate', + }, + { + title: 'Encrypt the Payload', + description: + 'The VPN client encrypts the entire original packet (data + TCP + IP) into an unreadable ciphertext blob. Anyone intercepting this sees random bytes.', + visibleLayers: ['data', 'tcp', 'ip', 'encrypted'], + direction: 'encapsulate', + }, + { + title: 'Add Outer IP Header', + description: + 'A new IP header wraps the encrypted blob. This outer header points to the VPN server (198.51.100.1) instead of YouTube. This is what the firewall sees.', + visibleLayers: ['data', 'tcp', 'ip', 'encrypted', 'outer-ip'], + direction: 'encapsulate', + }, + { + title: 'Firewall Inspection', + description: + 'The school firewall reads only the outer IP header. 
It sees traffic going to 198.51.100.1 (just some random IP). Not youtube.com. The encrypted payload is opaque. Packet passes through.', + visibleLayers: ['data', 'tcp', 'ip', 'encrypted', 'outer-ip'], + direction: 'decapsulate', + }, + { + title: 'VPN Server Strips Outer Layer', + description: + 'The VPN server receives the packet, strips the outer IP header, and decrypts the payload. The original packet is fully restored.', + visibleLayers: ['data', 'tcp', 'ip'], + direction: 'decapsulate', + }, + { + title: 'Original Packet Restored', + description: + 'The VPN server now forwards the original packet (dst: youtube.com) to its real destination. YouTube sees a request from the VPN server, not from your school.', + visibleLayers: ['data', 'tcp', 'ip'], + direction: 'decapsulate', + }, +] + +const LAYER_HEIGHT = 36 +const LAYER_GAP = 4 +const BASE_X = 60 +const BASE_WIDTH = 280 + +function getLayerGeometry(totalVisible: number, positionInStack: number) { + // Outermost layer is widest, innermost is narrowest + const nestLevel = totalVisible - 1 - positionInStack + const padding = nestLevel * 20 + const x = BASE_X + padding + const width = BASE_WIDTH - padding * 2 + const y = 20 + positionInStack * (LAYER_HEIGHT + LAYER_GAP) + return { x, y, width } +} + +const duration = REDUCED_MOTION ? 0 : 0.35 + +export function PacketEncapsulation() { + const [step, setStep] = useState(0) + const current = STEPS[step] + + const visibleLayers = current.visibleLayers + .map((id) => LAYERS.find((l) => l.id === id)!) + .filter(Boolean) + + // Reverse so outermost layer renders first (back), innermost renders last (front) + const orderedLayers = [...visibleLayers].reverse() + + const svgHeight = visibleLayers.length * (LAYER_HEIGHT + LAYER_GAP) + 40 + + return ( +
+ {/* Step header */} +
+ + Step {step + 1} / {STEPS.length} + + + {current.title} + +
+ + +
+
+ + {/* SVG layers */} +
+ + + {orderedLayers.map((layer) => { + const positionInStack = visibleLayers.indexOf(layer) + const { x, y, width } = getLayerGeometry(visibleLayers.length, positionInStack) + return ( + + + + {layer.label} + + + {layer.detail} + + + ) + })} + + +
+ + {/* Description */} +
+ {current.description} +
+ + {/* Progress dots */} +
+ {STEPS.map((_, i) => ( +
+
+ ) +} diff --git a/web/src/components/blog/tunneling/PacketJourney.tsx b/web/src/components/blog/tunneling/PacketJourney.tsx new file mode 100644 index 00000000..b74d690a --- /dev/null +++ b/web/src/components/blog/tunneling/PacketJourney.tsx @@ -0,0 +1,452 @@ +import { useRef, useCallback, useState } from 'react' +import { motion, useAnimation } from 'framer-motion' +import { RotateCcw, Play } from 'lucide-react' + +const REDUCED_MOTION = + typeof window !== 'undefined' && window.matchMedia('(prefers-reduced-motion: reduce)').matches + +const BLUE = '#60a5fa' +const TEXT_MUTED = '#737373' +const BORDER = '#262626' +const CARD_BG = '#171717' +const DOT_FILL = '#525252' +const GREEN = '#4ade80' + +// Node positions +const NODES = [ + { x: 20, label: 'Laptop', sublabel: 'your device' }, + { x: 240, label: 'Firewall', sublabel: 'school network' }, + { x: 460, label: 'VPN Server', sublabel: '198.51.100.1' }, + { x: 680, label: 'YouTube', sublabel: '142.250.80.46' }, +] + +const NODE_W = 160 +const NODE_H = 70 +const NODE_Y = 75 +const LINE_Y = NODE_Y + NODE_H / 2 + +function WindowDots({ x, y }: { x: number; y: number }) { + return ( + <> + + + + + ) +} + +export function PacketJourney() { + const controls = useAnimation() + const [hasPlayed, setHasPlayed] = useState(false) + const isAnimatingRef = useRef(false) + + const animate = useCallback(async () => { + if (isAnimatingRef.current) return + isAnimatingRef.current = true + setHasPlayed(true) + + if (REDUCED_MOTION) { + await controls.start('instant') + isAnimatingRef.current = false + return + } + + await controls.start('hidden') + // Phase 1: encrypting at laptop + await controls.start('encrypting') + // Phase 2: dot travels laptop -> firewall + await controls.start('travel1') + // Phase 3: firewall inspects + await controls.start('firewall') + // Phase 4: dot travels firewall -> vpn + await controls.start('travel2') + // Phase 5: decapsulating at vpn + await controls.start('decapsulate') + // Phase 6: green dot 
vpn -> youtube + await controls.start('travel3') + // Phase 7: youtube responds + await controls.start('done') + + isAnimatingRef.current = false + }, [controls]) + + const replay = () => { + isAnimatingRef.current = false + animate() + } + + // Line endpoints + const line1Start = NODES[0].x + NODE_W + const line1End = NODES[1].x + const line2Start = NODES[1].x + NODE_W + const line2End = NODES[2].x + const line3Start = NODES[2].x + NODE_W + const line3End = NODES[3].x + + return ( +
+ {/* Top-right button */} +
+ {hasPlayed ? ( + + ) : null} +
+ + {/* Send Packet button (centered, shown before first play) */} + {!hasPlayed && ( +
+ +
+ )} + + + {/* Glow filter */} + {!REDUCED_MOTION && ( + + + + + + + + + + )} + + {/* Connection lines (static) */} + + + + + {/* Node boxes */} + {NODES.map((node, i) => ( + + {/* Node highlight for firewall during inspection */} + + + + {node.label} + + + {node.sublabel} + + + ))} + + {/* "encrypting..." label at laptop */} + + encrypting... + + + {/* "reads outer IP" label at firewall */} + + reads outer IP: pass + + + {/* "decapsulating..." label at VPN */} + + decapsulating... + + + {/* "200 OK" label at YouTube */} + + 200 OK + + + {/* Traveling dot - segment 1: laptop -> firewall (blue, encrypted) */} + {!REDUCED_MOTION && ( + + )} + + {/* Traveling dot - segment 2: firewall -> vpn (blue, still encrypted) */} + {!REDUCED_MOTION && ( + + )} + + {/* Traveling dot - segment 3: vpn -> youtube (green, decrypted) */} + {!REDUCED_MOTION && ( + + )} + + {/* Line labels */} + + encrypted tunnel + + + + encrypted tunnel + + + + plain HTTPS + + +
+ ) +} diff --git a/web/src/components/daemon-connection-status.tsx b/web/src/components/daemon-connection-status.tsx new file mode 100644 index 00000000..9f83e676 --- /dev/null +++ b/web/src/components/daemon-connection-status.tsx @@ -0,0 +1,59 @@ +import { useState, useEffect } from 'react' +import { useQuery } from '@tanstack/react-query' +import { axios } from '~/lib/axios' +import { Check } from 'lucide-react' + +const BRAILLE_FRAMES = ['⣾', '⣽', '⣻', 'Ⓙ', '⑿', '⣟', '⣯', '⣷'] + +export function DaemonConnectionStatus({ orgSlug }: { orgSlug: string }) { + const [dismissed, setDismissed] = useState(false) + const [frameIndex, setFrameIndex] = useState(0) + + const { data } = useQuery({ + queryKey: ['hosts', orgSlug], + queryFn: async () => { + const res = await axios.get(`/v1/orgs/${encodeURIComponent(orgSlug)}/hosts`) + return res.data as { hosts: unknown[]; count: number } + }, + refetchInterval: dismissed ? false : 3000, + enabled: !dismissed, + }) + + const hostCount = data?.count ?? 0 + const connected = hostCount > 0 + + // Auto-dismiss 3s after first connection + useEffect(() => { + if (!connected) return + const timer = setTimeout(() => setDismissed(true), 3000) + return () => clearTimeout(timer) + }, [connected]) + + // Braille spinner animation while polling + useEffect(() => { + if (connected || dismissed) return + const interval = setInterval(() => { + setFrameIndex((i) => (i + 1) % BRAILLE_FRAMES.length) + }, 100) + return () => clearInterval(interval) + }, [connected, dismissed]) + + if (dismissed) return null + + return ( +
+ {connected ? ( + + ) : ( + {BRAILLE_FRAMES[frameIndex]} + )} + + {connected ? 'Connected' : 'Waiting for connection...'} + +
+ ) +} diff --git a/web/src/components/docs/api-endpoint-card.tsx b/web/src/components/docs/api-endpoint-card.tsx new file mode 100644 index 00000000..47d26f65 --- /dev/null +++ b/web/src/components/docs/api-endpoint-card.tsx @@ -0,0 +1,209 @@ +import { useState } from 'react' +import { ChevronDown, ChevronRight, Send, Loader2 } from 'lucide-react' +import { cn } from '~/lib/utils' +import { Button } from '~/components/ui/button' +import type { OpenAPIEndpoint } from '~/lib/openapi' +import { axios } from '~/lib/axios' + +const methodColors: Record = { + GET: 'bg-green-400/10 text-green-400 border-green-400/30', + POST: 'bg-blue-400/10 text-blue-400 border-blue-400/30', + PUT: 'bg-amber-400/10 text-amber-400 border-amber-400/30', + PATCH: 'bg-amber-400/10 text-amber-400 border-amber-400/30', + DELETE: 'bg-red-400/10 text-red-400 border-red-400/30', +} + +interface ApiEndpointCardProps { + endpoint: OpenAPIEndpoint +} + +export function ApiEndpointCard({ endpoint }: ApiEndpointCardProps) { + const [expanded, setExpanded] = useState(false) + const [pathParams, setPathParams] = useState>({}) + const [body, setBody] = useState('') + const [response, setResponse] = useState<{ status: number; data: string } | null>(null) + const [error, setError] = useState(null) + const [loading, setLoading] = useState(false) + + const pathParamNames = + endpoint.parameters?.filter((p) => p.in === 'path').map((p) => p.name) || [] + + const hasBody = ['POST', 'PUT', 'PATCH'].includes(endpoint.method) + + const buildPath = () => { + let path = endpoint.path + for (const [key, val] of Object.entries(pathParams)) { + path = path.replace(`{${key}}`, encodeURIComponent(val)) + } + return `/v1${path}` + } + + const sendRequest = async () => { + setLoading(true) + setError(null) + setResponse(null) + + try { + const url = buildPath() + let data: unknown = undefined + if (hasBody && body.trim()) { + try { + data = JSON.parse(body) + } catch { + setError('Invalid JSON body') + setLoading(false) + 
return + } + } + + const res = await axios({ + method: endpoint.method.toLowerCase() as 'get' | 'post' | 'put' | 'patch' | 'delete', + url, + data, + validateStatus: () => true, + }) + setResponse({ + status: res.status, + data: JSON.stringify(res.data, null, 2), + }) + } catch (err) { + setError( + err instanceof Error && err.message.includes('Network Error') + ? 'Backend offline - start the API server to test endpoints' + : `Request failed: ${err instanceof Error ? err.message : 'Unknown error'}` + ) + } finally { + setLoading(false) + } + } + + // Highlight path params in the path + const renderPath = () => { + const parts = endpoint.path.split(/(\{[^}]+\})/) + return parts.map((part, i) => { + if (part.startsWith('{') && part.endsWith('}')) { + return ( + + {part} + + ) + } + return {part} + }) + } + + return ( +
+ + + {expanded && ( +
+ {endpoint.description && ( +

{endpoint.description}

+ )} + + {/* Path params */} + {pathParamNames.length > 0 && ( +
+ + Path Parameters + +
+ {pathParamNames.map((name) => ( +
+ + setPathParams({ ...pathParams, [name]: e.target.value })} + placeholder={name} + className="border-border w-full border bg-neutral-900 px-2 py-1 text-xs text-neutral-200 focus:border-blue-400 focus:outline-none" + /> +
+ ))} +
+
+ )} + + {/* Request body */} + {hasBody && ( +
+ + Request Body (JSON) + +