diff --git a/README.md b/README.md index 6aff4ccba3..0531446118 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ frdel%2Fagent-zero | Trendshift

-[![Agent Zero Website](https://img.shields.io/badge/Website-agent--zero.ai-0A192F?style=for-the-badge&logo=vercel&logoColor=white)](https://agent-zero.ai) [![Thanks to Sponsors](https://img.shields.io/badge/GitHub%20Sponsors-Thanks%20to%20Sponsors-FF69B4?style=for-the-badge&logo=githubsponsors&logoColor=white)](https://github.com/sponsors/agent0ai) [![Follow on X](https://img.shields.io/badge/X-Follow-000000?style=for-the-badge&logo=x&logoColor=white)](https://x.com/Agent0ai) [![Join our Discord](https://img.shields.io/badge/Discord-Join%20our%20server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/B8KZKNsPpj) [![Subscribe on YouTube](https://img.shields.io/badge/YouTube-Subscribe-red?style=for-the-badge&logo=youtube&logoColor=white)](https://www.youtube.com/@AgentZeroFW) [![Connect on LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/jan-tomasek/) [![Follow on Warpcast](https://img.shields.io/badge/Warpcast-Follow-5A32F3?style=for-the-badge)](https://warpcast.com/agent-zero) +[![Agent Zero Website](https://img.shields.io/badge/Website-agent--zero.ai-0A192F?style=for-the-badge&logo=vercel&logoColor=white)](https://agent-zero.ai) [![Thanks to Sponsors](https://img.shields.io/badge/GitHub%20Sponsors-Thanks%20to%20Sponsors-FF69B4?style=for-the-badge&logo=githubsponsors&logoColor=white)](https://github.com/sponsors/agent0ai) [![Follow on X](https://img.shields.io/badge/X-Follow-000000?style=for-the-badge&logo=x&logoColor=white)](https://x.com/Agent0ai) [![Join our Discord](https://img.shields.io/badge/Discord-Join%20our%20server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/B8KZKNsPpj) [![Subscribe on YouTube](https://img.shields.io/badge/YouTube-Subscribe-red?style=for-the-badge&logo=youtube&logoColor=white)](https://www.youtube.com/@AgentZeroFW) [![Connect on LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/jan-tomasek/) [![Follow on Warpcast](https://img.shields.io/badge/Warpcast-Follow-5A32F3?style=for-the-badge)](https://warpcast.com/agent-zero) ## Documentation: @@ -14,7 +14,7 @@ [Introduction](#a-personal-organic-agentic-framework-that-grows-and-learns-with-you) • [Installation](./docs/installation.md) • [Development](./docs/development.md) • -[Extensibility](./docs/extensibility.md) • +[WebSocket Infrastructure](./docs/websocket-infrastructure.md) • [Connectivity](./docs/connectivity.md) • [How to update](./docs/installation.md#how-to-update-agent-zero) • [Documentation](./docs/README.md) • @@ -158,6 +158,7 @@ docker run -p 50001:80 agent0ai/agent-zero | [Installation](./docs/installation.md) | Installation, setup and configuration | | [Usage](./docs/usage.md) | Basic and advanced usage | | [Development](./docs/development.md) | Development and customization | +| [WebSocket Infrastructure](./docs/websocket-infrastructure.md) | Real-time WebSocket handlers, client APIs, filtering semantics, envelopes | | [Extensibility](./docs/extensibility.md) | Extending Agent Zero | | [Connectivity](./docs/connectivity.md) | External API endpoints, MCP server connections, A2A protocol | | [Architecture](./docs/architecture.md) | System design and components | @@ -265,7 +266,7 @@ docker run -p 50001:80 agent0ai/agent-zero - More space efficient on mobile - Streamable HTTP MCP servers support - LLM API URL added to models config for Azure, local and custom 
providers - + ### v0.9.0 - Agent roles, backup/restore [Release video](https://www.youtube.com/watch?v=rMIe-TC6H-k) diff --git a/agent.py b/agent.py index c5c79a55d2..f7aa2f245d 100644 --- a/agent.py +++ b/agent.py @@ -1,4 +1,4 @@ -import asyncio, random, string +import asyncio, random, string, threading import nest_asyncio nest_asyncio.apply() @@ -46,6 +46,7 @@ class AgentContextType(Enum): class AgentContext: _contexts: dict[str, "AgentContext"] = {} + _contexts_lock = threading.RLock() _counter: int = 0 _notification_manager = None @@ -67,10 +68,14 @@ def __init__( ): # initialize context self.id = id or AgentContext.generate_id() - existing = self._contexts.get(self.id, None) - if existing: - AgentContext.remove(self.id) - self._contexts[self.id] = self + existing = None + with AgentContext._contexts_lock: + existing = AgentContext._contexts.get(self.id, None) + if existing: + AgentContext._contexts.pop(self.id, None) + AgentContext._contexts[self.id] = self + if existing and existing.task: + existing.task.kill() if set_current: AgentContext.set_current(self.id) @@ -95,7 +100,8 @@ def __init__( @staticmethod def get(id: str): - return AgentContext._contexts.get(id, None) + with AgentContext._contexts_lock: + return AgentContext._contexts.get(id, None) @staticmethod def use(id: str): @@ -119,13 +125,15 @@ def set_current(ctxid: str): @staticmethod def first(): - if not AgentContext._contexts: - return None - return list(AgentContext._contexts.values())[0] + with AgentContext._contexts_lock: + if not AgentContext._contexts: + return None + return list(AgentContext._contexts.values())[0] @staticmethod def all(): - return list(AgentContext._contexts.values()) + with AgentContext._contexts_lock: + return list(AgentContext._contexts.values()) @staticmethod def generate_id(): @@ -134,8 +142,9 @@ def generate_short_id(): while True: short_id = generate_short_id() - if short_id not in AgentContext._contexts: - return short_id + with AgentContext._contexts_lock: + if short_id not in AgentContext._contexts: + return short_id @classmethod def get_notification_manager(cls): @@ -147,7 +156,8 @@ def get_notification_manager(cls): @staticmethod def remove(id: str): - context = AgentContext._contexts.pop(id, None) + with AgentContext._contexts_lock: + context = AgentContext._contexts.pop(id, None) if context and context.task: context.task.kill() return context diff --git a/docs/README.md b/docs/README.md index b522ed0c61..70854e542e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -6,6 +6,7 @@ To begin with Agent Zero, follow the links below for detailed guides on various - **[Usage Guide](usage.md):** Explore GUI features and usage scenarios. - **[Development](development.md):** Set up a development environment for Agent Zero. - **[Extensibility](extensibility.md):** Learn how to create custom extensions for Agent Zero. +- **[WebSocket Infrastructure](websocket-infrastructure.md):** Build real-time features with bidirectional handlers and client APIs. - **[Connectivity](connectivity.md):** Learn how to connect to Agent Zero from other applications. - **[Architecture Overview](architecture.md):** Understand the internal workings of the framework. - **[Contributing](contribution.md):** Learn how to contribute to the Agent Zero project. 
@@ -58,7 +59,8 @@ To begin with Agent Zero, follow the links below for detailed guides on various - [Knowledge](architecture.md#5-knowledge) - [Instruments](architecture.md#6-instruments) - [Extensions](architecture.md#7-extensions) - - [Contributing](contribution.md) +- [WebSocket Infrastructure](websocket-infrastructure.md) +- [Development](development.md) - [Getting Started](contribution.md#getting-started) - [Making Changes](contribution.md#making-changes) - [Submitting a Pull Request](contribution.md#submitting-a-pull-request) diff --git a/docs/contribution.md b/docs/contribution.md index 498577eb3e..3ee4d73bc2 100644 --- a/docs/contribution.md +++ b/docs/contribution.md @@ -6,6 +6,7 @@ Contributions to improve Agent Zero are very welcome! This guide outlines how t - See [development](development.md) for instructions on how to set up a development environment. - See [extensibility](extensibility.md) for instructions on how to create custom extensions. +- See [websocket infrastructure](websocket-infrastructure.md) for guidance on building real-time handlers and client integrations. 1. **Fork the Repository:** Fork the Agent Zero repository on GitHub. 2. **Clone Your Fork:** Clone your forked repository to your local machine. @@ -27,4 +28,4 @@ Contributions to improve Agent Zero are very welcome! This guide outlines how t ## Documentation Stack -- The documentation is built using Markdown. We appreciate your contributions even if you don't know Markdown, and look forward to improve Agent Zero for everyone's benefit. \ No newline at end of file +- The documentation is built using Markdown. We appreciate your contributions even if you don't know Markdown, and look forward to improving Agent Zero for everyone's benefit. diff --git a/docs/development.md b/docs/development.md index 9faa1805bd..93344c0e16 100644 --- a/docs/development.md +++ b/docs/development.md @@ -68,7 +68,7 @@ Now when you select one of the python files in the project, you should see prope ```bash pip install -r requirements.txt playwright install chromium -``` +``` These will install all the python packages and browser binaries for playwright (browser agent). Errors in the code editor caused by missing packages should now be gone. If not, try reloading the window. @@ -81,7 +81,9 @@ It will not be able to do code execution and few other features requiring the Do ![VS Code debugging](res/dev/devinst-6.png) -The framework will run at the default port 5000. If you open `http://localhost:5000` in your browser and see `ERR_EMPTY_RESPONSE`, don't panic, you may need to select another port like I did for some reason. If you need to change the defaut port, you can add `"--port=5555"` to the args in the `.vscode/launch.json` file or you can create a `.env` file in the root directory and set the `WEB_UI_PORT` variable to the desired port. +The framework will run at the default port 5000. If you open `http://localhost:5000` in your browser and see `ERR_EMPTY_RESPONSE`, don't panic; the port may simply be in use, and you can select another one. If you need to change the default port, you can add `"--port=5555"` to the args in the `.vscode/launch.json` file or you can create a `.env` file in the root directory and set the `WEB_UI_PORT` variable to the desired port. + +You can also set the bind host via `"--host=0.0.0.0"` (or `WEB_UI_HOST=0.0.0.0`). It may take a while the first time. You should see output like the screenshot below.
The RFC error is ok for now as we did not yet connect our local development to another instance in docker. ![First run](res/dev/devinst-7.png) @@ -147,6 +149,7 @@ You're now ready to contribute to Agent Zero, create custom extensions, or modif ## Next steps - See [extensibility](extensibility.md) for instructions on how to create custom extensions. +- See [websocket infrastructure](websocket-infrastructure.md) for real-time handler patterns, client APIs, and troubleshooting tips. - See [contribution](contribution.md) for instructions on how to contribute to the framework. ## Configuration via Environment Variables @@ -167,4 +170,4 @@ These environment variables automatically override the hardcoded defaults in `ge - You can use the `DockerfileLocal` to build your docker image. - Navigate to your project root in the terminal and run `docker build -f DockerfileLocal -t agent-zero-local --build-arg CACHE_DATE=$(date +%Y-%m-%d:%H:%M:%S) .` - The `CACHE_DATE` argument is optional, it is used to cache most of the build process and only rebuild the last steps when the files or dependencies change. -- See `docker/run/build.txt` for more build command examples. \ No newline at end of file +- See `docker/run/build.txt` for more build command examples. diff --git a/docs/installation.md b/docs/installation.md index c611b1b798..1611250304 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -10,7 +10,7 @@ The following user guide provides instructions for installing and running Agent ## Windows, macOS and Linux Setup Guide -1. **Install Docker Desktop:** +1. **Install Docker Desktop:** - Docker Desktop provides the runtime environment for Agent Zero, ensuring consistent behavior and security across platforms - The entire framework runs within a Docker container, providing isolation and easy deployment - Available as a user-friendly GUI application for all major operating systems @@ -23,8 +23,8 @@ The following user guide provides instructions for installing and running Agent

> [!NOTE] -> **Linux Users:** You can install either Docker Desktop or docker-ce (Community Edition). -> For Docker Desktop, follow the instructions for your specific Linux distribution [here](https://docs.docker.com/desktop/install/linux-install/). +> **Linux Users:** You can install either Docker Desktop or docker-ce (Community Edition). +> For Docker Desktop, follow the instructions for your specific Linux distribution [here](https://docs.docker.com/desktop/install/linux-install/). > For docker-ce, follow the instructions [here](https://docs.docker.com/engine/install/). > > If you're using docker-ce, you'll need to add your user to the `docker` group: @@ -44,14 +44,14 @@ The following user guide provides instructions for installing and running Agent docker install

-1.4. Once installed, launch Docker Desktop: +1.4. Once installed, launch Docker Desktop: docker installed docker installed

> [!NOTE] -> **MacOS Configuration:** In Docker Desktop's preferences (Docker menu) → Settings → +> **MacOS Configuration:** In Docker Desktop's preferences (Docker menu) → Settings → > Advanced, enable "Allow the default Docker socket to be used (requires password)." ![docker socket macOS](res/setup/macsocket.png) @@ -189,8 +189,8 @@ Optionally you can map local folders for file persistence: > You can also access the Web UI by clicking the ports right under the container ID in Docker Desktop. > [!NOTE] -> After starting the container, you'll find all Agent Zero files in your chosen -> directory. You can access and edit these files directly on your machine, and +> After starting the container, you'll find all Agent Zero files in your chosen +> directory. You can access and edit these files directly on your machine, and > the changes will be immediately reflected in the running container. 3. Configure Agent Zero @@ -306,7 +306,7 @@ ollama pull <model-name> 2. A CLI message should confirm the model download on your system #### Selecting your model within Agent Zero -1. Once you've downloaded your model(s), you must select it in the Settings page of the GUI. +1. Once you've downloaded your model(s), you must select it in the Settings page of the GUI. 2. Within the Chat model, Utility model, or Embedding model section, choose Ollama as provider. @@ -321,7 +321,7 @@ ollama pull <model-name> #### Managing your downloaded models Once you've downloaded some models, you might want to check which ones you have available or remove any you no longer need. - **Listing downloaded models:** To see a list of all the models you've downloaded, use the command: ``` ollama list ``` @@ -356,8 +356,10 @@ Agent Zero's Web UI is accessible from any device on your network through the Do > - The port is automatically assigned by Docker unless you specify one > [!NOTE] -> If you're running Agent Zero directly on your system (legacy approach) instead of -> using Docker, you'll need to configure the host manually in `run_ui.py` to run on all interfaces using `host="0.0.0.0"`. +> If you're running Agent Zero directly on your system (legacy approach) instead of +> using Docker, configure the bind address/ports via flags or environment variables: +> - Use `--host 0.0.0.0` (or set `WEB_UI_HOST=0.0.0.0` in `.env`) to listen on all interfaces. +> - Use `--port <port>` (or `WEB_UI_PORT`) to pick the HTTP port. For developers or users who need to run Agent Zero directly on their system,see the [In-Depth Guide for Full Binaries Installation](#in-depth-guide-for-full-binaries-installation). @@ -418,9 +420,8 @@ For developers or users who need to run Agent Zero directly on their system,see > docker run -p $PORT:80 -v /path/to/your/data:/a0 agent0ai/agent-zero > ``` - + ### Conclusion -After following the instructions for your specific operating system, you should have Agent Zero successfully installed and running. You can now start exploring the framework's capabilities and experimenting with creating your own intelligent agents. +After following the instructions for your specific operating system, you should have Agent Zero successfully installed and running. You can now start exploring the framework's capabilities and experimenting with creating your own intelligent agents.
If you encounter any issues during the installation process, please consult the [Troubleshooting section](troubleshooting.md) of this documentation or refer to the Agent Zero [Skool](https://www.skool.com/agent-zero) or [Discord](https://discord.gg/B8KZKNsPpj) community for assistance. - diff --git a/docs/quickstart.md b/docs/quickstart.md index 437cc9b65d..91b4cb0e67 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -4,22 +4,25 @@ This guide provides a quick introduction to using Agent Zero. We'll cover launch ## Launching the Web UI 1. Make sure you have Agent Zero installed and your environment set up correctly (refer to the [Installation guide](installation.md) if needed). 2. Open a terminal in the Agent Zero directory and activate your conda environment (if you're using one). -3. Run the following command: +3. Run the following command: ```bash python run_ui.py ``` -4. A message similar to this will appear in your terminal, indicating the Web UI is running: +Note: +- HTTP binds to `--host/--port` (or `WEB_UI_HOST`/`WEB_UI_PORT`, default port 5000). + +4. A message similar to this will appear in your terminal, indicating the Web UI is running: ![](res/flask_link.png) -5. Open your web browser and navigate to the URL shown in the terminal (usually `http://127.0.0.1:50001`). You should see the Agent Zero Web UI. +5. Open your web browser and navigate to the URL shown in the terminal (usually `http://127.0.0.1:5000`). You should see the Agent Zero Web UI. ![New Chat](res/ui_newchat1.png) > [!TIP] -> As you can see, the Web UI has four distinct buttons for easy chat management: > `New Chat`, `Reset Chat`, `Save Chat`, and `Load Chat`. > Chats can be saved and loaded individually in `json` format and are stored in the > `/tmp/chats` directory. @@ -49,6 +52,6 @@ Now that you've run a simple task, you can experiment with more complex requests * Create or modify files > [!TIP] -> The [Usage Guide](usage.md) provides more in-depth information on using Agent -> Zero's various features, including prompt engineering, tool usage, and multi-agent -> cooperation. \ No newline at end of file +> The [Usage Guide](usage.md) provides more in-depth information on using Agent +> Zero's various features, including prompt engineering, tool usage, and multi-agent +> cooperation. diff --git a/docs/usage.md b/docs/usage.md index 045d9095a8..1e1f16c868 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -102,6 +102,9 @@ Agent Zero's power comes from its ability to use [tools](architecture.md#tools). - **Understand Tools:** Agent Zero includes default tools like knowledge (powered by SearXNG), code execution, and communication. Understand the capabilities of these tools and how to invoke them. +### Real-Time WebSocket Features +- Use WebSockets when you need bidirectional, low-latency updates. The [WebSocket Infrastructure guide](websocket-infrastructure.md) explains the backend handler framework, client API, filtering semantics, and common producer/consumer patterns. + ## Example of Tools Usage: Web Search and Code Execution Let's say you want Agent Zero to perform some financial analysis tasks.
Here's a possible prompt: diff --git a/docs/websocket-infrastructure.md b/docs/websocket-infrastructure.md new file mode 100644 index 0000000000..24a11ae95b --- /dev/null +++ b/docs/websocket-infrastructure.md @@ -0,0 +1,731 @@ +# WebSocket Infrastructure Guide + +**Audience**: Backend and frontend developers building real-time features on Agent Zero +**Updated**: 2026-01-02 +**Related Specs**: `specs/003-websocket-event-handlers/*` + +This guide consolidates everything you need to design, implement, and troubleshoot Agent Zero WebSocket flows. It complements the feature specification by describing day-to-day developer tasks, showing how backend handlers and frontend clients cooperate, and documenting practical patterns for producers and consumers on both sides of the connection. + +--- + +## Table of Contents + +1. [Architecture at a Glance](#architecture-at-a-glance) +2. [Terminology & Metadata](#terminology--metadata) +3. [Connection Lifecycle](#connection-lifecycle) +4. [Backend Cookbook (Handlers & Manager)](#backend-cookbook-handlers--manager) +5. [Frontend Cookbook (websocket.js)](#frontend-cookbook-websocketjs) +6. [Producer & Consumer Patterns](#producer--consumer-patterns) +7. [Metadata Flow & Envelopes](#metadata-flow--envelopes) +8. [Diagnostics, Harness & Logging](#diagnostics-harness--logging) +9. [Best Practices Checklist](#best-practices-checklist) +10. [Quick Reference Tables](#quick-reference-tables) +11. [Further Reading](#further-reading) + +--- + +## Architecture at a Glance + +- **Runtime (`run_ui.py`)** – boots `python-socketio.AsyncServer` inside an ASGI stack served by Uvicorn. Flask routes are mounted via `uvicorn.middleware.wsgi.WSGIMiddleware`, and Flask + Socket.IO share the same process so session cookies and CSRF semantics stay aligned. +- **Singleton handlers** – every `WebSocketHandler` subclass exposes `get_instance()` and is registered exactly once. Direct instantiation raises `SingletonInstantiationError`, keeping shared state and lifecycle hooks deterministic. +- **Dispatcher offload** – handler entrypoints (`process_event`, `on_connect`, `on_disconnect`) run in a background worker loop (via `DeferredTask`) so blocking handlers cannot stall the Socket.IO transport. Socket.IO emits/disconnects are marshalled back to the dispatcher loop. Diagnostic timing and payload summaries are only built when Event Console watchers are subscribed (development mode). +- **`python/helpers/websocket_manager.py`** – orchestrates routing, buffering, aggregation, metadata envelopes, and session tracking. Think of it as the “switchboard” for every WebSocket event. +- **`python/helpers/websocket.py`** – base class for application handlers. Provides lifecycle hooks, helper methods (`emit_to`, `broadcast`, `request`, `request_all`) and identifier metadata. +- **`webui/js/websocket.js`** – frontend singleton exposing a minimal client API (`emit`, `request`, `on`, `off`) with lazy connection management and development-only logging (no client-side `broadcast()` or `requestAll()` helpers). +- **Developer Harness (`webui/components/settings/developer/websocket-test-store.js`)** – manual and automatic validation suite for emit/request flows, timeout behaviour (including the default unlimited wait), correlation ID propagation, envelope metadata, subscription persistence across reconnect, and development-mode diagnostics. +- **Specs & Contracts** – canonical definitions live under `specs/003-websocket-event-handlers/`. This guide references those documents but focuses on applied usage. 
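+To make the singleton contract described above concrete, here is a minimal sketch; it assumes only the behaviour named in the bullets (`get_instance()` registration and `SingletonInstantiationError` on direct construction):
+
+```python
+from python.helpers.websocket import WebSocketHandler
+
+class WorkspacePingHandler(WebSocketHandler):
+    @classmethod
+    def get_event_types(cls) -> list[str]:
+        return ["workspace_ping"]
+
+    async def process_event(self, event_type: str, data: dict, sid: str) -> dict | None:
+        # this handler's contribution inside the aggregated results[]
+        return {"ok": True, "pong": True}
+
+handler = WorkspacePingHandler.get_instance()  # shared, registered-once instance
+# WorkspacePingHandler() would raise SingletonInstantiationError; always use get_instance()
+```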
+ +--- + +## Terminology & Metadata + +| Term | Where it Appears | Meaning | +|------|------------------|---------| +| `sid` | Socket.IO | Connection identifier for a Socket.IO namespace connection. With only the root namespace (`/`), each tab has one `sid`. When connecting to multiple namespaces, a tab has one `sid` per namespace. Treat connection identity as `(namespace, sid)`. | +| `handlerId` | Manager Envelope | Fully-qualified Python class name (e.g., `python.websocket_handlers.notifications.NotificationHandler`). Used for result aggregation and logging. | +| `eventId` | Manager Envelope | UUIDv4 generated for every server→client delivery. Unique per emission. Useful when correlating broadcast fan-out or diagnosing duplicates. | +| `correlationId` | Bidirectional flows | Thread that ties together request, response, and any follow-up events. Client may supply one; otherwise the manager generates and echoes it everywhere. | +| `data` | Envelope payload | Application payload you define. Always a JSON-serialisable object. | +| `user_to_sids` / `sid_to_user` | Manager session tracking | Single-user map today (`allUsers` bucket). Future-proof for multi-tenant routing but already handy when you need all active SIDs. | +| Buffer | Manager | Up to 100 fire-and-forget events stored per temporarily disconnected SID (expires after 1 hour). Request/response events never buffer—clients receive standardised errors instead. | + +Useful mental model: **client ↔ manager ↔ handler**. The manager normalises metadata and enforces routing; handlers focus on business logic; the frontend uses the same identifiers, so logs are easy to stitch. + +--- + +## Connection Lifecycle + +1. **Lazy Connect** – `/js/websocket.js` connects only when a consumer uses the client API (e.g., `emit`, `request`, `on`). Consumers may still explicitly `await websocket.connect()` to block UI until the socket is ready. +2. **Handshake** – Socket.IO connects using the existing Flask session cookie and a CSRF token provided via the Socket.IO `auth` payload (`csrf_token`). The token is obtained from `GET /csrf_token` (see `/js/api.js#getCsrfToken()`), which also sets the runtime-scoped cookie `csrf_token_{runtime_id}`. The server validates an **Origin allowlist** (RFC 6455 / OWASP CSWSH baseline) and then checks handler requirements (`requires_auth`, `requires_csrf`) before accepting. +3. **Lifecycle Hooks** – After acceptance, `WebSocketHandler.on_connect(sid)` fires for every registered handler. Use it for initial emits, state bookkeeping, or session tracking. +4. **Normal Operation** – Client emits events. Manager routes them to the appropriate handlers, gathers results, and wraps outbound deliveries in the mandatory envelope. +5. **Disconnection & Buffering** – If a tab goes away without a graceful disconnect, fire-and-forget events accumulate (max 100). On reconnect, the manager flushes the buffer via `emit_to`. Request flows respond with explicit `CONNECTION_NOT_FOUND` errors. +6. **Reconnection Attempts** – Socket.IO handles reconnect attempts; the manager continues to buffer fire-and-forget events (up to 1 hour) for temporarily disconnected SIDs and flushes them on reconnect. + +### State Sync (Replacing `/poll`) + +Agent Zero can also push poll-shaped state snapshots over the WebSocket bus, replacing the legacy 4Hz `/poll` loop while preserving the existing UI update contract. 
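+Before the details, a rough server-side sketch of the push half of this flow; the envelope fields follow the contract in the bullets below, while `build_poll_snapshot` is a hypothetical stand-in for whatever `python/helpers/state_snapshot.py` actually exposes:
+
+```python
+# Inside a WebSocketHandler subclass; the snapshot helper name is assumed for illustration.
+from python.helpers.state_snapshot import build_poll_snapshot  # hypothetical import
+
+async def push_state(self, sid: str, runtime_epoch: int, seq: int) -> None:
+    snapshot = build_poll_snapshot()  # same payload shape as the legacy /poll response
+    await self.emit_to(sid, "state_push", {
+        "runtime_epoch": runtime_epoch,  # lets clients detect server restarts
+        "seq": seq,                      # per-tab ordering cursor
+        "snapshot": snapshot,
+    })
+```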
+ +- **Handshake**: the frontend sync store (`/components/sync/sync-store.js`) calls `websocket.request("state_request", { context, log_from, notifications_from, timezone })` to establish per-tab cursors and a `seq_base`. +- **Push**: the server emits `state_push` events containing `{ runtime_epoch, seq, snapshot }`, where `snapshot` is exactly the `/poll` payload shape built by `python/helpers/state_snapshot.py`. +- **Coalescing**: the backend `StateMonitor` coalesces dirties per SID (25ms window) so streaming updates stay smooth without unbounded trailing-edge debounce. +- **Degraded fallback**: if the WebSocket handshake/push path is unhealthy, the UI enters `DEGRADED` and uses `/poll` as a fallback; while degraded, push snapshots are ignored to avoid racey double-writes. + +### Thinking in Roles + +- **Client** (frontend) is the page that imports `/js/websocket.js`. It acts as both a **producer** (calling `emit`, `request`) and a **consumer** (subscribing with `on`). +- **Manager** (`WebSocketManager`) sits server-side and routes everything. It resolves correlation IDs, wraps envelopes, and fans out results. +- **Handler** (`WebSocketHandler`) executes the application logic. Each handler may emit additional events back to the client or initiate its own requests to connected SIDs. + +### Flow Overview (by Operation) + +``` +Client emit() ───▶ Manager route_event() ───▶ Handler.process_event() + │ │ └──(fire-and-forget, no ack) + └── throws if └── validates payload + routes by namespace/event type + not connected updates last_activity + +Client request() ─▶ Manager route_event() ─▶ Handlers (async gather) + │ │ └── per-handler dict/None + │ │ + │ └── builds {correlationId, results[]} + └── Promise resolves with aggregated results (timeouts become error items) + +Server emit_to() ──▶ Manager.emit_to() ──▶ Socket.IO delivery/buffer + │ │ └── envelope {handlerId,…} + └── raises ConnectionNotFoundError for unknown sid (never seen) + +Server broadcast() ─▶ Manager.broadcast() + │ └── iterates active sids (respecting exclude_sids) + │ └── delegates to `Manager.emit_to()` → `socketio.emit(..., to=sid)` + └── fire-and-forget (no ack) + +Server request() ─▶ Manager.request_for_sid() ─▶ route_event() + │ │ └── per-handler responses + └── Await aggregated {correlationId, results[]} + +Server request_all() ─▶ Manager.route_event_all() ─▶ route_event per sid + │ │ └── per-handler results + └── Await list[{sid, correlationId, results[]}] +``` + +These diagrams highlight the “who calls what” surface while the detailed semantics (envelopes, buffering, timeouts) remain consistent with the tables later in this guide. + +### End-to-End Examples + +1. **Client request ➜ multiple handlers** + + 1. Frontend calls `websocket.request("refresh_metrics", payload)`. + 2. Manager routes to each handler registered for that event type and awaits `asyncio.gather`. + 3. Each handler returns a dict (or raises); the manager wraps them in `results[]` and resolves the Promise with `{ correlationId, results }`. + 4. The caller inspects per-handler data or errors, filtering by `handlerId` as needed. + +2. **Server broadcast with buffered replay** + + 1. Handler invokes `self.broadcast("notification_broadcast", data, exclude_sids=sid)`. + 2. Manager iterates active connections. For connected SIDs it emits immediately with the mandatory envelope. For temporarily disconnected SIDs it enqueues into the per-SID buffer (up to 100 events). + 3. 
When a buffered SID reconnects, `_flush_buffer()` replays the queued envelopes preserving `handlerId`, `eventId`, `correlationId`, and `ts`. + +3. **Server request_all ➜ client-side confirmations** + + 1. Handler issues `await self.request_all("confirm_close", { contextId }, timeout_ms=5000)`. + 2. Manager fans out to every active SID and awaits each tab's acknowledgement. + 3. Each subscribed client runs its `websocket.on("confirm_close", …)` callback and returns data through the Socket.IO acknowledgement. + 4. The handler receives `[{ sid, correlationId, results[] }]`, inspects each response, and proceeds accordingly. + +These expanded flows complement the operation matrix later in the guide, ensuring every combination (client/server × emit/request and server request_all) is covered explicitly. + +--- + +## Backend Cookbook (Handlers & Manager) + +### 1. Handler Discovery & Setup + +Handlers are discovered deterministically from `python/websocket_handlers/`: + +- **File entry**: `python/websocket_handlers/state_sync_handler.py` → namespace `/state_sync` +- **Folder entry**: `python/websocket_handlers/orders/` or `python/websocket_handlers/orders_handler/` → namespace `/orders` (loads `*.py` one level deep; ignores `__init__.py` and deeper nesting) +- **Reserved root**: `python/websocket_handlers/_default.py` → namespace `/` (diagnostics-only by default) + +Create handler modules under the appropriate namespace entry and inherit from `WebSocketHandler`. + +```python +from typing import Any + +from python.helpers.websocket import WebSocketHandler + +class DashboardHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["dashboard_refresh", "dashboard_push"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str) -> dict | None: + if event_type == "dashboard_refresh": + stats = await self._load_stats(data.get("scope", "all")) + return {"ok": True, "stats": stats} + + if event_type == "dashboard_push": + await self.broadcast( + "dashboard_update", + {"stats": data.get("stats", {}), "source": sid}, + exclude_sids=sid, + ) + return None +``` + +Handlers are auto-loaded on startup; duplicate event declarations produce warnings but are supported. Use `validate_event_types` to ensure names follow lowercase snake_case and avoid Socket.IO reserved events. + +### 2. Consuming Client Events (Server as Consumer) + +- Implement `process_event` and return either `None` (fire-and-forget) or a dict that becomes the handler’s contribution in `results[]`. +- Use async collaborators (database calls, HTTP clients, etc.) and keep the event loop responsive; never make blocking calls. +- Validate input rigorously and return structured errors as needed. + +```python +async def process_event(self, event_type: str, data: dict, sid: str) -> dict | None: + if "query" not in data: + return {"ok": False, "error": {"code": "VALIDATION", "error": "Missing query"}} + + rows = await self.search_backend(data["query"], limit=data.get("limit", 25)) + return {"ok": True, "data": rows, "count": len(rows)} +``` + +### 3. Producing Server Events (Server as Producer) + +Four helper methods mirror the frontend API. The table below summarises them (full table in [Quick Reference](#quick-reference-tables)). + +| Method | Target | Ack | Filters | Typical Use | +|--------|--------|-----|---------|--------------| +| `emit_to(sid, event, data, correlation_id=None)` | Single SID | No | None | Push job progress, or send follow-up events after a request has already been acknowledged (see Pattern D).
| +| `broadcast(event, data, exclude_sids=None, correlation_id=None)` | All SIDs | No | `exclude_sids` only | Fan-out notifications, multi-tab sync while skipping the caller. | +| `request(sid, event, data, timeout_ms=0)` | Single SID | Yes (`results[]`) | None | Ask the client to run local logic (e.g., UI confirmation) and gather per-handler results. | +| `request_all(event, data, timeout_ms=0)` | All SIDs | Yes (`[{sid, results[]}]`) | None | Fan-out to every tab, e.g., “refresh your panel” or “confirm unsaved changes”. | + +Each helper automatically injects `handlerId`, obeys metadata envelopes, enforces routing rules, and handles timeouts: + +```python +aggregated = await self.request_all( + "workspace_ping", + {"payload": {"reason": "health_check"}}, + timeout_ms=2_000, +) + +for entry in aggregated: + self.log.info("sid %s replied: %s", entry["sid"], entry["results"]) +``` + +Timeouts convert into `{ "ok": False, "error": {"code": "TIMEOUT", ...} }`; they do **not** raise. + +### 4. Multi-Handler Aggregation + +- When multiple handlers subscribe to the same event, the manager invokes them concurrently with `asyncio.gather`. Aggregated results preserve registration order. Use correlation IDs to map responses to original triggers. +- Client-side handler include/exclude filters are intentionally not supported. Consumers filter `results[]` by `handlerId` when needed. + +```python +if not results: + return { + "handlerId": self.identifier, + "ok": False, + "error": {"code": "NO_HANDLERS", "error": "No handler registered for this event type"}, + } +``` + +### 5. Session Tracking Helpers + +`WebSocketManager` maintains lightweight mappings that you can use from handlers: + +```python +all_sids = self.manager.get_sids_for_user() # today: every active sid +maybe_user = self.manager.get_user_for_sid(sid) # currently None or "single_user" + +if updated_payload: + await asyncio.gather( + *[ + self.emit_to(other_sid, "dashboard_update", updated_payload) + for other_sid in all_sids if other_sid != sid + ] + ) +``` + +These helpers are future-proof for multi-tenant evolution and already handy to broadcast to every tab except the caller. + +**Future Multitenancy Mechanics** +- **Registration**: When multi-user support ships, `handle_connect` will resolve the authenticated user identifier (e.g., from Flask session). `register()` will stash that identifier alongside the SID and place it into `user_to_sids[user_id]` while still populating the `allUsers` bucket for backward compatibility. +- **Lookups**: `get_sids_for_user(user_id)` will return the tenant-specific SID set. Omitting the argument (or passing `None`) keeps today’s behaviour and yields the full `allUsers` list. `get_user_for_sid(sid)` will expose whichever identifier was recorded at registration. +- **Utility**: These primitives unlock future features such as sending workspace notifications to every tab owned by the same account, ejecting all sessions for a suspended user, or correlating request/response traffic per tenant without rewriting handlers. +- **Migration Story**: Existing handler code that loops over `get_sids_for_user()` automatically gains tenant-scoped behaviour once callers pass a `user_id`. Tests will exercise both single-user (default) and multi-tenant branches to guarantee compatibility. + +--- + +## Frontend Cookbook (`websocket.js`) + +### 1. 
Connecting + +```javascript +import { getNamespacedClient } from "/js/websocket.js"; + +const websocket = getNamespacedClient("/"); // reserved root (diagnostics-only by default) + +// Optional: await the handshake if you need to block UI until the socket is ready +await websocket.connect(); + +// Runtime metadata is exposed globally for Alpine stores / harness +console.log(window.runtimeInfo.id, window.runtimeInfo.isDevelopment); +``` + +- The module connects lazily when a consumer uses the client API (e.g., `emit`, `request`, `on`). Components may still explicitly `await websocket.connect()` to block rendering on readiness or re-run diagnostics. +- The server enforces an Origin allowlist during the Socket.IO connect handshake (baseline CSWSH mitigation). The browser session cookie remains the authentication mechanism, and CSRF is validated via the Socket.IO `auth` payload (`csrf_token`) plus the runtime-scoped CSRF cookie and session value. +- Socket.IO handles reconnection attempts automatically. + +### Namespaces (end-state) + +- The root namespace (`/`) is reserved and intentionally unhandled by default for application events. Feature code should connect to an explicit namespace (for example `/state_sync`). +- The frontend exposes `createNamespacedClient(namespace)` and `getNamespacedClient(namespace)` (one client instance per namespace per tab). Namespaced clients expose the same minimal API: `emit`, `request`, `on`, `off`. +- Unknown namespaces are rejected deterministically during the Socket.IO connect handshake with a `connect_error` payload: + - `err.message === "UNKNOWN_NAMESPACE"` + - `err.data === { code: "UNKNOWN_NAMESPACE", namespace: "/requested" }` + +### 2. Client Operations + +- **Producers (client → server)** use `emit` and `request`. Payloads must be objects; primitive payloads throw. +- **Consumers (server → client)** register callbacks with `on(eventType, callback)` and remove them with `off()`. + +Example (producer): + +```javascript +await websocket.request("hello_request", { name: this.name }, { + timeoutMs: 1500, + correlationId: `greet-${crypto.randomUUID()}`, +}); +``` + +Example (consumer): + +```javascript +websocket.on("dashboard_update", (envelope) => { + const { handlerId, correlationId, ts, data } = envelope; + this.debugLog({ handlerId, correlationId, ts }); + this.rows = data.rows; +}); + +// Later, during cleanup +websocket.off("dashboard_update"); +``` + +### 3. Envelope Awareness + +Subscribers always receive: + +```javascript +interface ServerDeliveryEnvelope { + handlerId: string; + eventId: string; + correlationId: string; + ts: string; // ISO8601 UTC with millisecond precision + data: object; +} +``` + +Even if existing components only look at `data`, you should record `handlerId` and `correlationId` when building new features—doing so simplifies debugging multi-tab flows. + +### 4. Development-Only Logging + +`websocket.debugLog()` writes to the console only when `runtimeInfo.isDevelopment` is true. Use it liberally when diagnosing event flows without polluting production logs. + +```javascript +websocket.debugLog("request", { correlationId: payload.correlationId, timeoutMs }); +``` + +### 5. Helper Utilities + +`webui/js/websocket.js` exports helper utilities alongside the `websocket` singleton so correlation metadata and envelopes stay consistent: + +- `createCorrelationId(prefix?: string)` returns a UUID-based identifier, optionally prefixed (e.g. `createCorrelationId('hello') → hello-1234…`). Use it when chaining UI actions to backend logs. 
+- `validateServerEnvelope(envelope)` guarantees subscribers receive the canonical `{ handlerId, eventId, correlationId, ts, data }` shape; it throws if the payload is malformed. + +Example: + +```javascript +import { getNamespacedClient, createCorrelationId, validateServerEnvelope } from '/js/websocket.js'; + +const websocket = getNamespacedClient('/state_sync'); + +const { results } = await websocket.request( + 'hello_request', + { name: this.name }, + { correlationId: createCorrelationId('hello') }, +); + +websocket.on('dashboard_update', (envelope) => { + const validated = validateServerEnvelope(envelope); + this.rows = validated.data.rows; +}); +``` + +### 6. Error Handling + +- Producer methods call `websocket.connect()` internally, so they wait for the handshake automatically. They only surface `Error("Not connected")` if the handshake ultimately fails (for example, the user is logged out or the server is down). +- `request()` acknowledgement timeouts reject with `Error("Request timeout")`. Server-side fan-out timeouts (for example `request_all`) are represented as `results[]` entries with `error.code = "TIMEOUT"` (no Promise rejection). +- For large payloads, the client throws before sending and the server rejects frames above the 50 MiB cap (`max_http_buffer_size` on the Socket.IO engine). + +### 7. Startup Broadcast + +- When **Broadcast server restart event** is enabled in Developer settings (on by default), the backend emits a fire-and-forget `server_restart` envelope the first time each connection is established after a process restart. The payload includes `runtimeId` and an ISO8601 timestamp so clients can reconcile cached state. +- Disable the toggle if your deployment pipeline already publishes restart notifications. + +--- + +## Frontend Error Handling (Using the Registry) + +Client code should treat `RequestResultItem.error.code` as one of the documented values and branch behavior accordingly. Keep UI decisions localized and reusable. + +Recommended patterns +- Centralize mapping from `WsErrorCode` → user-facing message and remediation hint. +- Always surface hard errors (timeouts); gate debug details by dev flag.
+ +Example – request() +```javascript +import { getNamespacedClient } from '/js/websocket.js' + +const websocket = getNamespacedClient('/state_sync') + +function renderError(code, message) { + // Map codes to UI copy; keep messages concise + switch (code) { + case 'NO_HANDLERS': return `No handler for this action (${message})` + case 'TIMEOUT': return `Request timed out; try again or increase timeout` + case 'CONNECTION_NOT_FOUND': return `Target connection unavailable; retry after reconnect` + default: return message || 'Unexpected error' + } +} + +const res = await websocket.request('example_event', { foo: 'bar' }, { timeoutMs: 1500 }) +for (const item of res.results) { + if (item.ok) { + // use item.data + } else { + const msg = renderError(item.error?.code, item.error?.error) + // show toast/log based on dev flag + console.error('[ws]', msg) + } +} +``` + +Subscriptions – envelope handler +```javascript +import { getNamespacedClient } from '/js/websocket.js' + +const websocket = getNamespacedClient('/state_sync') + +websocket.on('example_broadcast', ({ data, handlerId, eventId, correlationId }) => { + // handle data; errors should not typically arrive via broadcast + // correlationId can link UI actions to backend logs +}) +``` + +See also +- Error Codes Registry (above) for the authoritative code list +- Contracts: `frontend-api.md` for method signatures and response shapes + +--- + +## Producer & Consumer Patterns + +### Pattern A – Fire-and-Forget Notification (Server Producer → Client Consumers) + +Backend: + +```python +await self.broadcast( + "notification_broadcast", + { + "message": data["message"], + "level": data.get("level", "info"), + "timestamp": datetime.now(timezone.utc).isoformat(), + }, + exclude_sids=sid, + correlation_id=data.get("correlationId"), +) +``` + +Frontend: + +```javascript +websocket.on("notification_broadcast", ({ data, correlationId, ts }) => { + notifications.unshift({ ...data, correlationId, ts }); +}); +``` + +### Pattern B – Request/Response With Multi-Handler Aggregation (Client Producer → Server Consumers) + +Client: + +```javascript +const { correlationId, results } = await websocket.request( + "refresh_metrics", + { duration: "1h" }, + { timeoutMs: 2_000 } +); + +results.forEach(({ handlerId, ok, data, error }) => { + if (ok) renderMetrics(handlerId, data); + else console.warn(handlerId, error); +}); +``` + +Server (two handlers listening to the same event): + +```python +class TaskMetrics(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["refresh_metrics"] + + async def process_event(self, event_type: str, data: dict, sid: str) -> dict | None: + stats = await self._load_task_metrics(data["duration"]) + return {"metrics": stats} + +class HostMetrics(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["refresh_metrics"] + + async def process_event(self, event_type: str, data: dict, sid: str) -> dict | None: + return {"metrics": await self._load_host_metrics(data["duration"])} +``` + +### Pattern C – Fan-Out `request_all` (Server Producer → Many Client Consumers) + +Backend (server producer asking every tab to confirm a destructive operation): + +```python +confirmations = await self.request_all( + "confirm_close_tab", + {"contextId": context_id}, + timeout_ms=5_000, +) + +for entry in confirmations: + self.log.info("%s responded: %s", entry["sid"], entry["results"]) +``` + +Frontend consumer matching the envelope: + +```javascript +websocket.on("confirm_close_tab", async ({ data, 
correlationId }) => { + const accepted = await showModalAndAwaitUser(data.contextId); + return { ok: accepted, correlationId, decision: accepted ? "close" : "stay" }; +}); +``` + +### Pattern D – Server Reply Without Using `ack` + +Sometimes you want to acknowledge work immediately but stream additional updates later. Combine `request()` for the initial confirmation and `emit_to()` for follow-up events using the same correlation ID. + +```python +async def process_event(self, event_type: str, data: dict, sid: str) -> dict | None: + if event_type != "start_long_task": + return None + + correlation_id = data.get("correlationId") + asyncio.create_task(self._run_workflow(sid, correlation_id)) + return {"accepted": True, "correlationId": correlation_id} + +async def _run_workflow(self, sid: str, correlation_id: str | None): + for step in range(10): + await asyncio.sleep(1) + await self.emit_to( + sid, + "task_progress", + {"step": step, "total": 10}, + correlation_id=correlation_id, + ) +``` + +--- + +## Metadata Flow & Envelopes + +### Client → Server Payload + +Producers send an object payload as `data` (never primitives). Request metadata like `timeoutMs` and `correlationId` are passed as method options, not embedded into `data`. + +The manager validates the payload, resolves/creates `correlationId`, and passes a clean copy of `data` to handlers. + +### Server → Client Envelope (mandatory) + +```json +{ + "handlerId": "python.websocket_handlers.notifications.NotificationHandler", + "eventId": "b7e2a9cd-2857-4f7a-8bf4-12a736cb6720", + "correlationId": "caller-supplied-or-generated", + "ts": "2025-10-31T13:13:37.123Z", + "data": { "message": "Hello!" } +} +``` + +**Guidance:** + +- Use `eventId` alongside frontend logging to spot duplicate deliveries or buffered flushes. +- `correlationId` ties together the user action that triggered the event, even if multiple handlers participate. +- `handlerId` helps you distinguish which handler produced the payload, especially when multiple handlers share the same event type. + +--- + +## Diagnostics, Harness & Logging + +### Developer Harness + +- Location: `Settings → Developer → WebSocket Test Harness`. +- Automatic mode drives emit, request, delayed request (default unlimited timeout), subscription persistence, and envelope validation. It asserts envelope metadata (handlerId, eventId, correlationId, ISO8601 timestamps) and correlation carryover. +- Manual buttons let you trigger individual flows and inspect recent payloads. +- Harness hides itself when `runtime.isDevelopment` is false so production builds incur zero overhead. +- Helper APIs (`createCorrelationId`, `validateServerEnvelope`) are exercised end to end; subscription logs record the `server_restart` broadcast emitted on first connection after a runtime restart. + +### WebSocket Event Console + +- Location: `Settings → Developer → WebSocket Event Console`. +- Enabling capture calls `websocket.request("ws_event_console_subscribe", { requestedAt })`. The handler (`DevWebsocketTestHandler`) refuses the subscription outside development mode and registers the SID as a **diagnostic watcher** by calling `WebSocketManager.register_diagnostic_watcher`. Only connected SIDs can subscribe. +- Disabling capture calls `websocket.request("ws_event_console_unsubscribe", {})`. Disconnecting also triggers `WebSocketManager.unregister_diagnostic_watcher`, so stranded watchers never accumulate. 
+- While at least one watcher exists, the manager streams `ws_dev_console_event` envelopes (documented in `contracts/event-schemas.md`). Each payload contains: + - `kind`: `"inbound" | "outbound" | "lifecycle"` + - `eventType`, `sid`, `targets[]`, delivery/buffer flags + - `resultSummary` (handler counts, per-handler status, durationMs) + - `payloadSummary` (first few keys + byte size) +- Lifecycle broadcasts (`ws_lifecycle_connect` / `ws_lifecycle_disconnect`) are emitted asynchronously via `broadcast(..., diagnostic=True)` so long-running handlers can’t block dispatch. +- The modal UI exposes: + - Start/stop capture (explicitly controls subscription state). + - Resubscribe button (detach + resubscribe) to recover gracefully after Socket.IO reconnects. + - Clear button (resets the in-memory ring buffer). + - “Handled-only” toggle that filters inbound entries to ones that resolved to registered handlers or produced errors. +- When the watcher set becomes empty the manager immediately stops streaming diagnostics, guaranteeing zero steady-state overhead outside development. + +### Instrumentation & Logging + +- `WebSocketManager` offloads handler execution via `DeferredTask` and may record `durationMs` when development diagnostics are active (Event Console watchers subscribed). These metrics flow into the Event Console stream (and may also appear in `request()` / `request_all()` results), keeping steady-state overhead near zero when diagnostics are closed. +- Lifecycle events capture `connectionCount`, ISO8601 timestamps, and SID so dashboards can correlate UI behaviour with connection churn. +- Backend logging: use `PrintStyle.debug/info/warning` and always include `handlerId`, `eventType`, `sid`, and `correlationId`. The manager already logs connection events, missing handlers, and buffer overflows. +- Frontend logging: `websocket.debugLog()` mirrors backend debug messages but only when `window.runtimeInfo.isDevelopment` is true. + +### Access Logs & Transport Troubleshooting + +- Settings → Developer includes a persisted `uvicorn_access_logs_enabled` switch. When enabled, `run_ui.py` enables Uvicorn access logs so transport issues (CORS, handshake failures) can be traced. +- The long-standing `websocket_server_restart_enabled` switch (same section) controls whether newly connected clients receive the `server_restart` broadcast that carries `runtimeId` metadata. + +### Common Issues + +1. **`CONNECTION_NOT_FOUND`** – `emit_to` called with an SID that never existed or expired long ago. Use `get_sids_for_user` before emitting or guard on connection presence. +2. **Timeout Rejections** – `request()` and `request_all()` reject only when the transport times out, not when a handler takes too long. Inspect the returned result arrays for `TIMEOUT` entries and consider increasing `timeoutMs`. +3. **Origin Rejected** – the Socket.IO handshake was rejected because the `Origin` header did not match the expected UI origin. Ensure you access the UI and the WebSocket endpoint on the same scheme/host/port, and verify any reverse proxy preserves the `Origin` header. +4. **Diagnostics Subscriptions Failing** – only available in development mode and for connected SIDs. Verify the browser tab still holds an active session and that `window.runtimeInfo.isDevelopment` is true before opening the modal. + +--- + +## Best Practices Checklist + +- [ ] Always validate inbound payloads in `process_event` (required fields, type constraints, length limits). 
+- Propagate `correlationId` through multi-step workflows so logs and envelopes align. +- Respect the 50 MiB payload cap; prefer HTTP + polling for bulk data transfers. +- Ensure long-running operations emit progress via `emit_to` or switch to an async task with periodic updates. +- Buffer-sensitive actions (`emit_to`) should handle `ConnectionNotFoundError` from unknown SIDs gracefully. +- When adding new handlers, update the developer harness if new scenarios need coverage. +- Keep `PrintStyle` logs meaningful—include `handlerId`, `eventType`, `sid`, and `correlationId`. +- In Alpine components, call `websocket.off()` during teardown to avoid duplicate subscriptions. + +--- + +## Quick Reference Tables + +### Operation Matrix + +| Direction | API | Ack? | Filters | Notes | +|-----------|-----|------|---------|-------| +| Client → Server | `emit(event, data, { correlationId? })` | No | None | Fire-and-forget. | +| Client → Server | `request(event, data, { timeoutMs?, correlationId? })` | Yes (`{ correlationId, results[] }`) | None | Aggregates per handler. Timeout entries appear inside `results`. | +| Server → Client | `emit_to(sid, ...)` | No | None | Raises `ConnectionNotFoundError` for unknown `sid`. Buffers if disconnected. | +| Server → Client | `broadcast(...)` | No | `exclude_sids` only | Iterates over current connections; uses the same envelope as `emit_to`. | +| Server → Client | `request(...)` | Yes (`{ correlationId, results[] }`) | None | Equivalent of client `request` but targeted at one SID from the server. | +| Server → Client | `request_all(...)` | Yes (`[{ sid, correlationId, results[] }]`) | None | Server-initiated fan-out. | + +### Metadata Cheat Sheet + +| Field | Produced By | Guarantees | +|-------|-------------|------------| +| `correlationId` | Manager | Present on every response/envelope. Caller-supplied ID is preserved; otherwise manager generates UUIDv4 hex. | +| `eventId` | Manager | Unique UUIDv4 per server→client delivery. Helpful for dedup / auditing. | +| `handlerId` | Handler / Manager | Deterministic value `module.Class`. Used for `results[]` aggregation and logging. | +| `ts` | Manager | ISO8601 UTC with millisecond precision. Replaces `+00:00` with `Z`. | +| `results[]` | Manager | Array of `{ handlerId, ok, data?, error? }`. Errors include `code`, `error`, and optional `details`. | + +--- + +## Further Reading + +- **QuickStart** – [`specs/003-websocket-event-handlers/quickstart.md`](../specs/003-websocket-event-handlers/quickstart.md) for a step-by-step introduction. +- **Contracts** – Backend, frontend, schema, and security contracts define the canonical API surface: + - [`websocket-handler-interface.md`](../specs/003-websocket-event-handlers/contracts/websocket-handler-interface.md) + - [`frontend-api.md`](../specs/003-websocket-event-handlers/contracts/frontend-api.md) + - [`event-schemas.md`](../specs/003-websocket-event-handlers/contracts/event-schemas.md) + - [`security-contract.md`](../specs/003-websocket-event-handlers/contracts/security-contract.md) +- **Implementation Reference** – Inspect `python/helpers/websocket_manager.py`, `python/helpers/websocket.py`, `webui/js/websocket.js`, and the developer harness in `webui/components/settings/developer/websocket-test-store.js` for concrete examples. + +> **Tip:** When extending the infrastructure (new metadata), start by updating the contracts, sync the manager/frontend helpers, and then document the change here so producers and consumers stay in lockstep.
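+For reference, a minimal sketch of assembling a `results[]` error item consistent with the cheat sheet above; the real manager helper (`_build_error_result`, mentioned in the registry notes below) may differ in signature:
+
+```python
+def build_error_result(handler_id: str, code: str, message: str,
+                       details: str | None = None) -> dict:
+    # Shapes follow the Metadata Cheat Sheet: errors carry code, error, optional details.
+    error: dict = {"code": code, "error": message}
+    if details is not None:
+        error["details"] = details
+    return {"handlerId": handler_id, "ok": False, "error": error}
+
+# Example: build_error_result("WebSocketManager", "NO_HANDLERS", "No handler for 'missing'")
+```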
+
+## Error Codes Registry (Draft for Phase 6)
+
+The WebSocket stack standardizes the backend error codes returned in `RequestResultItem.error.code`. This registry documents the currently used codes and their intended meaning. Client and server implementations should reference these values verbatim (UPPER_SNAKE_CASE).
+
+| Code | Scope | Meaning | Typical Remediation | Example Payload |
+|------|-------|---------|---------------------|-----------------|
+| `NO_HANDLERS` | Manager routing | No handler is registered for the requested `eventType`. | Register a handler for the event or correct the event name. | `{ "handlerId": "WebSocketManager", "ok": false, "error": { "code": "NO_HANDLERS", "error": "No handler for 'missing'" } }` |
+| `TIMEOUT` | Aggregated or single request | The request exceeded `timeoutMs`. | Increase `timeoutMs`, reduce handler processing time, or split work. | `{ "handlerId": "ExampleHandler", "ok": false, "error": { "code": "TIMEOUT", "error": "Request timeout" } }` |
+| `CONNECTION_NOT_FOUND` | Single-sid request | Target `sid` is not connected/known. | Use an active `sid` or retry after reconnect. | `{ "handlerId": "WebSocketManager", "ok": false, "error": { "code": "CONNECTION_NOT_FOUND", "error": "Connection 'sid-123' not found" } }` |
+| `HARNESS_UNKNOWN_EVENT` | Developer harness | The harness test handler received an unsupported event name. | Update harness sources or disable the step before running automation. | `{ "handlerId": "python.websocket_handlers.dev_websocket_test_handler.DevWebsocketTestHandler", "ok": false, "error": { "code": "HARNESS_UNKNOWN_EVENT", "error": "Unhandled event", "details": "ws_tester_foo" } }` |
+
+Notes
+- The error payload shape follows the contract documented in `contracts/event-schemas.md` (`RequestResultItem.error`).
+- Codes are case-sensitive. Use them exactly as listed.
+- Future codes will be appended here and referenced by inline docstrings/JSDoc.
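+
+The sketch below shows an error-result builder consistent with this registry. It is illustrative only: the real construction point is `_build_error_result` in the manager, and the function name and signature here are assumptions.
+
+```python
+# Illustrative builder producing RequestResultItem-shaped errors.
+# Codes must match the registry verbatim (UPPER_SNAKE_CASE).
+def build_error_result(handler_id: str, code: str, message: str, details: str | None = None) -> dict:
+    error: dict = {"code": code, "error": message}
+    if details is not None:
+        error["details"] = details
+    return {"handlerId": handler_id, "ok": False, "error": error}
+
+
+# Example: the shape the manager produces for an unknown event type.
+no_handlers = build_error_result("WebSocketManager", "NO_HANDLERS", "No handler for 'missing'")
+```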
+
+### Client-Side Error Codes (Draft)
+
+The frontend can originate errors during validation, connection, or request execution. Today these surface as thrown exceptions/promise rejections (not as `RequestResultItem`). When server→client request/ack lands in the future, these codes will also be serialised into `RequestResultItem.error.code` for protocol symmetry.
+
+| Code | Scope | Current Delivery | Meaning | Typical Remediation | Example |
+|------|-------|------------------|---------|---------------------|---------|
+| `VALIDATION_ERROR` | Producer options / payload | Exception (throw) | Invalid options (e.g., bad `timeoutMs`/`correlationId`) or a non-object payload. | Fix the caller's options and payload shapes. | `new Error("timeoutMs must be a non-negative number")` |
+| `PAYLOAD_TOO_LARGE` | Size precheck (50 MB cap) | Exception (throw) | The client precheck rejects payloads exceeding the cap before emit. | Reduce the payload or chunk it via HTTP; keep binaries off WS. | `new Error("Payload size exceeds maximum (.. > .. bytes)")` |
+| `NOT_CONNECTED` | Socket status | Exception (throw) | Auto-connect could not establish a session (user logged out, server offline, handshake rejected). | Check login state, server availability, and Origin policy; optionally `await websocket.connect()` for diagnostics. | `new Error("Not connected")` |
+| `REQUEST_TIMEOUT` | request() | Not used (end-state) | Timeouts are represented inside `results[]` as `error.code="TIMEOUT"` (the Promise resolves). | Inspect `results[]` for `TIMEOUT` items and handle them in the UI. | N/A |
+| `CONNECT_ERROR` | Socket connect_error | Exception (throw/log) | Transport/handshake failure. | Check server availability, CORS, or the network. | `new Error("WebSocket connection failed: ...")` |
+
+Notes
+- These are currently local exceptions, not part of the aggregated results payload. Calling code should `try/catch` or handle promise rejections.
+- When server→client request/ack is introduced, the same codes will be serialised into `RequestResultItem.error.code` to maintain symmetry with the backend codes.
+- Prefer branching on `code` when available; avoid coupling to full message strings.
+
+### IDE Hints (Non-enforcing)
+
+To surface recognized codes without adding toolchain dependencies, the front end can declare a JSDoc union type near the helper exports:
+
+```javascript
+/** @typedef {('NO_HANDLERS'|'TIMEOUT'|'CONNECTION_NOT_FOUND')} WsErrorCode */
+```
+
+The back end can reference this registry via concise docstrings at error construction points (e.g., `_build_error_result`) to improve discoverability.
+
+---
+
+## Phase 6 – Registry & Helper Work Status
+
+Current status
+- This registry table is drafted and linked; it documents the codes already produced by the manager/helpers today.
+
+Remaining work (tracked in Phase 6 tasks)
+- T148: Ensure the registry is complete and cross-referenced from comments/docstrings (backend) and JSDoc typedefs (frontend). No new linter/tooling.
+- T144: Reference the registry from contracts and quickstart examples; align all examples to documented codes.
+- T141/T143: Add/adjust tests to assert known codes only in helper/manager paths.
+- T145–T147: Ensure the harness logs/validates codes in envelopes/results as part of the automatic and manual suites.
+
+Related references
+- [`event-schemas.md`](../specs/003-websocket-event-handlers/contracts/event-schemas.md)
+- [`websocket-handler-interface.md`](../specs/003-websocket-event-handlers/contracts/websocket-handler-interface.md)
+- [`frontend-api.md`](../specs/003-websocket-event-handlers/contracts/frontend-api.md)
diff --git a/python/api/api_log_get.py b/python/api/api_log_get.py
index 8111dbea5c..e4042c5b25 100644
--- a/python/api/api_log_get.py
+++ b/python/api/api_log_get.py
@@ -55,7 +55,7 @@ async def process(self, input: dict, request: Request) -> dict | Response:
                 "returned_items": len(log_items),
                 "start_position": start_pos,
                 "progress": context.log.progress,
-                "progress_active": context.log.progress_active,
+                "progress_active": bool(context.log.progress_active),
                 "items": log_items
             }
         }
diff --git a/python/api/chat_create.py b/python/api/chat_create.py
index f73f3416d8..46139aeb5c 100644
--- a/python/api/chat_create.py
+++ b/python/api/chat_create.py
@@ -12,7 +12,7 @@ async def process(self, input: Input, request: Request) -> Output:
 
         # context instance - get or create
         current_context = AgentContext.get(current_ctxid)
-        
+
         # get/create new context
         new_context = self.use_context(new_ctxid)
 
@@ -25,6 +25,10 @@ async def process(self, input: Input, request: Request) -> Output:
         if current_data_2:
             new_context.set_output_data(projects.CONTEXT_DATA_KEY_PROJECT, current_data_2)
 
+        # New context should appear in other tabs' chat lists via state_push.
+ from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="api.chat_create.CreateChat") + return { "ok": True, "ctxid": new_context.id, diff --git a/python/api/chat_remove.py b/python/api/chat_remove.py index 671e43d9ea..ee6f52aef3 100644 --- a/python/api/chat_remove.py +++ b/python/api/chat_remove.py @@ -25,6 +25,10 @@ async def process(self, input: Input, request: Request) -> Output: for task in tasks: await scheduler.remove_task_by_uuid(task.uuid) + # Context removal affects global chat/task lists in all tabs. + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="api.chat_remove.RemoveChat") + return { "message": "Context removed.", } diff --git a/python/api/chat_reset.py b/python/api/chat_reset.py index 668b08e268..92e8dc723e 100644 --- a/python/api/chat_reset.py +++ b/python/api/chat_reset.py @@ -18,6 +18,10 @@ async def process(self, input: Input, request: Request) -> Output: persist_chat.save_tmp_chat(context) persist_chat.remove_msg_files(ctxid) + # Reset updates context metadata (log guid/version) and must refresh other tabs' lists. + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="api.chat_reset.Reset") + return { "message": "Agent restarted.", } diff --git a/python/api/csrf_token.py b/python/api/csrf_token.py index f4d1d63c0f..fabe1e29a9 100644 --- a/python/api/csrf_token.py +++ b/python/api/csrf_token.py @@ -25,7 +25,6 @@ def requires_csrf(cls) -> bool: return False async def process(self, input: Input, request: Request) -> Output: - # check for allowed origin to prevent dns rebinding attacks origin_check = await self.check_allowed_origin(request) if not origin_check["ok"]: @@ -38,7 +37,6 @@ async def process(self, input: Input, request: Request) -> Output: if "csrf_token" not in session: session["csrf_token"] = secrets.token_urlsafe(32) - # return the csrf token and runtime id return { "ok": True, "token": session["csrf_token"], @@ -119,7 +117,7 @@ def get_default_allowed_origins(self) -> list[str]: def initialize_allowed_origins(self, request: Request): """ If A0 is hosted on a server, add the first visit origin to ALLOWED_ORIGINS. - This simplifies deployment process as users can access their new instance without + This simplifies deployment process as users can access their new instance without additional setup while keeping it secure. 
""" # dotenv value is already set, do nothing @@ -144,5 +142,3 @@ def initialize_allowed_origins(self, request: Request): # if not, add it to the allowed origins allowed_origins.append(req_origin) dotenv.save_dotenv_value(ALLOWED_ORIGINS_KEY, ",".join(allowed_origins)) - - \ No newline at end of file diff --git a/python/api/notifications_history.py b/python/api/notifications_history.py index 1b9e9f3d23..83629288fc 100644 --- a/python/api/notifications_history.py +++ b/python/api/notifications_history.py @@ -13,8 +13,9 @@ async def process(self, input: dict, request: Request) -> dict | Response: notification_manager = AgentContext.get_notification_manager() # Return all notifications for history modal + notifications = notification_manager.output_all() return { - "notifications": [n.output() for n in notification_manager.notifications], + "notifications": notifications, "guid": notification_manager.guid, - "count": len(notification_manager.notifications), + "count": len(notifications), } diff --git a/python/api/notifications_mark_read.py b/python/api/notifications_mark_read.py index 6f3d182d83..4a25f167a8 100644 --- a/python/api/notifications_mark_read.py +++ b/python/api/notifications_mark_read.py @@ -21,15 +21,11 @@ async def process(self, input: dict, request: Request) -> dict | Response: if not notification_ids: return {"success": False, "error": "No notification IDs provided"} + if not isinstance(notification_ids, list): + return {"success": False, "error": "notification_ids must be a list"} + # Mark specific notifications as read - marked_count = 0 - for notification_id in notification_ids: - # Find notification by ID and mark as read - for notification in notification_manager.notifications: - if notification.id == notification_id and not notification.read: - notification.mark_read() - marked_count += 1 - break + marked_count = notification_manager.mark_read_by_ids(notification_ids) return { "success": True, diff --git a/python/api/poll.py b/python/api/poll.py index dbe7105c66..b6989e55d2 100644 --- a/python/api/poll.py +++ b/python/api/poll.py @@ -1,125 +1,14 @@ from python.helpers.api import ApiHandler, Request, Response -from agent import AgentContext, AgentContextType - -from python.helpers.task_scheduler import TaskScheduler -from python.helpers.localization import Localization -from python.helpers.dotenv import get_dotenv_value +from python.helpers.state_snapshot import build_snapshot class Poll(ApiHandler): async def process(self, input: dict, request: Request) -> dict | Response: - ctxid = input.get("context", "") - from_no = input.get("log_from", 0) - notifications_from = input.get("notifications_from", 0) - - # Get timezone from input (default to dotenv default or UTC if not provided) - timezone = input.get("timezone", get_dotenv_value("DEFAULT_USER_TIMEZONE", "UTC")) - Localization.get().set_timezone(timezone) - - # context instance - get or create only if ctxid is provided - if ctxid: - try: - context = self.use_context(ctxid, create_if_not_exists=False) - except Exception as e: - context = None - else: - context = None - - # Get logs only if we have a context - logs = context.log.output(start=from_no) if context else [] - - # Get notifications from global notification manager - notification_manager = AgentContext.get_notification_manager() - notifications = notification_manager.output(start=notifications_from) - - # loop AgentContext._contexts - - # Get a task scheduler instance - scheduler = TaskScheduler.get() - - # Always reload the scheduler on each poll to ensure we 
have the latest task state - # await scheduler.reload() # does not seem to be needed - - # loop AgentContext._contexts and divide into contexts and tasks - - ctxs = [] - tasks = [] - processed_contexts = set() # Track processed context IDs - - all_ctxs = list(AgentContext._contexts.values()) - # First, identify all tasks - for ctx in all_ctxs: - # Skip if already processed - if ctx.id in processed_contexts: - continue - - # Skip BACKGROUND contexts as they should be invisible to users - if ctx.type == AgentContextType.BACKGROUND: - processed_contexts.add(ctx.id) - continue - - # Create the base context data that will be returned - context_data = ctx.output() - - context_task = scheduler.get_task_by_uuid(ctx.id) - # Determine if this is a task-dedicated context by checking if a task with this UUID exists - is_task_context = ( - context_task is not None and context_task.context_id == ctx.id - ) - - if not is_task_context: - ctxs.append(context_data) - else: - # If this is a task, get task details from the scheduler - task_details = scheduler.serialize_task(ctx.id) - if task_details: - # Add task details to context_data with the same field names - # as used in scheduler endpoints to maintain UI compatibility - context_data.update({ - "task_name": task_details.get("name"), # name is for context, task_name for the task name - "uuid": task_details.get("uuid"), - "state": task_details.get("state"), - "type": task_details.get("type"), - "system_prompt": task_details.get("system_prompt"), - "prompt": task_details.get("prompt"), - "last_run": task_details.get("last_run"), - "last_result": task_details.get("last_result"), - "attachments": task_details.get("attachments", []), - "context_id": task_details.get("context_id"), - }) - - # Add type-specific fields - if task_details.get("type") == "scheduled": - context_data["schedule"] = task_details.get("schedule") - elif task_details.get("type") == "planned": - context_data["plan"] = task_details.get("plan") - else: - context_data["token"] = task_details.get("token") - - tasks.append(context_data) - - # Mark as processed - processed_contexts.add(ctx.id) - - # Sort tasks and chats by their creation date, descending - ctxs.sort(key=lambda x: x["created_at"], reverse=True) - tasks.sort(key=lambda x: x["created_at"], reverse=True) - - # data from this server - return { - "deselect_chat": ctxid and not context, - "context": context.id if context else "", - "contexts": ctxs, - "tasks": tasks, - "logs": logs, - "log_guid": context.log.guid if context else "", - "log_version": len(context.log.updates) if context else 0, - "log_progress": context.log.progress if context else 0, - "log_progress_active": context.log.progress_active if context else False, - "paused": context.paused if context else False, - "notifications": notifications, - "notifications_guid": notification_manager.guid, - "notifications_version": len(notification_manager.updates), - } + return await build_snapshot( + context=input.get("context"), + log_from=input.get("log_from", 0), + notifications_from=input.get("notifications_from", 0), + timezone=input.get("timezone"), + ) diff --git a/python/extensions/monologue_end/_50_memorize_fragments.py b/python/extensions/monologue_end/_50_memorize_fragments.py index 932c7bf063..df8c25c156 100644 --- a/python/extensions/monologue_end/_50_memorize_fragments.py +++ b/python/extensions/monologue_end/_50_memorize_fragments.py @@ -26,8 +26,23 @@ async def execute(self, loop_data: LoopData = LoopData(), **kwargs): # memorize in background task = 
asyncio.create_task(self.memorize(loop_data, log_item)) + # Ensure progress bar resets after background work completes when the chat is idle. + task.add_done_callback(lambda _task, owner_no=log_item.no: self._reset_progress_if_idle(owner_no)) return task + def _reset_progress_if_idle(self, owner_no: int) -> None: + try: + ctx = self.agent.context + if ctx and ctx.streaming_agent is None: + # Only reset if this background task is still the source of the current progress. + # This prevents clobbering progress from a newer operation that started meanwhile. + if getattr(ctx.log, "progress_no", None) != owner_no: + return + ctx.log.set_initial_progress() + except Exception: + # Best-effort only: do not let background completion callbacks raise. + return + async def memorize(self, loop_data: LoopData, log_item: LogItem, **kwargs): set = settings.get_settings() @@ -98,7 +113,7 @@ async def log_callback(content): txt = f"{memory}" if set["memory_memorize_consolidation"]: - + try: # Use intelligent consolidation system from python.helpers.memory_consolidation import create_memory_consolidator @@ -183,7 +198,7 @@ async def log_callback(content): ) if rem: log_item.stream(result=f"\nReplaced {len(rem)} previous memories.") - + diff --git a/python/extensions/monologue_end/_51_memorize_solutions.py b/python/extensions/monologue_end/_51_memorize_solutions.py index a66cd4e15d..ae2ad3364b 100644 --- a/python/extensions/monologue_end/_51_memorize_solutions.py +++ b/python/extensions/monologue_end/_51_memorize_solutions.py @@ -17,7 +17,7 @@ async def execute(self, loop_data: LoopData = LoopData(), **kwargs): if not set["memory_memorize_enabled"]: return - + # show full util message log_item = self.agent.context.log.log( type="util", @@ -26,8 +26,23 @@ async def execute(self, loop_data: LoopData = LoopData(), **kwargs): # memorize in background task = asyncio.create_task(self.memorize(loop_data, log_item)) + # Ensure progress bar resets after background work completes when the chat is idle. + task.add_done_callback(lambda _task, owner_no=log_item.no: self._reset_progress_if_idle(owner_no)) return task + def _reset_progress_if_idle(self, owner_no: int) -> None: + try: + ctx = self.agent.context + if ctx and ctx.streaming_agent is None: + # Only reset if this background task is still the source of the current progress. + # This prevents clobbering progress from a newer operation that started meanwhile. + if getattr(ctx.log, "progress_no", None) != owner_no: + return + ctx.log.set_initial_progress() + except Exception: + # Best-effort only: do not let background completion callbacks raise. 
+ return + async def memorize(self, loop_data: LoopData, log_item: LogItem, **kwargs): set = settings.get_settings() diff --git a/python/extensions/response_stream/_20_live_response.py b/python/extensions/response_stream/_20_live_response.py index f9455ed82e..34ef4ff2a2 100644 --- a/python/extensions/response_stream/_20_live_response.py +++ b/python/extensions/response_stream/_20_live_response.py @@ -1,9 +1,5 @@ -from python.helpers import persist_chat, tokens from python.helpers.extension import Extension from agent import LoopData -import asyncio -from python.helpers.log import LogItem -from python.helpers import log class LiveResponse(Extension): @@ -11,18 +7,16 @@ class LiveResponse(Extension): async def execute( self, loop_data: LoopData = LoopData(), - text: str = "", - parsed: dict = {}, + _text: str = "", + parsed: dict | None = None, **kwargs, ): try: - if ( - not "tool_name" in parsed - or parsed["tool_name"] != "response" - or "tool_args" not in parsed - or "text" not in parsed["tool_args"] - or not parsed["tool_args"]["text"] - ): + parsed = parsed if isinstance(parsed, dict) else {} + tool_args = parsed.get("tool_args") + is_response = parsed.get("tool_name") == "response" + has_text = isinstance(tool_args, dict) and bool(tool_args.get("text")) + if not (is_response and has_text): return # not a response # create log message and store it in loop data temporary params @@ -37,5 +31,5 @@ async def execute( # update log message log_item = loop_data.params_temporary["log_item_response"] log_item.update(content=parsed["tool_args"]["text"]) - except Exception as e: + except Exception: pass diff --git a/python/helpers/api.py b/python/helpers/api.py index 6c90c6e566..9593d98c98 100644 --- a/python/helpers/api.py +++ b/python/helpers/api.py @@ -10,12 +10,14 @@ from python.helpers.errors import format_error from werkzeug.serving import make_server +ThreadLockType = Union[threading.Lock, threading.RLock] + Input = dict Output = Union[Dict[str, Any], Response, TypedDict] # type: ignore class ApiHandler: - def __init__(self, app: Flask, thread_lock: threading.Lock): + def __init__(self, app: Flask, thread_lock: ThreadLockType): self.app = app self.thread_lock = thread_lock diff --git a/python/helpers/log.py b/python/helpers/log.py index a799666588..4116b83419 100644 --- a/python/helpers/log.py +++ b/python/helpers/log.py @@ -1,19 +1,43 @@ -from dataclasses import dataclass, field +import copy import json -from typing import Any, Literal, Optional, Dict, TypeVar, TYPE_CHECKING - -T = TypeVar("T") +import threading import uuid -from collections import OrderedDict # Import OrderedDict -from python.helpers.strings import truncate_text_by_ratio -import copy -from typing import TypeVar +from collections import OrderedDict +from dataclasses import dataclass +from typing import Any, Literal, Optional, TYPE_CHECKING, TypeVar, cast + from python.helpers.secrets import get_secrets_manager +from python.helpers.strings import truncate_text_by_ratio if TYPE_CHECKING: from agent import AgentContext + +_MARK_DIRTY_ALL = None +_MARK_DIRTY_FOR_CONTEXT = None + + +def _lazy_mark_dirty_all(*, reason: str | None = None) -> None: + # Lazy import to avoid circular import at module load time (AgentContext -> Log). 
+ global _MARK_DIRTY_ALL + if _MARK_DIRTY_ALL is None: + from python.helpers.state_monitor_integration import mark_dirty_all + + _MARK_DIRTY_ALL = mark_dirty_all + _MARK_DIRTY_ALL(reason=reason) + + +def _lazy_mark_dirty_for_context(context_id: str, *, reason: str | None = None) -> None: + # Lazy import to avoid circular import at module load time (AgentContext -> Log). + global _MARK_DIRTY_FOR_CONTEXT + if _MARK_DIRTY_FOR_CONTEXT is None: + from python.helpers.state_monitor_integration import mark_dirty_for_context + + _MARK_DIRTY_FOR_CONTEXT = mark_dirty_for_context + _MARK_DIRTY_FOR_CONTEXT(context_id, reason=reason) + + T = TypeVar("T") Type = Literal[ @@ -66,14 +90,14 @@ def _truncate_value(val: T) -> T: v = val[k] del val[k] val[_truncate_key(k)] = _truncate_value(v) - return val + return cast(T, val) # If list or tuple, recursively truncate each item if isinstance(val, list): for i in range(len(val)): val[i] = _truncate_value(val[i]) - return val + return cast(T, val) if isinstance(val, tuple): - return tuple(_truncate_value(x) for x in val) # type: ignore + return cast(T, tuple(_truncate_value(x) for x in val)) # Convert non-str values to json for consistent length measurement if isinstance(val, str): @@ -91,7 +115,7 @@ def _truncate_value(val: T) -> T: removed = len(raw) - VALUE_MAX_LEN replacement = f"\n\n<< {removed} Characters hidden >>\n\n" truncated = truncate_text_by_ratio(raw, VALUE_MAX_LEN, replacement, ratio=0.3) - return truncated + return cast(T, truncated) def _truncate_content(text: str | None, type: Type) -> str: @@ -116,9 +140,6 @@ def _truncate_content(text: str | None, type: Type) -> str: return truncated - - - @dataclass class LogItem: log: "Log" @@ -187,10 +208,14 @@ def output(self): class Log: def __init__(self): - self.context: "AgentContext|None" = None # set from outside + self._lock = threading.RLock() + self.context: "AgentContext|None" = None # set from outside self.guid: str = str(uuid.uuid4()) self.updates: list[int] = [] self.logs: list[LogItem] = [] + self.progress: str = "" + self.progress_no: int = 0 + self.progress_active: bool = False self.set_initial_progress() def log( @@ -204,16 +229,17 @@ def log( id: Optional[str] = None, **kwargs, ) -> LogItem: + with self._lock: + # add a minimal item to the log + item = LogItem( + log=self, + no=len(self.logs), + type=type, + ) + self.logs.append(item) - # add a minimal item to the log - item = LogItem( - log=self, - no=len(self.logs), - type=type, - ) - self.logs.append(item) - - # and update it (to have just one implementation) + # Update outside the lock - the heavy masking/truncation work should not hold + # the lock; we only need locking while mutating shared arrays/fields. self._update_item( no=item.no, type=type, @@ -223,8 +249,11 @@ def log( temp=temp, update_progress=update_progress, id=id, + notify_state_monitor=False, **kwargs, ) + + self._notify_state_monitor() return item def _update_item( @@ -237,88 +266,144 @@ def _update_item( temp: bool | None = None, update_progress: ProgressUpdate | None = None, id: Optional[str] = None, + notify_state_monitor: bool = True, **kwargs, ): - item = self.logs[no] - - if id is not None: - item.id = id - - if type is not None: - item.type = type - - if temp is not None: - item.temp = temp - - if update_progress is not None: - item.update_progress = update_progress - + # Capture the effective type for truncation without holding the lock during + # masking/truncation work. 
+ with self._lock: + current_type = self.logs[no].type + type_for_truncation = type if type is not None else current_type - # adjust all content before processing + heading_out: str | None = None if heading is not None: - heading = self._mask_recursive(heading) - heading = _truncate_heading(heading) - item.heading = heading + heading_out = _truncate_heading(self._mask_recursive(heading)) + + content_out: str | None = None if content is not None: - content = self._mask_recursive(content) - content = _truncate_content(content, item.type) - item.content = content + content_out = _truncate_content(self._mask_recursive(content), type_for_truncation) + + kvps_out: OrderedDict | None = None if kvps is not None: - kvps = OrderedDict(copy.deepcopy(kvps)) - kvps = self._mask_recursive(kvps) - kvps = _truncate_value(kvps) - item.kvps = kvps - elif item.kvps is None: - item.kvps = OrderedDict() - if kwargs: - kwargs = copy.deepcopy(kwargs) - kwargs = self._mask_recursive(kwargs) - item.kvps.update(kwargs) + kvps_out_tmp = OrderedDict(copy.deepcopy(kvps)) + kvps_out_tmp = self._mask_recursive(kvps_out_tmp) + kvps_out_tmp = _truncate_value(kvps_out_tmp) + kvps_out = OrderedDict(kvps_out_tmp) - self.updates += [item.no] - self._update_progress_from_item(item) + kwargs_out: dict | None = None + if kwargs: + kwargs_out = copy.deepcopy(kwargs) + kwargs_out = self._mask_recursive(kwargs_out) + + with self._lock: + item = self.logs[no] + + if id is not None: + item.id = id + + if type is not None: + item.type = type + + if temp is not None: + item.temp = temp + + if update_progress is not None: + item.update_progress = update_progress + + if heading_out is not None: + item.heading = heading_out + + if content_out is not None: + item.content = content_out + + if kvps_out is not None: + item.kvps = kvps_out + elif item.kvps is None: + item.kvps = OrderedDict() + + if kwargs_out: + if item.kvps is None: + item.kvps = OrderedDict() + item.kvps.update(kwargs_out) + + self.updates.append(item.no) + + if item.heading and item.update_progress != "none": + if item.no >= self.progress_no: + self.progress = item.heading + self.progress_no = ( + item.no if item.update_progress == "persistent" else -1 + ) + self.progress_active = True + if notify_state_monitor: + self._notify_state_monitor_for_context_update() + + def _notify_state_monitor(self) -> None: + ctx = self.context + if not ctx: + return + # Logs update both the active chat stream (sid-bound) and the global chats list + # (context metadata like last_message/log_version). Broadcast so all tabs refresh + # their chat/task lists without leaking logs (logs are still scoped per-sid). + _lazy_mark_dirty_all(reason="log.Log._notify_state_monitor") + + def _notify_state_monitor_for_context_update(self) -> None: + ctx = self.context + if not ctx: + return + # Log item updates only need to refresh the active chat stream for any sid + # currently projecting this context. Avoid global fanout at high frequency. 
+ _lazy_mark_dirty_for_context(ctx.id, reason="log.Log._update_item") def set_progress(self, progress: str, no: int = 0, active: bool = True): progress = self._mask_recursive(progress) progress = _truncate_progress(progress) - self.progress = progress - if not no: - no = len(self.logs) - self.progress_no = no - self.progress_active = active + changed = False + ctx = self.context + with self._lock: + prev_progress = self.progress + prev_active = self.progress_active + + self.progress = progress + if not no: + no = len(self.logs) + self.progress_no = no + self.progress_active = active + + changed = self.progress != prev_progress or self.progress_active != prev_active + + if changed and ctx: + # Progress changes are included in every snapshot, but push sync requires a + # dirty mark even when no log items changed. + _lazy_mark_dirty_for_context(ctx.id, reason="log.Log.set_progress") def set_initial_progress(self): self.set_progress("Waiting for input", 0, False) def output(self, start=None, end=None): - if start is None: - start = 0 - if end is None: - end = len(self.updates) + with self._lock: + if start is None: + start = 0 + if end is None: + end = len(self.updates) + updates = self.updates[start:end] + logs = list(self.logs) out = [] seen = set() - for update in self.updates[start:end]: - if update not in seen: - out.append(self.logs[update].output()) + for update in updates: + if update not in seen and update < len(logs): + out.append(logs[update].output()) seen.add(update) - return out def reset(self): - self.guid = str(uuid.uuid4()) - self.updates = [] - self.logs = [] + with self._lock: + self.guid = str(uuid.uuid4()) + self.updates = [] + self.logs = [] self.set_initial_progress() - def _update_progress_from_item(self, item: LogItem): - if item.heading and item.update_progress != "none": - if item.no >= self.progress_no: - self.set_progress( - item.heading, - (item.no if item.update_progress == "persistent" else -1), - ) - def _mask_recursive(self, obj: T) -> T: """Recursively mask secrets in nested objects.""" try: @@ -333,13 +418,13 @@ def _mask_recursive(self, obj: T) -> T: # print(f"Context ID mismatch: {self_id} != {current_id}") if isinstance(obj, str): - return secrets_mgr.mask_values(obj) + return cast(Any, secrets_mgr.mask_values(obj)) elif isinstance(obj, dict): return {k: self._mask_recursive(v) for k, v in obj.items()} # type: ignore elif isinstance(obj, list): return [self._mask_recursive(item) for item in obj] # type: ignore else: return obj - except Exception as _e: + except Exception: # If masking fails, return original object - return obj \ No newline at end of file + return obj diff --git a/python/helpers/mcp_server.py b/python/helpers/mcp_server.py index 3c0308ed9c..d41fdeb725 100644 --- a/python/helpers/mcp_server.py +++ b/python/helpers/mcp_server.py @@ -1,9 +1,11 @@ import os +import asyncio from typing import Annotated, Literal, Union from urllib.parse import urlparse from openai import BaseModel from pydantic import Field -from fastmcp import FastMCP # type: ignore +import fastmcp +from fastmcp import FastMCP import contextvars from agent import AgentContext, AgentContextType, UserMessage @@ -15,7 +17,8 @@ from starlette.middleware.base import BaseHTTPMiddleware from starlette.exceptions import HTTPException as StarletteHTTPException from starlette.types import ASGIApp, Receive, Scope, Send -from fastmcp.server.http import create_sse_app # type: ignore +from fastmcp.server.http import create_sse_app, create_base_app, build_resource_metadata_url # type: ignore 
+from starlette.routing import Mount # type: ignore from starlette.requests import Request import threading @@ -319,37 +322,39 @@ def reconfigure(self, token: str): message_path = f"/t-{self.token}/messages/" # Update settings in the MCP server instance if provided - mcp_server.settings.message_path = message_path - mcp_server.settings.sse_path = sse_path + # Keep FastMCP settings synchronized so downstream helpers that read these + # values (including deprecated accessors) resolve the runtime paths. + fastmcp.settings.message_path = message_path + fastmcp.settings.sse_path = sse_path + fastmcp.settings.streamable_http_path = http_path # Create new MCP apps with updated settings with self._lock: + middleware = [Middleware(BaseHTTPMiddleware, dispatch=mcp_middleware)] + self.sse_app = create_sse_app( server=mcp_server, - message_path=mcp_server.settings.message_path, - sse_path=mcp_server.settings.sse_path, - auth_server_provider=mcp_server._auth_server_provider, - auth_settings=mcp_server.settings.auth, - debug=mcp_server.settings.debug, - routes=mcp_server._additional_http_routes, - middleware=[Middleware(BaseHTTPMiddleware, dispatch=mcp_middleware)], + message_path=message_path, + sse_path=sse_path, + auth=mcp_server.auth, + debug=fastmcp.settings.debug, + middleware=list(middleware), ) - # For HTTP, we need to create a custom app since the lifespan manager - # doesn't work properly in our Flask/Werkzeug environment self.http_app = self._create_custom_http_app( http_path, - mcp_server._auth_server_provider, - mcp_server.settings.auth, - mcp_server.settings.debug, - mcp_server._additional_http_routes, + middleware=list(middleware), ) - def _create_custom_http_app(self, streamable_http_path, auth_server_provider, auth_settings, debug, routes): - """Create a custom HTTP app that manages the session manager manually.""" - from fastmcp.server.http import setup_auth_middleware_and_routes, create_base_app # type: ignore + def _create_custom_http_app( + self, + streamable_http_path: str, + *, + middleware: list[Middleware], + ) -> ASGIApp: + """Create a Streamable HTTP app with manual session manager lifecycle.""" + from mcp.server.streamable_http_manager import StreamableHTTPSessionManager # type: ignore - from starlette.routing import Mount from mcp.server.auth.middleware.bearer_auth import RequireAuthMiddleware # type: ignore import anyio @@ -357,9 +362,6 @@ def _create_custom_http_app(self, streamable_http_path, auth_server_provider, au server_middleware = [] self.http_session_task_group = None - - - # Create session manager self.http_session_manager = StreamableHTTPSessionManager( app=mcp_server._mcp_server, event_store=None, @@ -367,10 +369,7 @@ def _create_custom_http_app(self, streamable_http_path, auth_server_provider, au stateless=False, ) - - # Custom ASGI handler that ensures task group is initialized async def handle_streamable_http(scope, receive, send): - # Lazy initialization of task group if self.http_session_task_group is None: self.http_session_task_group = anyio.create_task_group() await self.http_session_task_group.__aenter__() @@ -380,20 +379,25 @@ async def handle_streamable_http(scope, receive, send): if self.http_session_manager: await self.http_session_manager.handle_request(scope, receive, send) - # Get auth middleware and routes - auth_middleware, auth_routes, required_scopes = setup_auth_middleware_and_routes( - auth_server_provider, auth_settings - ) + auth_provider = mcp_server.auth - server_routes.extend(auth_routes) - server_middleware.extend(auth_middleware) + 
if auth_provider: + server_routes.extend(auth_provider.get_routes(mcp_path=streamable_http_path)) + server_middleware.extend(auth_provider.get_middleware()) + + resource_url = auth_provider._get_resource_url(streamable_http_path) + resource_metadata_url = ( + build_resource_metadata_url(resource_url) if resource_url else None + ) - # Add StreamableHTTP routes with or without auth - if auth_server_provider: server_routes.append( Mount( streamable_http_path, - app=RequireAuthMiddleware(handle_streamable_http, required_scopes), + app=RequireAuthMiddleware( + handle_streamable_http, + auth_provider.required_scopes, + resource_metadata_url, + ), ) ) else: @@ -404,18 +408,16 @@ async def handle_streamable_http(scope, receive, send): ) ) - # Add custom routes with lowest precedence - if routes: - server_routes.extend(routes) + additional_routes = mcp_server._get_additional_http_routes() + if additional_routes: + server_routes.extend(additional_routes) - # Add middleware - server_middleware.append(Middleware(BaseHTTPMiddleware, dispatch=mcp_middleware)) + server_middleware.extend(middleware) - # Create and return the app return create_base_app( routes=server_routes, middleware=server_middleware, - debug=debug, + debug=fastmcp.settings.debug, ) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: diff --git a/python/helpers/notification.py b/python/helpers/notification.py index 1aefff7b4d..a15cb1b385 100644 --- a/python/helpers/notification.py +++ b/python/helpers/notification.py @@ -1,5 +1,6 @@ from dataclasses import dataclass import uuid +import threading from datetime import datetime, timezone, timedelta from enum import Enum @@ -11,6 +12,7 @@ class NotificationType(Enum): ERROR = "error" PROGRESS = "progress" + class NotificationPriority(Enum): NORMAL = 10 HIGH = 20 @@ -40,7 +42,7 @@ def __post_init__(self): def mark_read(self): self.read = True - self.manager._update_item(self.no, read=True) + self.manager.update_item(self.no, read=True) def output(self): return { @@ -60,6 +62,7 @@ def output(self): class NotificationManager: def __init__(self, max_notifications: int = 100): + self._lock = threading.RLock() self.guid: str = str(uuid.uuid4()) self.updates: list[int] = [] self.notifications: list[NotificationItem] = [] @@ -90,75 +93,136 @@ def add_notification( display_time: int = 3, group: str = "", ) -> NotificationItem: - # Create notification item - item = NotificationItem( - manager=self, - no=len(self.notifications), - type=NotificationType(type), - priority=NotificationPriority(priority), - title=title, - message=message, - detail=detail, - timestamp=datetime.now(timezone.utc), - display_time=display_time, - group=group, - ) - - # Add to notifications - self.notifications.append(item) - self.updates.append(item.no) - - # Enforce limit - self._enforce_limit() - + with self._lock: + # Create notification item + item = NotificationItem( + manager=self, + no=len(self.notifications), + type=NotificationType(type), + priority=NotificationPriority(priority), + title=title, + message=message, + detail=detail, + timestamp=datetime.now(timezone.utc), + display_time=display_time, + group=group, + ) + + # Add to notifications + self.notifications.append(item) + self.updates.append(item.no) + + # Enforce limit + self._enforce_limit() + + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="notification.NotificationManager.add_notification") return item def _enforce_limit(self): - if len(self.notifications) > self.max_notifications: - # 
Remove oldest notifications - to_remove = len(self.notifications) - self.max_notifications - self.notifications = self.notifications[to_remove:] - # Adjust notification numbers - for i, notification in enumerate(self.notifications): - notification.no = i - # Adjust updates list - self.updates = [no - to_remove for no in self.updates if no >= to_remove] + with self._lock: + if len(self.notifications) > self.max_notifications: + # Remove oldest notifications + to_remove = len(self.notifications) - self.max_notifications + self.notifications = self.notifications[to_remove:] + # Adjust notification numbers + for i, notification in enumerate(self.notifications): + notification.no = i + # Adjust updates list + self.updates = [no - to_remove for no in self.updates if no >= to_remove] def get_recent_notifications(self, seconds: int = 30) -> list[NotificationItem]: cutoff = datetime.now(timezone.utc) - timedelta(seconds=seconds) - return [n for n in self.notifications if n.timestamp >= cutoff] + with self._lock: + return [n for n in self.notifications if n.timestamp >= cutoff] def output(self, start: int | None = None, end: int | None = None) -> list[dict]: - if start is None: - start = 0 - if end is None: - end = len(self.updates) + with self._lock: + if start is None: + start = 0 + if end is None: + end = len(self.updates) + updates = self.updates[start:end] + notifications = list(self.notifications) out = [] seen = set() - for update in self.updates[start:end]: - if update not in seen and update < len(self.notifications): - out.append(self.notifications[update].output()) + for update in updates: + if update not in seen and update < len(notifications): + out.append(notifications[update].output()) seen.add(update) - return out + def output_all(self) -> list[dict]: + with self._lock: + notifications = list(self.notifications) + return [n.output() for n in notifications] + + def mark_read_by_ids(self, notification_ids: list[str]) -> int: + ids = {nid for nid in notification_ids if isinstance(nid, str) and nid.strip()} + if not ids: + return 0 + + changed_nos: list[int] = [] + with self._lock: + for notification in self.notifications: + if notification.id in ids and not notification.read: + notification.read = True + changed_nos.append(notification.no) + if changed_nos: + self.updates.extend(changed_nos) + + if not changed_nos: + return 0 + + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="notification.NotificationManager.mark_read_by_ids") + return len(changed_nos) + + def update_item(self, no: int, **kwargs) -> None: + self._update_item(no, **kwargs) + def _update_item(self, no: int, **kwargs): - if no < len(self.notifications): - item = self.notifications[no] - for key, value in kwargs.items(): - if hasattr(item, key): - setattr(item, key, value) - self.updates.append(no) + changed = False + with self._lock: + if no < len(self.notifications): + item = self.notifications[no] + for key, value in kwargs.items(): + if hasattr(item, key): + setattr(item, key, value) + self.updates.append(no) + changed = True + + if not changed: + return + + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="notification.NotificationManager._update_item") def mark_all_read(self): - for notification in self.notifications: - notification.read = True + changed_nos: list[int] = [] + with self._lock: + for notification in self.notifications: + if not notification.read: + notification.read = True + changed_nos.append(notification.no) + if 
changed_nos: + self.updates.extend(changed_nos) + + if not changed_nos: + return + + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="notification.NotificationManager.mark_all_read") def clear_all(self): - self.notifications = [] - self.updates = [] - self.guid = str(uuid.uuid4()) + with self._lock: + self.notifications = [] + self.updates = [] + self.guid = str(uuid.uuid4()) + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="notification.NotificationManager.clear_all") def get_notifications_by_type(self, type: NotificationType) -> list[NotificationItem]: - return [n for n in self.notifications if n.type == type] \ No newline at end of file + with self._lock: + return [n for n in self.notifications if n.type == type] diff --git a/python/helpers/persist_chat.py b/python/helpers/persist_chat.py index 55867e6fe5..3e79030b95 100644 --- a/python/helpers/persist_chat.py +++ b/python/helpers/persist_chat.py @@ -44,7 +44,7 @@ def save_tmp_chat(context: AgentContext): def save_tmp_chats(): """Save all contexts to the chats folder""" - for _, context in AgentContext._contexts.items(): + for context in AgentContext.all(): # Skip BACKGROUND contexts as they should be ephemeral if context.type == AgentContextType.BACKGROUND: continue @@ -164,13 +164,17 @@ def _serialize_agent(agent: Agent): def _serialize_log(log: Log): + # Guard against concurrent log mutations while serializing. + with log._lock: + logs = [item.output() for item in log.logs[-LOG_SIZE:]] # serialize LogItem objects + guid = log.guid + progress = log.progress + progress_no = log.progress_no return { - "guid": log.guid, - "logs": [ - item.output() for item in log.logs[-LOG_SIZE:] - ], # serialize LogItem objects - "progress": log.progress, - "progress_no": log.progress_no, + "guid": guid, + "logs": logs, + "progress": progress, + "progress_no": progress_no, } @@ -271,6 +275,7 @@ def _deserialize_log(data: dict[str, Any]) -> "Log": content=item_data.get("content", ""), kvps=OrderedDict(item_data["kvps"]) if item_data["kvps"] else None, temp=item_data.get("temp", False), + id=item_data.get("id", None), ) ) log.updates.append(i) diff --git a/python/helpers/print_style.py b/python/helpers/print_style.py index 188697c866..25b7a408eb 100644 --- a/python/helpers/print_style.py +++ b/python/helpers/print_style.py @@ -1,8 +1,20 @@ import os, webcolors, html import sys from datetime import datetime +from collections.abc import Mapping from . import files +_runtime_module = None + + +def _get_runtime(): + global _runtime_module + if _runtime_module is None: + from . 
import runtime as runtime_module # Local import to avoid circular dependency + + _runtime_module = runtime_module + return _runtime_module + class PrintStyle: last_endline = True log_file_path = None @@ -90,9 +102,39 @@ def _close_html_log(): with open(PrintStyle.log_file_path, "a") as f: f.write("") + @staticmethod + def _format_args(args, sep): + if not args: + return "" + + head, *tail = args + + if isinstance(head, str) and tail and ("%" in head or "{" in head): + is_mapping = len(tail) == 1 and isinstance(tail[0], Mapping) + try: + return head % (tail[0] if is_mapping else tuple(tail)) + except (TypeError, ValueError, KeyError): + try: + return head.format(**tail[0]) if is_mapping else head.format(*tail) + except (KeyError, IndexError, ValueError): + pass + + return sep.join(str(item) for item in args) + + @staticmethod + def _prefixed_args(prefix: str, args: tuple) -> tuple: + if not args: + return (f"{prefix}:",) + + first, *rest = args + if isinstance(first, str): + return (f"{prefix}: {first}", *rest) + + return (f"{prefix}:", *args) + def get(self, *args, sep=' ', **kwargs): - text = sep.join(map(str, args)) - + text = self._format_args(args, sep) + # Automatically mask secrets in all print output try: if not hasattr(self, "secrets_mgr"): @@ -102,25 +144,29 @@ def get(self, *args, sep=' ', **kwargs): except Exception: # If masking fails, proceed without masking to avoid breaking functionality pass - + return text, self._get_styled_text(text), self._get_html_styled_text(text) - def print(self, *args, sep=' ', **kwargs): + def print(self, *args, sep=' ', end='\n', flush=True): self._add_padding_if_needed() if not PrintStyle.last_endline: - print() + if not self.log_only: + print() self._log_html("
") - plain_text, styled_text, html_text = self.get(*args, sep=sep, **kwargs) + plain_text, styled_text, html_text = self.get(*args, sep=sep) if not self.log_only: - print(styled_text, end='\n', flush=True) - self._log_html(html_text+"
\n") - PrintStyle.last_endline = True - - def stream(self, *args, sep=' ', **kwargs): + print(styled_text, end=end, flush=flush) + if end.endswith('\n'): + self._log_html(html_text + "
\n") + else: + self._log_html(html_text) + PrintStyle.last_endline = end.endswith('\n') + + def stream(self, *args, sep=' ', flush=True): self._add_padding_if_needed() - plain_text, styled_text, html_text = self.get(*args, sep=sep, **kwargs) + plain_text, styled_text, html_text = self.get(*args, sep=sep) if not self.log_only: - print(styled_text, end='', flush=True) + print(styled_text, end='', flush=flush) self._log_html(html_text) PrintStyle.last_endline = False @@ -129,32 +175,46 @@ def is_last_line_empty(self): return bool(lines) and not lines[-1].strip() @staticmethod - def standard(text: str): - PrintStyle().print(text) + def standard(*args, sep=' ', end='\n', flush=True): + PrintStyle().print(*args, sep=sep, end=end, flush=flush) @staticmethod - def hint(text: str): - PrintStyle(font_color="#6C3483", padding=True).print("Hint: "+text) + def hint(*args, sep=' ', end='\n', flush=True): + prefixed = PrintStyle._prefixed_args("Hint", args) + PrintStyle(font_color="#6C3483", padding=True).print(*prefixed, sep=sep, end=end, flush=flush) @staticmethod - def info(text: str): - PrintStyle(font_color="#0000FF", padding=True).print("Info: "+text) + def info(*args, sep=' ', end='\n', flush=True): + prefixed = PrintStyle._prefixed_args("Info", args) + PrintStyle(font_color="#0000FF", padding=True).print(*prefixed, sep=sep, end=end, flush=flush) @staticmethod - def success(text: str): - PrintStyle(font_color="#008000", padding=True).print("Success: "+text) + def success(*args, sep=' ', end='\n', flush=True): + prefixed = PrintStyle._prefixed_args("Success", args) + PrintStyle(font_color="#008000", padding=True).print(*prefixed, sep=sep, end=end, flush=flush) @staticmethod - def warning(text: str): - PrintStyle(font_color="#FFA500", padding=True).print("Warning: "+text) + def warning(*args, sep=' ', end='\n', flush=True): + prefixed = PrintStyle._prefixed_args("Warning", args) + PrintStyle(font_color="#FFA500", padding=True).print(*prefixed, sep=sep, end=end, flush=flush) @staticmethod - def debug(text: str): - PrintStyle(font_color="#808080", padding=True).print("Debug: "+text) + def debug(*args, sep=' ', end='\n', flush=True): + # Only emit debug output when running in development mode + try: + runtime_module = _get_runtime() + if not runtime_module.is_development(): + return + except Exception: + # If runtime detection fails, default to emitting to avoid hiding logs during development setup + pass + prefixed = PrintStyle._prefixed_args("Debug", args) + PrintStyle(font_color="#808080", padding=True).print(*prefixed, sep=sep, end=end, flush=flush) @staticmethod - def error(text: str): - PrintStyle(font_color="red", padding=True).print("Error: "+text) + def error(*args, sep=' ', end='\n', flush=True): + prefixed = PrintStyle._prefixed_args("Error", args) + PrintStyle(font_color="red", padding=True).print(*prefixed, sep=sep, end=end, flush=flush) # Ensure HTML file is closed properly when the program exits import atexit diff --git a/python/helpers/process.py b/python/helpers/process.py index 59c994d321..fe535f751f 100644 --- a/python/helpers/process.py +++ b/python/helpers/process.py @@ -33,4 +33,4 @@ def restart_process(): def exit_process(): PrintStyle.standard("Exiting process...") - sys.exit(0) \ No newline at end of file + sys.exit(0) diff --git a/python/helpers/projects.py b/python/helpers/projects.py index 6e25738c6e..73deeba469 100644 --- a/python/helpers/projects.py +++ b/python/helpers/projects.py @@ -27,7 +27,7 @@ class FileStructureInjectionSettings(TypedDict): class 
SubAgentSettings(TypedDict): enabled: bool - + class BasicProjectData(TypedDict): title: str description: str @@ -229,7 +229,7 @@ def _get_projects_list(parent_dir): return projects -def activate_project(context_id: str, name: str): +def activate_project(context_id: str, name: str, *, mark_dirty: bool = True): from agent import AgentContext data = load_edit_project_data(name) @@ -247,8 +247,12 @@ def activate_project(context_id: str, name: str): # persist persist_chat.save_tmp_chat(context) + if mark_dirty: + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="projects.activate_project") + -def deactivate_project(context_id: str): +def deactivate_project(context_id: str, *, mark_dirty: bool = True): from agent import AgentContext context = AgentContext.get(context_id) @@ -260,24 +264,34 @@ def deactivate_project(context_id: str): # persist persist_chat.save_tmp_chat(context) + if mark_dirty: + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="projects.deactivate_project") + def reactivate_project_in_chats(name: str): from agent import AgentContext for context in AgentContext.all(): if context.get_data(CONTEXT_DATA_KEY_PROJECT) == name: - activate_project(context.id, name) + activate_project(context.id, name, mark_dirty=False) persist_chat.save_tmp_chat(context) + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="projects.reactivate_project_in_chats") + def deactivate_project_in_chats(name: str): from agent import AgentContext for context in AgentContext.all(): if context.get_data(CONTEXT_DATA_KEY_PROJECT) == name: - deactivate_project(context.id) + deactivate_project(context.id, mark_dirty=False) persist_chat.save_tmp_chat(context) + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="projects.deactivate_project_in_chats") + def build_system_prompt_vars(name: str): project_data = load_basic_project_data(name) @@ -409,7 +423,7 @@ def get_file_structure(name: str, basic_data: BasicProjectData|None=None) -> str project_folder = get_project_folder(name) if basic_data is None: basic_data = load_basic_project_data(name) - + tree = str(file_tree.file_tree( project_folder, max_depth=basic_data["file_structure"]["max_depth"], @@ -425,5 +439,3 @@ def get_file_structure(name: str, basic_data: BasicProjectData|None=None) -> str tree += "\n # Empty" return tree - - \ No newline at end of file diff --git a/python/helpers/settings.py b/python/helpers/settings.py index f09124bd70..edd18c87e2 100644 --- a/python/helpers/settings.py +++ b/python/helpers/settings.py @@ -123,6 +123,8 @@ class Settings(TypedDict): rfc_port_ssh: int shell_interface: Literal['local','ssh'] + websocket_server_restart_enabled: bool + uvicorn_access_logs_enabled: bool stt_model_size: str stt_language: str @@ -949,6 +951,61 @@ def convert_out(settings: Settings) -> SettingsOutput: } ) + testing_section: SettingsSection | None = None + if runtime.is_development(): + testing_fields: list[SettingsField] = [ + { + "id": "websocket_tester", + "title": "WebSocket Test Harness", + "description": "Open the developer harness to run automated and manual WebSocket validation suites.", + "type": "button", + "value": "Open Harness", + } + ] + testing_fields.append( + { + "id": "websocket_event_console", + "title": "WebSocket Event Console", + "description": "Inspect inbound/outbound envelopes and lifecycle events in real time (development only).", + "type": "button", + "value": 
"Open Console", + } + ) + + testing_section = { + "id": "dev_testing", + "title": "Testing", + "description": "Utilities for validating WebSocket infrastructure in development environments.", + "fields": testing_fields, + "tab": "developer", + } + + dev_fields.append( + { + "id": "websocket_server_restart_enabled", + "title": "Broadcast server restart event", + "description": "Emit a fire-and-forget `server_restart` broadcast to clients after the server starts.", + "type": "switch", + "value": settings.get( + "websocket_server_restart_enabled", + default_settings["websocket_server_restart_enabled"], + ), + } + ) + + dev_fields.append( + { + "id": "uvicorn_access_logs_enabled", + "title": "Enable uvicorn access logs", + "description": "Temporarily enable uvicorn access logs for debugging WebSocket transport issues (default off).", + "type": "switch", + "value": settings.get( + "uvicorn_access_logs_enabled", + default_settings["uvicorn_access_logs_enabled"], + ), + } + ) + dev_section: SettingsSection = { "id": "dev", "title": "Development", @@ -1309,30 +1366,32 @@ def convert_out(settings: Settings) -> SettingsOutput: "tab": "backup", } - # Add the section to the result - result: SettingsOutput = { - "sections": [ - agent_section, - chat_model_section, - util_model_section, - browser_model_section, - embed_model_section, - memory_section, - speech_section, - api_keys_section, - litellm_section, - secrets_section, - auth_section, - mcp_client_section, - mcp_server_section, - a2a_section, - external_api_section, - update_checker_section, - backup_section, - dev_section, - # code_exec_section, - ] - } + sections: list[SettingsSection] = [ + agent_section, + chat_model_section, + util_model_section, + browser_model_section, + embed_model_section, + memory_section, + speech_section, + api_keys_section, + litellm_section, + secrets_section, + auth_section, + mcp_client_section, + mcp_server_section, + a2a_section, + external_api_section, + update_checker_section, + backup_section, + dev_section, + # code_exec_section, + ] + + if testing_section: + sections.append(testing_section) + + result: SettingsOutput = {"sections": sections} return result @@ -1444,6 +1503,7 @@ def _read_settings_file() -> Settings | None: content = files.read_file(SETTINGS_FILE) parsed = json.loads(content) return normalize_settings(parsed) + return None def _write_settings_file(settings: Settings): @@ -1552,6 +1612,8 @@ def get_default_settings() -> Settings: rfc_port_http=get_default_value("rfc_port_http", 55080), rfc_port_ssh=get_default_value("rfc_port_ssh", 55022), shell_interface=get_default_value("shell_interface", "local" if runtime.is_dockerized() else "ssh"), + websocket_server_restart_enabled=get_default_value("websocket_server_restart_enabled", True), + uvicorn_access_logs_enabled=get_default_value("uvicorn_access_logs_enabled", False), stt_model_size=get_default_value("stt_model_size", "base"), stt_language=get_default_value("stt_language", "en"), stt_silence_threshold=get_default_value("stt_silence_threshold", 0.3), @@ -1578,7 +1640,7 @@ def _apply_settings(previous: Settings | None): from initialize import initialize_agent config = initialize_agent() - for ctx in AgentContext._contexts.values(): + for ctx in AgentContext.all(): ctx.config = config # reinitialize context config with new settings # apply config to agents agent = ctx.agent0 @@ -1684,14 +1746,14 @@ def _env_to_dict(data: str): line = line.strip() if not line or line.startswith('#'): continue - + if '=' not in line: continue - + key, value = 
line.split('=', 1) key = key.strip() value = value.strip() - + # If quoted, treat as string if value.startswith('"') and value.endswith('"'): result[key] = value[1:-1].replace('\\"', '"') # Unescape quotes @@ -1703,7 +1765,7 @@ def _env_to_dict(data: str): result[key] = json.loads(value) except (json.JSONDecodeError, ValueError): result[key] = value - + return result @@ -1720,7 +1782,7 @@ def _dict_to_env(data_dict): else: # Numbers and other types as unquoted strings lines.append(f'{key}={value}') - + return "\n".join(lines) diff --git a/python/helpers/state_monitor.py b/python/helpers/state_monitor.py new file mode 100644 index 0000000000..f1c9ddaba4 --- /dev/null +++ b/python/helpers/state_monitor.py @@ -0,0 +1,380 @@ +from __future__ import annotations + +import asyncio +import threading +import time +from dataclasses import dataclass, field +from typing import Any, TYPE_CHECKING + +from python.helpers import runtime +from python.helpers.print_style import PrintStyle +from python.helpers.state_snapshot import ( + StateRequestV1, + advance_state_request_after_snapshot, + build_snapshot_from_request, +) +from python.helpers.websocket import ConnectionNotFoundError + +if TYPE_CHECKING: # pragma: no cover - hints only + from python.helpers.websocket_manager import WebSocketManager + + +ConnectionIdentity = tuple[str, str] # (namespace, sid) + + +@dataclass +class ConnectionProjection: + namespace: str + sid: str + request: StateRequestV1 | None = None + seq: int = 0 + seq_base: int = 0 + # Incremented on every dirty signal. Used to coalesce bursts without delaying + # pushes indefinitely during continuous activity (throttled coalescing). + dirty_version: int = 0 + pushed_version: int = 0 + # Development-only diagnostics - last known cause of the most recent dirty wave. + dirty_reason: str | None = None + dirty_wave_id: str | None = None + created_at: float = field(default_factory=time.time) + + +class StateMonitor: + """Per-sid dirty tracking with debounced snapshot push scheduling.""" + + def __init__(self, debounce_seconds: float = 0.025) -> None: + self.debounce_seconds = float(debounce_seconds) + self._lock = threading.RLock() + self._projections: dict[ConnectionIdentity, ConnectionProjection] = {} + self._debounce_handles: dict[ConnectionIdentity, asyncio.TimerHandle] = {} + self._push_tasks: dict[ConnectionIdentity, asyncio.Task[None]] = {} + self._manager: WebSocketManager | None = None + self._emit_handler_id: str | None = None + self._dispatcher_loop: asyncio.AbstractEventLoop | None = None + self._dirty_wave_seq: int = 0 + + def bind_manager(self, manager: "WebSocketManager", *, handler_id: str | None = None) -> None: + with self._lock: + self._manager = manager + if handler_id: + self._emit_handler_id = handler_id + # Use the manager's dispatcher loop for all scheduling so mark_dirty can be + # invoked safely from non-async contexts and other threads. 
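# --- Aside, not part of the patch: the hand-off described above is the standard
# asyncio pattern for signalling a loop owned by another thread; a minimal
# self-contained sketch (all names hypothetical):
import asyncio
import threading
import time

_sketch_loop = asyncio.new_event_loop()
threading.Thread(target=_sketch_loop.run_forever, daemon=True).start()

def signal_dirty_from_any_thread() -> None:
    # loop.call_soon() is not thread-safe; call_soon_threadsafe is the only
    # supported way to schedule a callback onto a loop run by another thread.
    _sketch_loop.call_soon_threadsafe(print, "dirty signal handled on loop thread")

signal_dirty_from_any_thread()
time.sleep(0.1)  # give the daemon loop thread a moment to run the callback
# --- end aside ---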
+ self._dispatcher_loop = getattr(manager, "_dispatcher_loop", None) + if runtime.is_development(): + PrintStyle.debug( + f"[StateMonitor] bind_manager handler_id={handler_id or self._emit_handler_id}" + ) + + def register_sid(self, namespace: str, sid: str) -> None: + identity: ConnectionIdentity = (namespace, sid) + with self._lock: + self._projections.setdefault( + identity, ConnectionProjection(namespace=namespace, sid=sid) + ) + if runtime.is_development(): + PrintStyle.debug(f"[StateMonitor] register_sid namespace={namespace} sid={sid}") + + def unregister_sid(self, namespace: str, sid: str) -> None: + identity: ConnectionIdentity = (namespace, sid) + with self._lock: + handle = self._debounce_handles.pop(identity, None) + if handle is not None: + handle.cancel() + task = self._push_tasks.pop(identity, None) + if task is not None: + task.cancel() + self._projections.pop(identity, None) + if runtime.is_development(): + PrintStyle.debug( + f"[StateMonitor] unregister_sid namespace={namespace} sid={sid}" + ) + + def mark_dirty_all(self, *, reason: str | None = None) -> None: + wave_id = None + if runtime.is_development(): + with self._lock: + self._dirty_wave_seq += 1 + wave_id = f"all_{self._dirty_wave_seq}" + with self._lock: + identities = list(self._projections.keys()) + for namespace, sid in identities: + self.mark_dirty(namespace, sid, reason=reason, wave_id=wave_id) + + def mark_dirty_for_context(self, context_id: str, *, reason: str | None = None) -> None: + if not isinstance(context_id, str) or not context_id.strip(): + return + target = context_id.strip() + wave_id = None + if runtime.is_development(): + with self._lock: + self._dirty_wave_seq += 1 + wave_id = f"ctx_{self._dirty_wave_seq}" + with self._lock: + identities = [ + identity + for identity, projection in self._projections.items() + if projection.request is not None and projection.request.context == target + ] + for namespace, sid in identities: + self.mark_dirty(namespace, sid, reason=reason, wave_id=wave_id) + + def update_projection( + self, + namespace: str, + sid: str, + *, + request: StateRequestV1, + seq_base: int, + ) -> None: + identity: ConnectionIdentity = (namespace, sid) + with self._lock: + projection = self._projections.setdefault( + identity, ConnectionProjection(namespace=namespace, sid=sid) + ) + projection.request = request + projection.seq_base = seq_base + projection.seq = seq_base + if runtime.is_development(): + PrintStyle.debug( + f"[StateMonitor] update_projection namespace={namespace} sid={sid} context={request.context!r} " + f"log_from={request.log_from} notifications_from={request.notifications_from} " + f"timezone={request.timezone!r} seq_base={seq_base}" + ) + + def mark_dirty( + self, + namespace: str, + sid: str, + *, + reason: str | None = None, + wave_id: str | None = None, + ) -> None: + identity: ConnectionIdentity = (namespace, sid) + loop = self._dispatcher_loop + if loop is None or loop.is_closed(): + try: + loop = asyncio.get_running_loop() + except RuntimeError: + return + + try: + running_loop = asyncio.get_running_loop() + except RuntimeError: + running_loop = None + + if running_loop is loop: + self._mark_dirty_on_loop(identity, reason=reason, wave_id=wave_id) + return + + loop.call_soon_threadsafe(self._mark_dirty_on_loop, identity, reason, wave_id) + + def _mark_dirty_on_loop( + self, + identity: ConnectionIdentity, + reason: str | None = None, + wave_id: str | None = None, + ) -> None: + with self._lock: + projection = self._projections.get(identity) + if projection is 
None: + return + projection.dirty_version += 1 + if runtime.is_development(): + projection.dirty_reason = ( + reason.strip() + if isinstance(reason, str) and reason.strip() + else "unknown" + ) + projection.dirty_wave_id = wave_id + self._schedule_debounce_on_loop(identity) + + def _schedule_debounce_on_loop(self, identity: ConnectionIdentity) -> None: + loop = asyncio.get_running_loop() + with self._lock: + projection = self._projections.get(identity) + if projection is None: + return + # INVARIANT.STATE.GATING: do not schedule pushes until a successful state_request + # established seq_base for this sid. + if projection.seq_base <= 0: + return + + # Throttled coalescing: schedule at most one push per debounce window. + # Do not postpone the scheduled push on subsequent dirties; this keeps + # streaming updates smooth while still capping to <= 1 push / 100ms / sid. + existing = self._debounce_handles.get(identity) + if existing is not None and not existing.cancelled(): + return + + running = self._push_tasks.get(identity) + if running is not None and not running.done(): + return + + handle = loop.call_later( + self.debounce_seconds, self._on_debounce_fire, identity + ) + self._debounce_handles[identity] = handle + if runtime.is_development(): + PrintStyle.debug( + f"[StateMonitor] schedule_push namespace={projection.namespace} sid={projection.sid} " + f"delay_s={self.debounce_seconds} " + f"dirty={projection.dirty_version} pushed={projection.pushed_version} " + f"reason={projection.dirty_reason!r} wave={projection.dirty_wave_id!r}" + ) + + def _on_debounce_fire(self, identity: ConnectionIdentity) -> None: + with self._lock: + self._debounce_handles.pop(identity, None) + existing = self._push_tasks.get(identity) + if existing is not None and not existing.done(): + return + task = asyncio.create_task(self._flush_push(identity)) + self._push_tasks[identity] = task + + async def _flush_push(self, identity: ConnectionIdentity) -> None: + namespace, sid = identity + task = asyncio.current_task() + base_version = 0 + dirty_reason: str | None = None + dirty_wave_id: str | None = None + try: + with self._lock: + projection = self._projections.get(identity) + manager = self._manager + handler_id = self._emit_handler_id + + if projection is None: + return + if manager is None: + # The handler binds the manager on connect; if not bound yet, + # we cannot emit. Keep dirty cleared to avoid infinite retry loops. + return + if projection.seq_base <= 0: + # INVARIANT.STATE.GATING: no push before a successful state_request. + return + + request = projection.request + if request is None: + return + base_version = projection.dirty_version + dirty_reason = projection.dirty_reason + dirty_wave_id = projection.dirty_wave_id + + snapshot = await build_snapshot_from_request(request=request) + + with self._lock: + projection = self._projections.get(identity) + if projection is None: + return + if projection.request != request: + return + + # INVARIANT.STATE.SEQ_MONOTONIC + SEQ_RESET_ON_REQUEST + projection.seq += 1 + seq = projection.seq + + # Advance cursors after successful snapshot emission (incremental mode). + projection.request = advance_state_request_after_snapshot(request, snapshot) + + # Mark all dirties up to `base_version` as pushed. If new dirties + # arrived while building/emitting, a follow-up push will be scheduled. 
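# --- Aside, not part of the patch: a toy model of the counter accounting used
# here, showing why N dirty signals collapse into at most one follow-up push:
dirty_version = 7   # dirty signals seen so far
pushed_version = 3  # highest dirty version covered by a completed push

def finish_push(base_version: int) -> bool:
    """Account for a push built at base_version; True means a follow-up is needed."""
    global pushed_version
    pushed_version = max(pushed_version, base_version)
    return dirty_version > pushed_version

# The snapshot was built when dirty_version was 5; dirties 6..7 arrived while it
# was in flight, so exactly one coalesced follow-up is scheduled, not two.
print(finish_push(base_version=5))  # True
# --- end aside ---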
+ projection.pushed_version = max(projection.pushed_version, base_version) + + payload = { + "runtime_epoch": runtime.get_runtime_id(), + "seq": seq, + "snapshot": snapshot, + } + + try: + if runtime.is_development(): + logs_len = ( + len(snapshot.get("logs", [])) + if isinstance(snapshot.get("logs"), list) + else None + ) + PrintStyle.debug( + f"[StateMonitor] emit state_push namespace={namespace} sid={sid} seq={seq} " + f"context={request.context!r} logs_len={logs_len} " + f"reason={dirty_reason!r} wave={dirty_wave_id!r}" + ) + await manager.emit_to( + namespace, + sid, + "state_push", + payload, + handler_id=handler_id, + ) + except ConnectionNotFoundError: + # Sid was removed before the emit; treat as benign. + if runtime.is_development(): + PrintStyle.debug( + f"[StateMonitor] emit skipped: sid not found namespace={namespace} sid={sid}" + ) + return + except RuntimeError: + # Dispatcher loop may be closing (e.g., during shutdown or test teardown). + if runtime.is_development(): + PrintStyle.debug( + f"[StateMonitor] emit skipped: dispatcher closing namespace={namespace} sid={sid}" + ) + return + finally: + follow_up = False + dirty_version = 0 + pushed_version = 0 + with self._lock: + if task is not None and self._push_tasks.get(identity) is task: + self._push_tasks.pop(identity, None) + projection = self._projections.get(identity) + if projection is not None: + dirty_version = projection.dirty_version + pushed_version = projection.pushed_version + follow_up = dirty_version > pushed_version + + # More dirties accumulated during push; schedule another coalesced push. + # IMPORTANT: this must not run from inside the `finally` block (a `return` in + # `finally` can swallow exceptions from the push task). + if not follow_up: + return + + if runtime.is_development(): + PrintStyle.debug( + f"[StateMonitor] follow_up_push namespace={namespace} sid={sid} dirty={dirty_version} pushed={pushed_version}" + ) + try: + loop = self._dispatcher_loop or asyncio.get_running_loop() + except RuntimeError: + return + if loop.is_closed(): + return + loop.call_soon_threadsafe(self._schedule_debounce_on_loop, identity) + + # Testing hook: keep argument surface stable for future extensions + def _debug_state(self) -> dict[str, Any]: # pragma: no cover - helper + with self._lock: + return { + "identities": list(self._projections.keys()), + "handles": list(self._debounce_handles.keys()), + } + + +# Store singleton in a mutable container to avoid `global` assignment warnings while +# keeping a simple module-level accessor API. 
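# --- Aside, not part of the patch: expected call pattern for the accessor
# defined just below (the reset helper exists for tests only):
#
#     from python.helpers.state_monitor import (
#         get_state_monitor,
#         _reset_state_monitor_for_testing,
#     )
#
#     monitor = get_state_monitor()
#     assert monitor is get_state_monitor()       # same instance on every call
#     _reset_state_monitor_for_testing()
#     assert get_state_monitor() is not monitor   # fresh instance after reset
# --- end aside ---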
+_STATE_MONITOR_HOLDER: dict[str, StateMonitor | None] = {"monitor": None} +_STATE_MONITOR_LOCK = threading.RLock() + + +def get_state_monitor() -> StateMonitor: + with _STATE_MONITOR_LOCK: + monitor = _STATE_MONITOR_HOLDER.get("monitor") + if monitor is None: + monitor = StateMonitor() + _STATE_MONITOR_HOLDER["monitor"] = monitor + return monitor + + +def _reset_state_monitor_for_testing() -> None: # pragma: no cover - helper + with _STATE_MONITOR_LOCK: + _STATE_MONITOR_HOLDER["monitor"] = None diff --git a/python/helpers/state_monitor_integration.py b/python/helpers/state_monitor_integration.py new file mode 100644 index 0000000000..4a00b42f1f --- /dev/null +++ b/python/helpers/state_monitor_integration.py @@ -0,0 +1,13 @@ +from __future__ import annotations + + +def mark_dirty_all(*, reason: str | None = None) -> None: + from python.helpers.state_monitor import get_state_monitor + + get_state_monitor().mark_dirty_all(reason=reason) + + +def mark_dirty_for_context(context_id: str, *, reason: str | None = None) -> None: + from python.helpers.state_monitor import get_state_monitor + + get_state_monitor().mark_dirty_for_context(context_id, reason=reason) diff --git a/python/helpers/state_snapshot.py b/python/helpers/state_snapshot.py new file mode 100644 index 0000000000..cbaa9fa5a2 --- /dev/null +++ b/python/helpers/state_snapshot.py @@ -0,0 +1,320 @@ +from __future__ import annotations + +import types +from typing import Any, Mapping, TypedDict, Union, get_args, get_origin, get_type_hints + +from dataclasses import dataclass + +import pytz # type: ignore[import-untyped] + +from agent import AgentContext, AgentContextType + +from python.helpers.dotenv import get_dotenv_value +from python.helpers.localization import Localization +from python.helpers.task_scheduler import TaskScheduler + + +class SnapshotV1(TypedDict): + deselect_chat: bool + context: str + contexts: list[dict[str, Any]] + tasks: list[dict[str, Any]] + logs: list[dict[str, Any]] + log_guid: str + log_version: int + # Historical behavior: when no context is selected, log_progress is 0 (falsy). + # When a context is active, it is usually a string. 
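    # (Aside, not part of the patch: consumers can therefore rely on plain
    # truthiness, e.g. `progress = snapshot.get("log_progress") or ""` renders
    # the no-context 0 and an empty string identically; names hypothetical.)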
+ log_progress: str | int + log_progress_active: bool + paused: bool + notifications: list[dict[str, Any]] + notifications_guid: str + notifications_version: int + + +@dataclass(frozen=True) +class StateRequestV1: + context: str | None + log_from: int + notifications_from: int + timezone: str + + +class StateRequestValidationError(ValueError): + def __init__( + self, + *, + reason: str, + message: str, + details: dict[str, Any] | None = None, + ) -> None: + super().__init__(message) + self.reason = reason + self.details = details or {} + + +def _annotation_to_isinstance_types(annotation: Any) -> tuple[type, ...]: + """Convert type annotation to tuple suitable for isinstance().""" + origin = get_origin(annotation) + + # Handle Union (typing.Union or types.UnionType from X | Y) + _union_type = getattr(types, "UnionType", None) + if origin is Union or origin is _union_type: + result: list[type] = [] + for arg in get_args(annotation): + result.extend(_annotation_to_isinstance_types(arg)) + return tuple(result) + + # Generic aliases: list[X] -> list, dict[K,V] -> dict + if origin is not None: + return (origin,) + + if isinstance(annotation, type): + return (annotation,) + + return () + + +def _build_schema_from_typeddict(td: type) -> dict[str, tuple[type, ...]]: + """Extract field names and isinstance-compatible types from TypedDict.""" + return {k: _annotation_to_isinstance_types(v) for k, v in get_type_hints(td).items()} + + +_SNAPSHOT_V1_SCHEMA = _build_schema_from_typeddict(SnapshotV1) +SNAPSHOT_SCHEMA_V1_KEYS: tuple[str, ...] = tuple(_SNAPSHOT_V1_SCHEMA.keys()) + + +def validate_snapshot_schema_v1(snapshot: Mapping[str, Any]) -> None: + if not isinstance(snapshot, dict): + raise TypeError("snapshot must be a dict") + expected = set(SNAPSHOT_SCHEMA_V1_KEYS) + actual = set(snapshot.keys()) + missing = sorted(expected - actual) + extra = sorted(actual - expected) + if missing or extra: + message = "snapshot schema mismatch" + if missing: + message += f"; missing={missing}" + if extra: + message += f"; unexpected={extra}" + raise ValueError(message) + + for key, expected_types in _SNAPSHOT_V1_SCHEMA.items(): + if expected_types and not isinstance(snapshot.get(key), expected_types): + type_desc = " | ".join(t.__name__ for t in expected_types) + raise TypeError(f"snapshot.{key} must be {type_desc}") + + +def _coerce_non_negative_int(value: Any, default: int = 0) -> int: + try: + as_int = int(value) + except (TypeError, ValueError): + return default + return as_int if as_int >= 0 else default + + +def parse_state_request_payload(payload: Mapping[str, Any]) -> StateRequestV1: + context = payload.get("context") + log_from = payload.get("log_from") + notifications_from = payload.get("notifications_from") + timezone = payload.get("timezone") + + if context is not None and not isinstance(context, str): + raise StateRequestValidationError( + reason="context_type", + message="context must be a string or null", + details={"context_type": type(context).__name__}, + ) + if not isinstance(log_from, int) or log_from < 0: + raise StateRequestValidationError( + reason="log_from", + message="log_from must be an integer >= 0", + details={"log_from": log_from}, + ) + if not isinstance(notifications_from, int) or notifications_from < 0: + raise StateRequestValidationError( + reason="notifications_from", + message="notifications_from must be an integer >= 0", + details={"notifications_from": notifications_from}, + ) + if not isinstance(timezone, str) or not timezone.strip(): + raise StateRequestValidationError( + 
reason="timezone_empty", + message="timezone must be a non-empty string", + details={"timezone": timezone}, + ) + + tz = timezone.strip() + try: + pytz.timezone(tz) + except pytz.exceptions.UnknownTimeZoneError as exc: + raise StateRequestValidationError( + reason="timezone_invalid", + message="timezone must be a valid IANA timezone name", + details={"timezone": tz}, + ) from exc + + ctxid: str | None = context.strip() if isinstance(context, str) else None + if ctxid == "": + ctxid = None + return StateRequestV1( + context=ctxid, + log_from=log_from, + notifications_from=notifications_from, + timezone=tz, + ) + + +def _coerce_state_request_inputs( + *, + context: Any, + log_from: Any, + notifications_from: Any, + timezone: Any, +) -> StateRequestV1: + tz = timezone if isinstance(timezone, str) and timezone else None + tz = tz or get_dotenv_value("DEFAULT_USER_TIMEZONE", "UTC") + + ctxid: str | None = context.strip() if isinstance(context, str) else None + if ctxid == "": + ctxid = None + + return StateRequestV1( + context=ctxid, + log_from=_coerce_non_negative_int(log_from, default=0), + notifications_from=_coerce_non_negative_int(notifications_from, default=0), + timezone=tz, + ) + + +def advance_state_request_after_snapshot( + request: StateRequestV1, + snapshot: Mapping[str, Any], +) -> StateRequestV1: + log_from = request.log_from + notifications_from = request.notifications_from + + try: + log_from = int(snapshot.get("log_version", log_from)) + except (TypeError, ValueError): + pass + + try: + notifications_from = int(snapshot.get("notifications_version", notifications_from)) + except (TypeError, ValueError): + pass + + return StateRequestV1( + context=request.context, + log_from=log_from, + notifications_from=notifications_from, + timezone=request.timezone, + ) + + +async def build_snapshot_from_request(*, request: StateRequestV1) -> SnapshotV1: + """Build a poll-shaped snapshot for both /poll and state_push.""" + + Localization.get().set_timezone(request.timezone) + + ctxid = request.context if isinstance(request.context, str) else "" + ctxid = ctxid.strip() + + from_no = _coerce_non_negative_int(request.log_from, default=0) + notifications_from_no = _coerce_non_negative_int(request.notifications_from, default=0) + + active_context = AgentContext.get(ctxid) if ctxid else None + + logs = active_context.log.output(start=from_no) if active_context else [] + + notification_manager = AgentContext.get_notification_manager() + notifications = notification_manager.output(start=notifications_from_no) + + scheduler = TaskScheduler.get() + + ctxs: list[dict[str, Any]] = [] + tasks: list[dict[str, Any]] = [] + processed_contexts: set[str] = set() + + all_ctxs = AgentContext.all() + for ctx in all_ctxs: + if ctx.id in processed_contexts: + continue + + if ctx.type == AgentContextType.BACKGROUND: + processed_contexts.add(ctx.id) + continue + + context_data = ctx.output() + + context_task = scheduler.get_task_by_uuid(ctx.id) + is_task_context = context_task is not None and context_task.context_id == ctx.id + + if not is_task_context: + ctxs.append(context_data) + else: + task_details = scheduler.serialize_task(ctx.id) + if task_details: + context_data.update( + { + "task_name": task_details.get("name"), + "uuid": task_details.get("uuid"), + "state": task_details.get("state"), + "type": task_details.get("type"), + "system_prompt": task_details.get("system_prompt"), + "prompt": task_details.get("prompt"), + "last_run": task_details.get("last_run"), + "last_result": task_details.get("last_result"), + 
"attachments": task_details.get("attachments", []), + "context_id": task_details.get("context_id"), + } + ) + + if task_details.get("type") == "scheduled": + context_data["schedule"] = task_details.get("schedule") + elif task_details.get("type") == "planned": + context_data["plan"] = task_details.get("plan") + else: + context_data["token"] = task_details.get("token") + + tasks.append(context_data) + + processed_contexts.add(ctx.id) + + ctxs.sort(key=lambda x: x["created_at"], reverse=True) + tasks.sort(key=lambda x: x["created_at"], reverse=True) + + snapshot: SnapshotV1 = { + "deselect_chat": bool(ctxid) and active_context is None, + "context": active_context.id if active_context else "", + "contexts": ctxs, + "tasks": tasks, + "logs": logs, + "log_guid": active_context.log.guid if active_context else "", + "log_version": len(active_context.log.updates) if active_context else 0, + "log_progress": active_context.log.progress if active_context else 0, + "log_progress_active": bool(active_context.log.progress_active) if active_context else False, + "paused": active_context.paused if active_context else False, + "notifications": notifications, + "notifications_guid": notification_manager.guid, + "notifications_version": len(notification_manager.updates), + } + + validate_snapshot_schema_v1(snapshot) + return snapshot + + +async def build_snapshot( + *, + context: str | None, + log_from: int, + notifications_from: int, + timezone: str | None, +) -> SnapshotV1: + request = _coerce_state_request_inputs( + context=context, + log_from=log_from, + notifications_from=notifications_from, + timezone=timezone, + ) + return await build_snapshot_from_request(request=request) diff --git a/python/helpers/task_scheduler.py b/python/helpers/task_scheduler.py index 5f9321754a..6ccba3a011 100644 --- a/python/helpers/task_scheduler.py +++ b/python/helpers/task_scheduler.py @@ -677,14 +677,20 @@ def get_tasks_by_context_id(self, context_id: str, only_running: bool = False) - async def add_task(self, task: Union[ScheduledTask, AdHocTask, PlannedTask]) -> "TaskScheduler": await self._tasks.add_task(task) ctx = await self._get_chat_context(task) # invoke context creation + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="task_scheduler.TaskScheduler.add_task") return self async def remove_task_by_uuid(self, task_uuid: str) -> "TaskScheduler": await self._tasks.remove_task_by_uuid(task_uuid) + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="task_scheduler.TaskScheduler.remove_task_by_uuid") return self async def remove_task_by_name(self, name: str) -> "TaskScheduler": await self._tasks.remove_task_by_name(name) + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="task_scheduler.TaskScheduler.remove_task_by_name") return self def get_task_by_uuid(self, task_uuid: str) -> Union[ScheduledTask, AdHocTask, PlannedTask] | None: @@ -754,7 +760,11 @@ async def update_task_checked( def _update_task(task): task.update(**update_params) - return await self._tasks.update_task_by_uuid(task_uuid, _update_task, verify_func) + updated = await self._tasks.update_task_by_uuid(task_uuid, _update_task, verify_func) + if updated is not None: + from python.helpers.state_monitor_integration import mark_dirty_all + mark_dirty_all(reason="task_scheduler.TaskScheduler.update_task_checked") + return updated async def update_task(self, task_uuid: str, **update_params) -> Union[ScheduledTask, AdHocTask, PlannedTask] 
| None: return await self.update_task_checked(task_uuid, lambda task: True, **update_params) diff --git a/python/helpers/websocket.py b/python/helpers/websocket.py new file mode 100644 index 0000000000..772d7259a3 --- /dev/null +++ b/python/helpers/websocket.py @@ -0,0 +1,568 @@ +from __future__ import annotations + +import re +import threading +from abc import ABC, abstractmethod +from urllib.parse import urlparse +from typing import Any, Iterable, Optional, TYPE_CHECKING + +import socketio + +if TYPE_CHECKING: # pragma: no cover - hints only + from python.helpers.websocket_manager import WebSocketManager + +_EVENT_NAME_PATTERN = re.compile(r"^[a-z][a-z0-9_]*$") +_RESERVED_EVENT_NAMES: set[str] = { + "connect", + "disconnect", + "error", + "ping", + "pong", + "connect_error", + "reconnect", + "reconnect_attempt", + "reconnect_error", + "reconnect_failed", +} + + +def _default_port_for_scheme(scheme: str) -> int | None: + if scheme == "http": + return 80 + if scheme == "https": + return 443 + return None + + +def normalize_origin(value: Any) -> str | None: + """Normalize an Origin/Referer header value to scheme://host[:port].""" + if not isinstance(value, str) or not value.strip(): + return None + parsed = urlparse(value.strip()) + if not parsed.scheme or not parsed.hostname: + return None + origin = f"{parsed.scheme}://{parsed.hostname}" + if parsed.port: + origin += f":{parsed.port}" + return origin + + +def _parse_host_header(value: Any) -> tuple[str | None, int | None]: + if not isinstance(value, str) or not value.strip(): + return None, None + parsed = urlparse(f"http://{value.strip()}") + return parsed.hostname, parsed.port + + +def validate_ws_origin(environ: dict[str, Any]) -> tuple[bool, str | None]: + """Validate the browser Origin during the Socket.IO handshake. + + This is the minimum baseline recommended by RFC 6455 (Origin considerations) + and OWASP (CSWSH mitigation): reject cross-origin WebSocket handshakes when + the server is intended for a specific web UI origin. + """ + + raw_origin = environ.get("HTTP_ORIGIN") or environ.get("HTTP_REFERER") + origin = normalize_origin(raw_origin) + if origin is None: + return False, "missing_origin" + + origin_parsed = urlparse(origin) + origin_host = origin_parsed.hostname.lower() if origin_parsed.hostname else None + origin_port = origin_parsed.port or _default_port_for_scheme(origin_parsed.scheme) + if origin_host is None or origin_port is None: + return False, "invalid_origin" + + # Build candidate request host/port pairs. Prefer explicit Host header, fall back to + # forwarded headers (reverse proxies) and finally SERVER_NAME. 
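# --- Aside, not part of the patch: hedged example of this check against a
# WSGI-style environ (HTTP_* header convention); values are hypothetical:
from python.helpers.websocket import validate_ws_origin

environ = {"HTTP_ORIGIN": "http://localhost:50001", "HTTP_HOST": "localhost:50001"}
print(validate_ws_origin(environ))  # (True, None): Origin matches Host exactly

environ["HTTP_ORIGIN"] = "http://evil.example"  # scheme default port 80 implied
print(validate_ws_origin(environ))  # (False, 'origin_host_mismatch')
# --- end aside ---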
+ raw_host = environ.get("HTTP_HOST") + req_host, req_port = _parse_host_header(raw_host) + if not req_host: + req_host = environ.get("SERVER_NAME") + + if req_port is None: + server_port_raw = environ.get("SERVER_PORT") + try: + server_port = int(server_port_raw) if server_port_raw is not None else None + except (TypeError, ValueError): + server_port = None + if server_port is not None and server_port > 0: + req_port = server_port + + if req_host: + req_host = req_host.lower() + if req_port is None: + req_port = origin_port + + forwarded_host_raw = environ.get("HTTP_X_FORWARDED_HOST") + forwarded_host = None + forwarded_port = None + if isinstance(forwarded_host_raw, str) and forwarded_host_raw.strip(): + first = forwarded_host_raw.split(",")[0].strip() + forwarded_host, forwarded_port = _parse_host_header(first) + if forwarded_host: + forwarded_host = forwarded_host.lower() + + forwarded_proto_raw = environ.get("HTTP_X_FORWARDED_PROTO") + forwarded_scheme = None + if isinstance(forwarded_proto_raw, str) and forwarded_proto_raw.strip(): + forwarded_scheme = forwarded_proto_raw.split(",")[0].strip().lower() + forwarded_scheme = forwarded_scheme or origin_parsed.scheme + forwarded_port = ( + forwarded_port + if forwarded_port is not None + else _default_port_for_scheme(forwarded_scheme) or origin_port + ) + + candidates: list[tuple[str, int]] = [] + if req_host: + candidates.append((req_host, int(req_port))) + if forwarded_host: + candidates.append((forwarded_host, int(forwarded_port))) + + if not candidates: + return False, "missing_host" + + for host, port in candidates: + if origin_host == host and origin_port == port: + return True, None + + # Preserve the original mismatch semantics for debugging. + if origin_host not in {host for host, _ in candidates}: + return False, "origin_host_mismatch" + return False, "origin_port_mismatch" + + +class SingletonInstantiationError(RuntimeError): + """Raised when a WebSocketHandler subclass is instantiated directly. + + Handlers must be retrieved via ``get_instance`` to guarantee singleton + semantics and consistent lifecycle behaviour. + """ + + +class ConnectionNotFoundError(RuntimeError): + """Raised when attempting to emit to a non-existent WebSocket connection.""" + + def __init__(self, sid: str, *, namespace: str | None = None) -> None: + self.sid = sid + self.namespace = namespace + if namespace: + super().__init__(f"Connection not found: namespace={namespace} sid={sid}") + else: + super().__init__(f"Connection not found: {sid}") + + +class WebSocketResult: + """Helper wrapper for standardized handler results. + + Instances are converted to the canonical ``RequestResultItem`` shape by + :class:`WebSocketManager`. Helper constructors enforce payload validation so + handlers no longer need to hand‑craft dictionaries. 
+ """ + + __slots__ = ("_ok", "_data", "_error", "_correlation_id", "_duration_ms") + + def __init__( + self, + ok: bool, + data: dict[str, Any] | None = None, + error: dict[str, Any] | None = None, + correlation_id: str | None = None, + duration_ms: float | None = None, + ) -> None: + if ok and error: + raise ValueError("Cannot be both ok and have an error") + if not ok and not error: + raise ValueError("Must either be ok or have an error") + if data is not None and not isinstance(data, dict): + raise TypeError("Data payload must be a dictionary or None") + if error is not None and not isinstance(error, dict): + raise TypeError("Error payload must be a dictionary or None") + if correlation_id is not None and not isinstance(correlation_id, str): + raise TypeError("Correlation ID must be a string or None") + if duration_ms is not None and not isinstance(duration_ms, (int, float)): + raise TypeError("Duration must be a number or None") + + self._ok = bool(ok) + self._data = dict(data) if data is not None else None + self._error = dict(error) if error is not None else None + self._correlation_id = correlation_id + self._duration_ms = float(duration_ms) if duration_ms is not None else None + + @classmethod + def ok( + cls, + data: dict[str, Any] | None = None, + *, + correlation_id: str | None = None, + duration_ms: float | None = None, + ) -> "WebSocketResult": + if data is not None and not isinstance(data, dict): + raise TypeError("WebSocketResult.ok data must be a dict or None") + payload = dict(data) if data is not None else None + return cls( + ok=True, + data=payload, + correlation_id=correlation_id, + duration_ms=duration_ms, + ) + + @classmethod + def error( + cls, + *, + code: str, + message: str, + details: Any | None = None, + correlation_id: str | None = None, + duration_ms: float | None = None, + ) -> "WebSocketResult": + if not isinstance(code, str) or not code.strip(): + raise ValueError("Error code must be a non-empty string") + if not isinstance(message, str) or not message.strip(): + raise ValueError("Error message must be a non-empty string") + + error_payload: dict[str, Any] = {"code": code, "error": message} + if details is not None: + error_payload["details"] = details + return cls( + ok=False, + error=error_payload, + correlation_id=correlation_id, + duration_ms=duration_ms, + ) + + def as_result( + self, + *, + handler_id: str, + fallback_correlation_id: str | None, + duration_ms: float | None = None, + ) -> dict[str, Any]: + result: dict[str, Any] = { + "handlerId": handler_id, + "ok": self._ok, + } + + effective_duration = ( + self._duration_ms if self._duration_ms is not None else duration_ms + ) + if effective_duration is not None: + result["durationMs"] = round(effective_duration, 4) + + correlation = ( + self._correlation_id + if self._correlation_id is not None + else fallback_correlation_id + ) + if correlation is not None: + result["correlationId"] = correlation + + if self._ok: + result["data"] = dict(self._data) if self._data is not None else {} + else: + result["error"] = dict(self._error) if self._error is not None else { + "code": "INTERNAL_ERROR", + "error": "Internal server error", + } + return result + + +class WebSocketHandler(ABC): + """Base class for WebSocket event handlers. + + The interface mirrors :class:`python.helpers.api.ApiHandler` with declarative + security configuration and lifecycle hooks while enforcing event-naming + conventions. 
+ """ + + _instances: dict[type["WebSocketHandler"], "WebSocketHandler"] = {} + _construction_tokens: dict[type["WebSocketHandler"], bool] = {} + _singleton_lock = threading.RLock() + + def __init__(self, socketio: socketio.AsyncServer, lock: threading.RLock) -> None: + """Create a handler bound to the shared Socket.IO instance.""" + + cls = self.__class__ + if not WebSocketHandler._construction_tokens.get(cls): + raise SingletonInstantiationError( + f"{cls.__name__} must be instantiated via {cls.__name__}.get_instance()" + ) + + self.socketio: socketio.AsyncServer = socketio + self.lock: threading.RLock = lock + self._manager: Optional[WebSocketManager] = None + self._namespace: str | None = None + + @classmethod + def get_instance( + cls, + socketio: socketio.AsyncServer | None = None, + lock: threading.RLock | None = None, + *args: Any, + **kwargs: Any, + ) -> "WebSocketHandler": + """Return the singleton instance for ``cls``. + + Args: + socketio: Shared AsyncServer instance (required on first call). + lock: Shared threading lock (required on first call). + *args: Optional subclass-specific constructor args. + **kwargs: Optional subclass-specific constructor kwargs. + """ + + if cls is WebSocketHandler: + raise TypeError("WebSocketHandler must be subclassed before use") + + with WebSocketHandler._singleton_lock: + instance = WebSocketHandler._instances.get(cls) + if instance is not None: + return instance + + if socketio is None or lock is None: + raise ValueError( + f"{cls.__name__}.get_instance() requires socketio and lock on first call" + ) + + WebSocketHandler._construction_tokens[cls] = True + try: + instance = cls(socketio, lock, *args, **kwargs) + finally: + WebSocketHandler._construction_tokens.pop(cls, None) + + WebSocketHandler._instances[cls] = instance + return instance + + @classmethod + def _reset_instance_for_testing(cls) -> None: + """Reset the cached singleton instance (testing helper).""" + + with WebSocketHandler._singleton_lock: + WebSocketHandler._instances.pop(cls, None) + WebSocketHandler._construction_tokens.pop(cls, None) + + @classmethod + @abstractmethod + def get_event_types(cls) -> list[str]: + """Return the list of event types this handler subscribes to.""" + + @classmethod + def validate_event_types(cls, event_types: Iterable[str]) -> list[str]: + """Validate event type declarations. + + Ensures that every event name follows ``lowercase_snake_case`` naming, + does not collide with Socket.IO reserved events, and that the handler + does not declare duplicates. + """ + + validated: list[str] = [] + seen: set[str] = set() + for event in event_types: + if not isinstance(event, str): + raise TypeError("Event type declarations must be strings") + if not _EVENT_NAME_PATTERN.fullmatch(event): + raise ValueError( + f"Invalid event type '{event}' – must match lowercase_snake_case" + ) + if event in _RESERVED_EVENT_NAMES: + raise ValueError( + f"Event type '{event}' is reserved by Socket.IO and cannot be used" + ) + if event in seen: + raise ValueError(f"Duplicate event type '{event}' declared in handler") + seen.add(event) + validated.append(event) + if not validated: + raise ValueError("Handlers must declare at least one event type") + return validated + + @classmethod + def requires_auth(cls) -> bool: + """Return whether an authenticated Flask session is required.""" + + return True + + @classmethod + def requires_csrf(cls) -> bool: + """Return whether CSRF validation is required for the handler. 
+ + This mirrors ApiHandler.requires_csrf(): by default, authenticated + WebSocket handlers also require CSRF validation during the Socket.IO + connect step. + """ + + return cls.requires_auth() + + async def on_connect(self, sid: str) -> None: + """Lifecycle hook invoked when a client connects.""" + + return None + + async def on_disconnect(self, sid: str) -> None: + """Lifecycle hook invoked when a client disconnects.""" + + return None + + @abstractmethod + async def process_event( + self, + event_type: str, + data: dict[str, Any], + sid: str, + ) -> dict[str, Any] | WebSocketResult | None: + """Process an incoming event dispatched to the handler. + + Returning ``None`` indicates fire-and-forget semantics. Returning a + dictionary includes the payload in the Socket.IO acknowledgement. + """ + + def bind_manager(self, manager: WebSocketManager, *, namespace: str) -> None: + """Associate this handler instance with the shared WebSocket manager.""" + + self._manager = manager + self._namespace = namespace + + @property + def namespace(self) -> str: + if not self._namespace: + raise RuntimeError("WebSocketHandler is missing namespace binding") + return self._namespace + + @property + def manager(self) -> WebSocketManager: + """Return the bound WebSocket manager. + + Raises: + RuntimeError: If the handler has not been registered yet. + """ + + if not self._manager: + raise RuntimeError("WebSocketHandler is not registered with a manager") + return self._manager + + @property + def identifier(self) -> str: + """Return a stable identifier used in aggregated responses.""" + + return f"{self.__class__.__module__}.{self.__class__.__name__}" + + async def emit_to( + self, + sid: str, + event_type: str, + data: dict[str, Any], + *, + correlation_id: str | None = None, + ) -> None: + """Emit an event to a specific connection or buffer it if offline.""" + await self.manager.emit_to( + self.namespace, + sid, + event_type, + data, + handler_id=self.identifier, + correlation_id=correlation_id, + ) + + async def broadcast( + self, + event_type: str, + data: dict[str, Any], + *, + exclude_sids: str | Iterable[str] | None = None, + correlation_id: str | None = None, + ) -> None: + """Broadcast an event to all connections, optionally excluding one.""" + await self.manager.broadcast( + self.namespace, + event_type, + data, + exclude_sids=exclude_sids, + handler_id=self.identifier, + correlation_id=correlation_id, + ) + + # ------------------------------------------------------------------ + # Convenience wrappers for standardized result helpers + # ------------------------------------------------------------------ + + @staticmethod + def result_ok( + data: dict[str, Any] | None = None, + *, + correlation_id: str | None = None, + duration_ms: float | None = None, + ) -> WebSocketResult: + """Return a standardized success result.""" + + return WebSocketResult.ok( + data=data, + correlation_id=correlation_id, + duration_ms=duration_ms, + ) + + @staticmethod + def result_error( + *, + code: str, + message: str, + details: Any | None = None, + correlation_id: str | None = None, + duration_ms: float | None = None, + ) -> WebSocketResult: + """Return a standardized error result.""" + + return WebSocketResult.error( + code=code, + message=message, + details=details, + correlation_id=correlation_id, + duration_ms=duration_ms, + ) + + async def request( + self, + sid: str, + event_type: str, + data: dict[str, Any], + *, + timeout_ms: int = 0, + include_handlers: Iterable[str] | None = None, + ) -> dict[str, Any]: + 
"""Send a request-response event to a specific connection and aggregate results. + + Returns a payload shaped as ``{"correlationId": str, "results": RequestResultItem[]}``. + """ + + return await self.manager.request_for_sid( + namespace=self.namespace, + sid=sid, + event_type=event_type, + data=data, + timeout_ms=timeout_ms, + handler_id=self.identifier, + include_handlers=set(include_handlers) if include_handlers else None, + ) + + async def request_all( + self, + event_type: str, + data: dict[str, Any], + *, + timeout_ms: int = 0, + exclude_handlers: Iterable[str] | None = None, + ) -> list[dict[str, Any]]: + """Fan a request out to every active connection and aggregate responses. + + Each entry in the returned list is ``{"sid": str, "correlationId": str, "results": RequestResultItem[]}``. + """ + + return await self.manager.route_event_all( + self.namespace, + event_type=event_type, + data=data, + timeout_ms=timeout_ms, + exclude_handlers=set(exclude_handlers) if exclude_handlers else None, + handler_id=self.identifier, + ) diff --git a/python/helpers/websocket_manager.py b/python/helpers/websocket_manager.py new file mode 100644 index 0000000000..6474ede335 --- /dev/null +++ b/python/helpers/websocket_manager.py @@ -0,0 +1,1147 @@ +from __future__ import annotations + +import asyncio +import time +import threading +from collections import defaultdict, deque +from dataclasses import dataclass, field +from datetime import datetime, timedelta, timezone +from typing import Any, Callable, Deque, Dict, Iterable, List, Optional, Set + +import socketio +import uuid + +from python.helpers.defer import DeferredTask +from python.helpers.print_style import PrintStyle +from python.helpers import runtime +from python.helpers.websocket import ConnectionNotFoundError, WebSocketHandler, WebSocketResult + +BUFFER_MAX_SIZE = 100 +BUFFER_TTL = timedelta(hours=1) + + +def _utcnow() -> datetime: + return datetime.now(timezone.utc) + + +@dataclass +class BufferedEvent: + event_type: str + data: dict[str, Any] + handler_id: str | None = None + correlation_id: str | None = None + timestamp: datetime = field(default_factory=_utcnow) + + +@dataclass +class ConnectionInfo: + namespace: str + sid: str + connected_at: datetime = field(default_factory=_utcnow) + last_activity: datetime = field(default_factory=_utcnow) + + +ConnectionIdentity = tuple[str, str] # (namespace, sid) + + +@dataclass +class _HandlerExecution: + handler: WebSocketHandler + value: Any + duration_ms: float | None + + +DIAGNOSTIC_EVENT = "ws_dev_console_event" +LIFECYCLE_CONNECT_EVENT = "ws_lifecycle_connect" +LIFECYCLE_DISCONNECT_EVENT = "ws_lifecycle_disconnect" + + +class WebSocketManager: + def __init__(self, socketio: socketio.AsyncServer, lock) -> None: + self.socketio = socketio + self.lock = lock + self.handlers: defaultdict[str, defaultdict[str, List[WebSocketHandler]]] = defaultdict( + lambda: defaultdict(list) + ) + self.connections: Dict[ConnectionIdentity, ConnectionInfo] = {} + self.buffers: defaultdict[ConnectionIdentity, Deque[BufferedEvent]] = defaultdict(deque) + self._known_sids: Set[ConnectionIdentity] = set() + self._identifier: str = f"{self.__class__.__module__}.{self.__class__.__name__}" + # Session tracking (single-user default) + self.user_to_sids: defaultdict[str, Set[ConnectionIdentity]] = defaultdict(set) + self.sid_to_user: Dict[ConnectionIdentity, str | None] = {} + self._ALL_USERS_BUCKET = "allUsers" + self._server_restart_enabled: bool = False + self._diagnostic_watchers: Set[ConnectionIdentity] = set() + 
self._diagnostics_enabled: bool = runtime.is_development() + self._dispatcher_loop: asyncio.AbstractEventLoop | None = None + self._handler_worker: DeferredTask | None = None + + # Internal: development-only debug logging to avoid noise in production + def _debug(self, message: str) -> None: + if runtime.is_development(): + PrintStyle.debug(message) + + def _ensure_dispatcher_loop(self) -> None: + if self._dispatcher_loop is None: + try: + self._dispatcher_loop = asyncio.get_running_loop() + except RuntimeError: + return + + def _get_handler_worker(self) -> DeferredTask: + if self._handler_worker is None: + self._handler_worker = DeferredTask(thread_name="WebSocketHandlers") + return self._handler_worker + + async def _run_on_dispatcher_loop(self, coro: Any) -> Any: + self._ensure_dispatcher_loop() + dispatcher_loop = self._dispatcher_loop + if dispatcher_loop is None: + return await coro + if dispatcher_loop.is_closed(): + try: + coro.close() + except Exception: # pragma: no cover - best-effort cleanup + pass + raise RuntimeError("Dispatcher event loop is closed") + + try: + running_loop = asyncio.get_running_loop() + except RuntimeError: + running_loop = None + + if running_loop is dispatcher_loop: + return await coro + + future = asyncio.run_coroutine_threadsafe(coro, dispatcher_loop) + return await asyncio.wrap_future(future) + + def _diagnostics_active(self) -> bool: + if not self._diagnostics_enabled: + return False + with self.lock: + return bool(self._diagnostic_watchers) + + def _copy_diagnostic_watchers(self) -> list[ConnectionIdentity]: + with self.lock: + return list(self._diagnostic_watchers) + + def register_diagnostic_watcher(self, namespace: str, sid: str) -> bool: + if not self._diagnostics_enabled: + return False + identity: ConnectionIdentity = (namespace, sid) + with self.lock: + if identity not in self.connections: + return False + self._diagnostic_watchers.add(identity) + return True + + def unregister_diagnostic_watcher(self, namespace: str, sid: str) -> None: + identity: ConnectionIdentity = (namespace, sid) + with self.lock: + self._diagnostic_watchers.discard(identity) + + def _timestamp(self) -> str: + return _utcnow().isoformat(timespec="milliseconds").replace("+00:00", "Z") + + def _summarize_payload(self, payload: dict[str, Any] | None) -> dict[str, Any]: + if not isinstance(payload, dict): + return {} + summary: dict[str, Any] = {} + for key in list(payload.keys())[:5]: + value = payload[key] + if isinstance(value, (str, int, float, bool)) or value is None: + preview = value + elif isinstance(value, dict): + preview = f"dict({len(value)})" + elif isinstance(value, list): + preview = f"list({len(value)})" + else: + preview = value.__class__.__name__ + summary[key] = preview + summary["__sizeBytes__"] = len(str(payload).encode("utf-8")) + return summary + + def _summarize_results(self, results: List[dict[str, Any]]) -> dict[str, Any]: + summary = {"ok": 0, "error": 0, "handlers": []} + for result in results: + handler_id = result.get("handlerId") + ok = bool(result.get("ok")) + if ok: + summary["ok"] += 1 + else: + summary["error"] += 1 + summary["handlers"].append( + { + "handlerId": handler_id, + "ok": ok, + "errorCode": (result.get("error") or {}).get("code"), + "durationMs": result.get("durationMs"), + } + ) + summary["handlerCount"] = len(summary["handlers"]) + return summary + + async def _publish_diagnostic_event( + self, payload: dict[str, Any] | Callable[[], dict[str, Any]] + ) -> None: + if not self._diagnostics_enabled: + return + watchers = 
self._copy_diagnostic_watchers() + if not watchers: + return + effective_payload = payload() if callable(payload) else payload + if ( + isinstance(effective_payload, dict) + and "sourceNamespace" not in effective_payload + ): + origin = effective_payload.get("namespace") + if isinstance(origin, str) and origin.strip(): + effective_payload = { + **effective_payload, + "sourceNamespace": origin.strip(), + } + + async def _emit_to_watcher(identity: ConnectionIdentity) -> None: + namespace, sid = identity + try: + await self.emit_to( + namespace, + sid, + DIAGNOSTIC_EVENT, + effective_payload, + handler_id=self._identifier, + diagnostic=True, + ) + except ConnectionNotFoundError: + self.unregister_diagnostic_watcher(namespace, sid) + + await asyncio.gather(*(_emit_to_watcher(identity) for identity in watchers)) + + def _schedule_lifecycle_broadcast( + self, namespace: str, event_type: str, payload: dict[str, Any] + ) -> None: + async def _broadcast() -> None: + try: + await self.broadcast( + namespace, + event_type, + payload, + diagnostic=True, + ) + except Exception as exc: # pragma: no cover - diagnostic + self._debug(f"Failed to broadcast lifecycle event {event_type}: {exc}") + + asyncio.create_task(_broadcast()) + + def _normalize_handler_filter( + self, value: Any, field_name: str + ) -> Set[str] | None: + if value is None: + return None + if isinstance(value, str): + return {value} + try: + iterator = iter(value) + except TypeError as exc: # pragma: no cover - defensive + raise ValueError(f"{field_name} must be an array of handler identifiers") from exc + + normalized: Set[str] = set() + for item in iterator: + if not isinstance(item, str): + raise ValueError( + f"{field_name} values must be handler identifier strings" + ) + normalized.add(item) + return normalized + + def _normalize_sid_filter( + self, value: str | Iterable[str] | None + ) -> Set[str]: + if value is None: + return set() + if isinstance(value, str): + return {value} + normalized: Set[str] = set() + for item in value: + normalized.add(str(item)) + return normalized + + def _select_handlers( + self, + namespace: str, + event_type: str, + *, + include: Set[str] | None, + exclude: Set[str] | None, + ) -> tuple[list[WebSocketHandler], Set[str]]: + registered = self.handlers.get(namespace, {}).get(event_type, []) + available_ids = {handler.identifier for handler in registered} + + if include is not None: + unknown = include - available_ids + if unknown: + raise ValueError( + f"Unknown handler(s) in includeHandlers for namespace '{namespace}': " + f"{', '.join(sorted(unknown))}" + ) + if exclude is not None: + unknown = exclude - available_ids + if unknown: + raise ValueError( + f"Unknown handler(s) in excludeHandlers for namespace '{namespace}': " + f"{', '.join(sorted(unknown))}" + ) + + selected: list[WebSocketHandler] = [] + for handler in registered: + ident = handler.identifier + if include is not None and ident not in include: + continue + if exclude is not None and ident in exclude: + continue + selected.append(handler) + + return selected, available_ids + + def _resolve_correlation_id(self, payload: dict[str, Any]) -> str: + value = payload.get("correlationId") + if isinstance(value, str) and value.strip(): + correlation_id = value.strip() + else: + correlation_id = uuid.uuid4().hex + payload["correlationId"] = correlation_id + return correlation_id + + def register_handlers( + self, handlers_by_namespace: dict[str, Iterable[WebSocketHandler]] + ) -> None: + for namespace, handlers in handlers_by_namespace.items(): + 
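            # (Aside, not part of the patch: validate_event_types, called below,
            # enforces lowercase_snake_case, rejects Socket.IO reserved names,
            # and forbids duplicates:
            #     handler.validate_event_types(["state_request"])  # ["state_request"]
            #     handler.validate_event_types(["Connect"])        # ValueError: invalid name
            #     handler.validate_event_types(["ping"])           # ValueError: reserved
            # so a mis-declared handler fails loudly at registration time.)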
for handler in handlers: + handler.bind_manager(self, namespace=namespace) + declared = handler.get_event_types() + try: + validated_events = handler.validate_event_types(declared) + except Exception as exc: + PrintStyle.error( + f"Failed to register handler {handler.identifier}: {exc}" + ) + raise + + PrintStyle.info( + "Registered WebSocket handler %s namespace=%s for events: %s" + % (handler.identifier, namespace, ", ".join(validated_events)) + ) + for event_type in validated_events: + existing = self.handlers[namespace].get(event_type) + if existing: + PrintStyle.warning( + f"Duplicate handler registration for namespace '{namespace}' event '{event_type}'" + ) + self.handlers[namespace][event_type].append(handler) + self._debug( + f"Registered handler {handler.identifier} namespace={namespace} event='{event_type}'" + ) + + def iter_event_types(self, namespace: str) -> Iterable[str]: + return list(self.handlers.get(namespace, {}).keys()) + + def iter_namespaces(self) -> list[str]: + return list(self.handlers.keys()) + + async def _invoke_handler( + self, + handler: WebSocketHandler, + event_type: str, + payload: dict[str, Any], + sid: str, + ) -> _HandlerExecution: + instrument = self._diagnostics_active() + start = time.perf_counter() if instrument else None + try: + value = await self._get_handler_worker().execute_inside( + handler.process_event, event_type, payload, sid + ) + except Exception as exc: # pragma: no cover - handled by caller + duration_ms = ( + (time.perf_counter() - start) * 1000 if start is not None else None + ) + return _HandlerExecution(handler, exc, duration_ms) + duration_ms = ( + (time.perf_counter() - start) * 1000 if start is not None else None + ) + return _HandlerExecution(handler, value, duration_ms) + + async def handle_connect( + self, namespace: str, sid: str, user_id: str | None = None + ) -> None: + self._ensure_dispatcher_loop() + user_bucket = user_id or "single_user" + identity: ConnectionIdentity = (namespace, sid) + with self.lock: + self.connections[identity] = ConnectionInfo(namespace=namespace, sid=sid) + self._known_sids.add(identity) + self.sid_to_user[identity] = user_bucket + self.user_to_sids[self._ALL_USERS_BUCKET].add(identity) + self.user_to_sids[user_bucket].add(identity) + connection_count = sum( + 1 for conn_identity in self.connections if conn_identity[0] == namespace + ) + PrintStyle.info(f"WebSocket connected: namespace={namespace} sid={sid}") + await self._run_lifecycle(namespace, lambda h: h.on_connect(sid)) + await self._flush_buffer(identity) + if self._server_restart_enabled: + await self.emit_to( + namespace, + sid, + "server_restart", + { + "emittedAt": _utcnow() + .isoformat(timespec="milliseconds") + .replace("+00:00", "Z"), + "runtimeId": runtime.get_runtime_id(), + }, + handler_id=self._identifier, + ) + PrintStyle.info( + f"server_restart broadcast emitted to namespace={namespace} sid={sid}" + ) + lifecycle_payload = { + "namespace": namespace, + "sid": sid, + "connectionCount": connection_count, + "timestamp": self._timestamp(), + } + await self._publish_diagnostic_event( + { + "kind": "lifecycle", + "event": "connect", + **lifecycle_payload, + } + ) + self._schedule_lifecycle_broadcast( + namespace, LIFECYCLE_CONNECT_EVENT, lifecycle_payload + ) + + async def handle_disconnect(self, namespace: str, sid: str) -> None: + self._ensure_dispatcher_loop() + identity: ConnectionIdentity = (namespace, sid) + with self.lock: + self.connections.pop(identity, None) + # session tracking cleanup + user_bucket = 
self.sid_to_user.pop(identity, None) + if self._ALL_USERS_BUCKET in self.user_to_sids: + self.user_to_sids[self._ALL_USERS_BUCKET].discard(identity) + if not self.user_to_sids[self._ALL_USERS_BUCKET]: + self.user_to_sids.pop(self._ALL_USERS_BUCKET, None) + if user_bucket and user_bucket in self.user_to_sids: + self.user_to_sids[user_bucket].discard(identity) + if not self.user_to_sids[user_bucket]: + self.user_to_sids.pop(user_bucket, None) + connection_count = sum( + 1 for conn_identity in self.connections if conn_identity[0] == namespace + ) + self.unregister_diagnostic_watcher(namespace, sid) + PrintStyle.info(f"WebSocket disconnected: namespace={namespace} sid={sid}") + await self._run_lifecycle(namespace, lambda h: h.on_disconnect(sid)) + lifecycle_payload = { + "namespace": namespace, + "sid": sid, + "connectionCount": connection_count, + "timestamp": self._timestamp(), + } + await self._publish_diagnostic_event( + { + "kind": "lifecycle", + "event": "disconnect", + **lifecycle_payload, + } + ) + self._schedule_lifecycle_broadcast( + namespace, LIFECYCLE_DISCONNECT_EVENT, lifecycle_payload + ) + + async def route_event( + self, + namespace: str, + event_type: str, + data: dict[str, Any], + sid: str, + ack: Optional[Callable[[Any], None]] = None, + *, + include_handlers: Set[str] | None = None, + exclude_handlers: Set[str] | None = None, + allow_exclude: bool = False, + handler_id: str | None = None, + ) -> dict[str, Any]: + self._ensure_dispatcher_loop() + incoming = dict(data or {}) + correlation_id = self._resolve_correlation_id(incoming) + self._debug( + f"Routing event namespace={namespace} '{event_type}' sid={sid} correlation={correlation_id}" + ) + + include_meta_raw = incoming.pop("includeHandlers", None) + exclude_meta_raw = incoming.pop("excludeHandlers", None) + + if "data" in incoming and isinstance(incoming.get("data"), dict): + handler_payload = dict(incoming.get("data") or {}) + if "excludeSids" in incoming: + handler_payload["excludeSids"] = incoming.get("excludeSids") + else: + handler_payload = dict(incoming) + + handler_payload["correlationId"] = correlation_id + + try: + include_meta = self._normalize_handler_filter( + include_meta_raw, "includeHandlers" + ) + except ValueError as exc: + error = self._build_error_result( + handler_id=handler_id or self._identifier, + code="INVALID_FILTER", + message=str(exc), + correlation_id=correlation_id, + ) + if ack: + ack({"correlationId": correlation_id, "results": [error]}) + return {"correlationId": correlation_id, "results": [error]} + + try: + exclude_meta = self._normalize_handler_filter( + exclude_meta_raw, "excludeHandlers" + ) + except ValueError as exc: + error = self._build_error_result( + handler_id=handler_id or self._identifier, + code="INVALID_FILTER", + message=str(exc), + correlation_id=correlation_id, + ) + payload_error = {"correlationId": correlation_id, "results": [error]} + if ack: + ack(payload_error) + return payload_error + + if exclude_meta_raw is not None and not allow_exclude: + error = self._build_error_result( + handler_id=handler_id or self._identifier, + code="INVALID_FILTER", + message="excludeHandlers is not supported for this operation", + correlation_id=correlation_id, + ) + if ack: + ack({"correlationId": correlation_id, "results": [error]}) + return {"correlationId": correlation_id, "results": [error]} + + if include_handlers is not None and include_meta is not None: + if include_handlers != include_meta: + error = self._build_error_result( + handler_id=handler_id or self._identifier, + 
code="INVALID_FILTER", + message="Conflicting includeHandlers filters supplied", + correlation_id=correlation_id, + ) + if ack: + ack({"correlationId": correlation_id, "results": [error]}) + return {"correlationId": correlation_id, "results": [error]} + + if allow_exclude and exclude_handlers is not None and exclude_meta is not None: + if exclude_handlers != exclude_meta: + error = self._build_error_result( + handler_id=handler_id or self._identifier, + code="INVALID_FILTER", + message="Conflicting excludeHandlers filters supplied", + correlation_id=correlation_id, + ) + if ack: + ack({"correlationId": correlation_id, "results": [error]}) + return {"correlationId": correlation_id, "results": [error]} + + include = include_handlers or include_meta + exclude = exclude_handlers or (exclude_meta if allow_exclude else None) + + registered = self.handlers.get(namespace, {}).get(event_type, []) + if not registered: + PrintStyle.warning(f"No handlers registered for event '{event_type}'") + error = self._build_error_result( + handler_id=handler_id or self._identifier, + code="NO_HANDLERS", + message=f"No handler for namespace '{namespace}' event '{event_type}'", + correlation_id=correlation_id, + ) + if ack: + ack({"correlationId": correlation_id, "results": [error]}) + return {"correlationId": correlation_id, "results": [error]} + + try: + selected_handlers, _ = self._select_handlers( + namespace, event_type, include=include, exclude=exclude + ) + except ValueError as exc: + error = self._build_error_result( + handler_id=handler_id or self._identifier, + code="INVALID_FILTER", + message=str(exc), + correlation_id=correlation_id, + ) + if ack: + ack({"correlationId": correlation_id, "results": [error]}) + return {"correlationId": correlation_id, "results": [error]} + + if not selected_handlers: + error = self._build_error_result( + handler_id=handler_id or self._identifier, + code="NO_HANDLERS", + message=f"No handler for '{event_type}' after applying filters", + correlation_id=correlation_id, + ) + if ack: + ack({"correlationId": correlation_id, "results": [error]}) + return {"correlationId": correlation_id, "results": [error]} + + with self.lock: + info = self.connections.get((namespace, sid)) + if info: + info.last_activity = _utcnow() + + executions = await asyncio.gather( + *[ + self._invoke_handler(handler, event_type, dict(handler_payload), sid) + for handler in selected_handlers + ] + ) + + results: List[dict[str, Any]] = [] + for execution in executions: + handler = execution.handler + value = execution.value + duration_ms = execution.duration_ms + + if isinstance(value, Exception): # pragma: no cover - defensive logging + PrintStyle.error( + f"Error in handler {handler.identifier} for '{event_type}' (correlation {correlation_id}): {value}" + ) + results.append( + self._build_error_result( + handler_id=handler.identifier, + code="HANDLER_ERROR", + message="Internal server error", + details=str(value), + correlation_id=correlation_id, + duration_ms=duration_ms, + ) + ) + continue + + if isinstance(value, WebSocketResult): + results.append( + value.as_result( + handler_id=handler.identifier, + fallback_correlation_id=correlation_id, + duration_ms=duration_ms, + ) + ) + continue + + if value is None: + helper_result = WebSocketResult(ok=True) + elif isinstance(value, dict): + helper_result = WebSocketResult(ok=True, data=value) + else: + helper_result = WebSocketResult(ok=True, data={"result": value}) + + results.append( + helper_result.as_result( + handler_id=handler.identifier, + 
fallback_correlation_id=correlation_id, + duration_ms=duration_ms, + ) + ) + + await self._publish_diagnostic_event( + lambda: { + "kind": "inbound", + "sourceNamespace": namespace, + "namespace": namespace, + "eventType": event_type, + "sid": sid, + "correlationId": correlation_id, + "timestamp": self._timestamp(), + "handlerCount": len(selected_handlers), + "durationMs": sum((exec.duration_ms or 0.0) for exec in executions), + "resultSummary": self._summarize_results(results), + "payloadSummary": self._summarize_payload(handler_payload), + } + ) + + response_payload = {"correlationId": correlation_id, "results": results} + if ack: + ack(response_payload) + self._debug( + f"Completed event namespace={namespace} '{event_type}' sid={sid} correlation={correlation_id}" + ) + return response_payload + + async def request_for_sid( + self, + *, + namespace: str, + sid: str, + event_type: str, + data: dict[str, Any], + timeout_ms: int = 0, + handler_id: str | None = None, + include_handlers: Set[str] | None = None, + ) -> dict[str, Any]: + payload = dict(data or {}) + correlation_id = self._resolve_correlation_id(payload) + + with self.lock: + connected = (namespace, sid) in self.connections + if not connected: + return { + "correlationId": correlation_id, + "results": [ + self._build_error_result( + handler_id=handler_id or self._identifier, + code="CONNECTION_NOT_FOUND", + message=f"Connection '{sid}' not found in namespace '{namespace}'", + correlation_id=correlation_id, + ) + ], + } + + async def _invoke() -> dict[str, Any]: + return await self.route_event( + namespace, + event_type, + payload, + sid, + include_handlers=include_handlers, + handler_id=handler_id, + ) + + if timeout_ms and timeout_ms > 0: + try: + return await asyncio.wait_for(_invoke(), timeout=timeout_ms / 1000) + except asyncio.TimeoutError: + PrintStyle.warning( + f"request timeout for sid {sid} event '{event_type}'" + ) + return { + "correlationId": correlation_id, + "results": [ + self._build_error_result( + handler_id=handler_id or self._identifier, + code="TIMEOUT", + message="Request timeout", + correlation_id=correlation_id, + ) + ], + } + return await _invoke() + + async def route_event_all( + self, + namespace: str, + event_type: str, + data: dict[str, Any], + *, + timeout_ms: int = 0, + exclude_handlers: Set[str] | None = None, + handler_id: str | None = None, + ) -> list[dict[str, Any]]: + """Fan-out a request to all active connections and aggregate responses.""" + + base_payload = dict(data or {}) + exclude_meta_raw = base_payload.pop("excludeHandlers", None) + exclude_combined: Set[str] | None = exclude_handlers + correlation_id = self._resolve_correlation_id(base_payload) + + if exclude_meta_raw is not None: + try: + exclude_meta = self._normalize_handler_filter( + exclude_meta_raw, "excludeHandlers" + ) + except ValueError as exc: + error = self._build_error_result( + handler_id=handler_id or self._identifier, + code="INVALID_FILTER", + message=str(exc), + correlation_id=correlation_id, + ) + return [ + { + "sid": "__invalid__", + "correlationId": correlation_id, + "results": [error], + } + ] + + if exclude_combined is None: + exclude_combined = exclude_meta + elif exclude_meta is not None and exclude_combined != exclude_meta: + error = self._build_error_result( + handler_id=handler_id or self._identifier, + code="INVALID_FILTER", + message="Conflicting excludeHandlers filters supplied", + correlation_id=correlation_id, + ) + return [ + { + "sid": "__invalid__", + "correlationId": correlation_id, + "results": 
[error], + } + ] + + self._debug( + f"Starting requestAll namespace={namespace} for '{event_type}' correlation={correlation_id}" + ) + + with self.lock: + active_sids = [ + conn_identity[1] + for conn_identity in self.connections.keys() + if conn_identity[0] == namespace + ] + if not active_sids: + self._debug( + f"No active connections for requestAll namespace={namespace} '{event_type}' correlation={correlation_id}" + ) + return [] + + timeout_seconds = timeout_ms / 1000 if timeout_ms and timeout_ms > 0 else None + + async def _invoke_for_sid(target_sid: str) -> dict[str, Any]: + async def _dispatch() -> dict[str, Any]: + return await self.route_event( + namespace, + event_type, + base_payload, + target_sid, + allow_exclude=True, + exclude_handlers=exclude_combined, + handler_id=handler_id, + ) + + if timeout_seconds is None: + return await _dispatch() + + try: + task = asyncio.create_task(_dispatch()) + return await asyncio.wait_for(asyncio.shield(task), timeout=timeout_seconds) + except asyncio.TimeoutError: + PrintStyle.warning( + f"requestAll timeout for sid {target_sid} correlation={correlation_id}" + ) + # Ensure any late exceptions are observed so asyncio does not log + # "Task exception was never retrieved". + try: + task.add_done_callback(lambda t: t.exception()) # type: ignore[arg-type] + except Exception: # pragma: no cover - defensive + pass + return { + "correlationId": correlation_id, + "results": [ + self._build_error_result( + handler_id=handler_id or self._identifier, + code="TIMEOUT", + message="Request timeout", + correlation_id=correlation_id, + ) + ], + } + + tasks = { + sid: asyncio.create_task(_invoke_for_sid(sid)) for sid in active_sids + } + + aggregated: list[dict[str, Any]] = [] + for sid, task in tasks.items(): + result = await task + if isinstance(result, dict): + aggregated.append( + { + "sid": sid, + "correlationId": result.get("correlationId", correlation_id), + "results": result.get("results", []), + } + ) + else: + aggregated.append( + { + "sid": sid, + "correlationId": correlation_id, + "results": result, + } + ) + + self._debug( + f"Completed requestAll namespace={namespace} for '{event_type}' correlation={correlation_id}" + ) + return aggregated + + def _wrap_envelope( + self, + handler_id: str | None, + data: dict[str, Any], + *, + correlation_id: str | None = None, + ) -> dict[str, Any]: + hid = handler_id or self._identifier + ts = _utcnow().isoformat(timespec="milliseconds").replace("+00:00", "Z") + event_id = str(uuid.uuid4()) + correlation = correlation_id or str(uuid.uuid4()) + return { + "handlerId": hid, + "eventId": event_id, + "correlationId": correlation, + "ts": ts, + "data": data or {}, + } + + async def emit_to( + self, + namespace: str, + sid: str, + event_type: str, + data: dict[str, Any], + *, + handler_id: str | None = None, + correlation_id: str | None = None, + diagnostic: bool = False, + ) -> None: + envelope = self._wrap_envelope( + handler_id, + data, + correlation_id=correlation_id, + ) + delivered = False + buffered = False + identity: ConnectionIdentity = (namespace, sid) + + with self.lock: + connected = identity in self.connections + known = identity in self._known_sids or identity in self.buffers + + if connected: + self._debug( + "Emit to namespace=%s sid=%s event=%s eventId=%s correlationId=%s handlerId=%s" + % ( + namespace, + sid, + event_type, + envelope.get("eventId"), + envelope.get("correlationId"), + envelope.get("handlerId"), + ) + ) + await self._run_on_dispatcher_loop( + self.socketio.emit(event_type, envelope, 
to=sid, namespace=namespace) + ) + delivered = True + else: + if not known: + raise ConnectionNotFoundError(sid, namespace=namespace) + with self.lock: + self._buffer_event( + identity, + event_type, + data, + handler_id, + envelope["correlationId"], + ) + buffered = True + + if not diagnostic: + await self._publish_diagnostic_event( + lambda: { + "kind": "outbound", + "direction": "emit_to", + "eventType": event_type, + "namespace": namespace, + "sid": sid, + "correlationId": envelope["correlationId"], + "handlerId": envelope["handlerId"], + "timestamp": self._timestamp(), + "delivered": delivered, + "buffered": buffered, + "payloadSummary": self._summarize_payload(data), + } + ) + + async def broadcast( + self, + namespace: str, + event_type: str, + data: dict[str, Any], + *, + exclude_sids: str | Iterable[str] | None = None, + handler_id: str | None = None, + correlation_id: str | None = None, + diagnostic: bool = False, + ) -> None: + excluded = self._normalize_sid_filter(exclude_sids) + + targets: list[str] = [] + with self.lock: + current_identities = list(self.connections.keys()) + for conn_identity in current_identities: + if conn_identity[0] != namespace: + continue + sid = conn_identity[1] + if sid in excluded: + continue + targets.append(sid) + await self.emit_to( + namespace, + sid, + event_type, + data, + handler_id=handler_id, + correlation_id=correlation_id, + diagnostic=diagnostic, + ) + + if not diagnostic: + await self._publish_diagnostic_event( + lambda: { + "kind": "outbound", + "direction": "broadcast", + "eventType": event_type, + "namespace": namespace, + "targets": targets[:10], + "targetCount": len(targets), + "correlationId": correlation_id, + "handlerId": handler_id or self._identifier, + "timestamp": self._timestamp(), + "payloadSummary": self._summarize_payload(data), + } + ) + + async def _run_lifecycle(self, namespace: str, fn: Callable[[WebSocketHandler], Any]) -> None: + seen: Set[WebSocketHandler] = set() + coros: list[Any] = [] + for handler_list in self.handlers.get(namespace, {}).values(): + for handler in handler_list: + if handler in seen: + continue + seen.add(handler) + coros.append(self._get_handler_worker().execute_inside(fn, handler)) + if coros: + await asyncio.gather(*coros, return_exceptions=True) + + def _buffer_event( + self, + identity: ConnectionIdentity, + event_type: str, + data: dict[str, Any], + handler_id: str | None, + correlation_id: str | None, + ) -> None: + namespace, sid = identity + buffer = self.buffers[identity] + buffer.append( + BufferedEvent( + event_type=event_type, + data=data, + handler_id=handler_id, + correlation_id=correlation_id, + ) + ) + while len(buffer) > BUFFER_MAX_SIZE: + dropped = buffer.popleft() + PrintStyle.warning( + f"Dropping buffered event '{dropped.event_type}' for namespace={namespace} sid={sid} (overflow)" + ) + self._debug( + f"Buffered event namespace={namespace} '{event_type}' sid={sid} (queue length={len(buffer)})" + ) + + async def _flush_buffer(self, identity: ConnectionIdentity) -> None: + self._ensure_dispatcher_loop() + buffer = self.buffers.get(identity) + if not buffer: + return + namespace, sid = identity + now = _utcnow() + delivered = 0 + while buffer: + event = buffer.popleft() + if now - event.timestamp > BUFFER_TTL: + self._debug( + f"Discarding expired buffered event '{event.event_type}' for sid {sid}" + ) + continue + envelope = self._wrap_envelope( + event.handler_id, + event.data, + correlation_id=event.correlation_id, + ) + self._debug( + "Flush to sid=%s event=%s eventId=%s 
correlationId=%s handlerId=%s"
+                % (
+                    sid,
+                    event.event_type,
+                    envelope.get("eventId"),
+                    envelope.get("correlationId"),
+                    envelope.get("handlerId"),
+                )
+            )
+            await self._run_on_dispatcher_loop(
+                self.socketio.emit(
+                    event.event_type, envelope, to=sid, namespace=namespace
+                )
+            )
+            delivered += 1
+        if identity in self.buffers:
+            self.buffers.pop(identity, None)
+        if delivered:
+            PrintStyle.info(
+                f"Flushed {delivered} buffered event(s) to namespace={namespace} sid={sid}"
+            )
+
+    def _build_error_result(
+        self,
+        *,
+        handler_id: str | None = None,
+        code: str,
+        message: str,
+        details: str | None = None,
+        correlation_id: str | None = None,
+        duration_ms: float | None = None,
+    ) -> dict[str, Any]:
+        error_payload = {"code": code, "error": message}
+        if details:
+            error_payload["details"] = details
+        result: dict[str, Any] = {
+            "handlerId": handler_id or self._identifier,
+            "ok": False,
+            "error": error_payload,
+        }
+        if correlation_id is not None:
+            result["correlationId"] = correlation_id
+        if duration_ms is not None:
+            result["durationMs"] = round(duration_ms, 4)
+        return result
+
+    # Session tracking helpers (single-user defaults)
+    def get_sids_for_user(self, user: str | None = None) -> list[str]:
+        """Return SIDs for a user; single-user default returns all active SIDs."""
+        with self.lock:
+            bucket = self._ALL_USERS_BUCKET if user is None else user
+            # user_to_sids stores (namespace, sid) identities; return bare SIDs.
+            return [identity[1] for identity in self.user_to_sids.get(bucket, set())]
+
+    def get_user_for_sid(self, sid: str) -> str | None:
+        """Return user identifier for a SID or None."""
+        with self.lock:
+            # sid_to_user is keyed by (namespace, sid) identities, so scan for
+            # the first namespace that knows this SID.
+            for (_namespace, known_sid), user in self.sid_to_user.items():
+                if known_sid == sid:
+                    return user
+            return None
+
+    def set_server_restart_broadcast(self, enabled: bool) -> None:
+        """Enable or disable automatic server restart broadcasts."""
+
+        self._server_restart_enabled = bool(enabled)
diff --git a/python/helpers/websocket_namespace_discovery.py b/python/helpers/websocket_namespace_discovery.py
new file mode 100644
index 0000000000..134317bd8c
--- /dev/null
+++ b/python/helpers/websocket_namespace_discovery.py
@@ -0,0 +1,186 @@
+from __future__ import annotations
+
+import importlib.util
+import inspect
+import os
+from dataclasses import dataclass
+from types import ModuleType
+from typing import Iterable
+
+from python.helpers.files import get_abs_path
+from python.helpers.print_style import PrintStyle
+from python.helpers.websocket import WebSocketHandler
+
+
+@dataclass(frozen=True)
+class NamespaceDiscovery:
+    namespace: str
+    handler_classes: tuple[type[WebSocketHandler], ...]
+    source_files: tuple[str, ...]
+
+
+def _to_namespace(entry_name: str) -> str:
+    if entry_name == "_default":
+        return "/"
+    stripped = entry_name[: -len("_handler")] if entry_name.endswith("_handler") else entry_name
+    if not stripped:
+        raise ValueError(f"Invalid handler entry name: {entry_name!r}")
+    return f"/{stripped}"
+
+
+def _unique_module_name(file_path: str) -> str:
+    # Use a stable, unique module name derived from the relative path to avoid
+    # collisions when importing different files with the same basename.
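+    # Illustrative mapping (assuming the repo root as the relpath base):
+    #   "python/websocket_handlers/hello_handler.py" -> "a0_ws_ns_python_websocket_handlers_hello_handler"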
+ rel_path = os.path.relpath(file_path, get_abs_path(".")) + rel_no_ext = os.path.splitext(rel_path)[0] + safe = "".join(ch if ch.isalnum() else "_" for ch in rel_no_ext) + return f"a0_ws_ns_{safe}" + + +def _import_module(file_path: str) -> ModuleType: + abs_path = get_abs_path(file_path) + module_name = _unique_module_name(abs_path) + spec = importlib.util.spec_from_file_location(module_name, abs_path) + if spec is None or spec.loader is None: + raise ImportError(f"Could not load module from {abs_path}") + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def _get_handler_classes(module: ModuleType) -> list[type[WebSocketHandler]]: + discovered: list[type[WebSocketHandler]] = [] + for _name, cls in inspect.getmembers(module, inspect.isclass): + if cls is WebSocketHandler: + continue + if not issubclass(cls, WebSocketHandler): + continue + if cls.__module__ != module.__name__: + continue + discovered.append(cls) + return discovered + + +def discover_websocket_namespaces( + *, + handlers_folder: str = "python/websocket_handlers", + include_root_default: bool = True, +) -> list[NamespaceDiscovery]: + """ + Discover websocket namespaces from first-level filesystem entries. + + Supported entries: + - File entry: `*_handler.py` defines an application namespace. + - Folder entry: `/` or `_handler/` defines an application namespace and loads + `*.py` files one level deep (ignores `__init__.py` and ignores deeper nesting). + - Reserved root mapping: `_default.py` maps to `/` when `include_root_default=True`. + """ + + abs_folder = get_abs_path(handlers_folder) + entries: list[NamespaceDiscovery] = [] + + try: + filenames = sorted(os.listdir(abs_folder)) + except FileNotFoundError: + PrintStyle.warning(f"WebSocket handlers folder not found: {abs_folder}") + return [] + + for entry in filenames: + entry_path = os.path.join(abs_folder, entry) + + # Folder entries define namespaces and can host multiple handler modules. + if os.path.isdir(entry_path): + if entry.startswith("__"): + continue + namespace = _to_namespace(entry) + + handler_classes: list[type[WebSocketHandler]] = [] + source_files: list[str] = [] + + try: + child_names = sorted(os.listdir(entry_path)) + except FileNotFoundError: + continue + + for child in child_names: + if not child.endswith(".py"): + continue + if child == "__init__.py": + continue + child_path = os.path.join(entry_path, child) + if not os.path.isfile(child_path): + # Ignore deeper nesting. + continue + + module = _import_module(child_path) + discovered = _get_handler_classes(module) + if not discovered: + raise RuntimeError( + f"WebSocket handler module {child_path} defines no WebSocketHandler subclasses" + ) + if len(discovered) > 1: + raise RuntimeError( + f"WebSocket handler module {child_path} defines multiple WebSocketHandler subclasses: " + f"{', '.join(sorted(cls.__name__ for cls in discovered))}" + ) + handler_classes.append(discovered[0]) + source_files.append(child_path) + + if not handler_classes: + PrintStyle.warning( + f"WebSocket handlers folder entry '{entry_path}' is empty; treating namespace '{namespace}' as unregistered" + ) + continue + + entries.append( + NamespaceDiscovery( + namespace=namespace, + handler_classes=tuple(handler_classes), + source_files=tuple(source_files), + ) + ) + continue + + # File entries define namespaces. 
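+        # Illustrative mapping via _to_namespace (see above):
+        #   "hello_handler.py" -> "/hello"; "_default.py" -> "/" (reserved root).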
+ if not entry.endswith(".py"): + continue + if entry == "__init__.py": + continue + + if entry == "_default.py": + if not include_root_default: + continue + entry_name = "_default" + else: + if not entry.endswith("_handler.py"): + continue + entry_name = entry[: -len("_handler.py")] + + namespace = _to_namespace(entry_name) + module_path = os.path.join(abs_folder, entry) + + module = _import_module(module_path) + handler_classes = _get_handler_classes(module) + if not handler_classes: + raise RuntimeError( + f"WebSocket handler module {module_path} defines no WebSocketHandler subclasses" + ) + if len(handler_classes) > 1: + raise RuntimeError( + f"WebSocket handler module {module_path} defines multiple WebSocketHandler subclasses: " + f"{', '.join(sorted(cls.__name__ for cls in handler_classes))}" + ) + + entries.append( + NamespaceDiscovery( + namespace=namespace, + handler_classes=(handler_classes[0],), + source_files=(module_path,), + ) + ) + + return entries + + +def iter_discovered_namespaces(discoveries: Iterable[NamespaceDiscovery]) -> list[str]: + return [entry.namespace for entry in discoveries] diff --git a/python/websocket_handlers/_default.py b/python/websocket_handlers/_default.py new file mode 100644 index 0000000000..cd90a9a752 --- /dev/null +++ b/python/websocket_handlers/_default.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from typing import Any + +from python.helpers.websocket import WebSocketHandler, WebSocketResult + + +class RootDefaultHandler(WebSocketHandler): + """Reserved root (`/`) namespace diagnostics-only handler. + + Root is intentionally *not* used for application traffic. This handler exists to support + optional low-risk diagnostics on `/` without making root behave like a global namespace. + """ + + @classmethod + def requires_auth(cls) -> bool: + return False + + @classmethod + def requires_csrf(cls) -> bool: + return False + + @classmethod + def get_event_types(cls) -> list[str]: + # Diagnostics-only noop endpoint. 
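+        # Illustrative round-trip: emitting "ws_root_echo" with {"ping": 1} makes
+        # process_event below return {"ok": True, "namespace": "/", "sid": <sid>, "echo": {"ping": 1}}.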
+ return ["ws_root_echo"] + + async def process_event( + self, event_type: str, data: dict[str, Any], sid: str + ) -> dict[str, Any] | WebSocketResult | None: + return {"ok": True, "namespace": self.namespace, "sid": sid, "echo": data} diff --git a/python/websocket_handlers/dev_websocket_test_handler.py b/python/websocket_handlers/dev_websocket_test_handler.py new file mode 100644 index 0000000000..948dca57bb --- /dev/null +++ b/python/websocket_handlers/dev_websocket_test_handler.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +import asyncio +from typing import Any, Dict + +from python.helpers.print_style import PrintStyle +from python.helpers import runtime +from python.helpers.websocket import WebSocketHandler, WebSocketResult + + +class DevWebsocketTestHandler(WebSocketHandler): + """Test harness handler powering the developer WebSocket validation component.""" + + @classmethod + def get_event_types(cls) -> list[str]: + return [ + "ws_tester_emit", + "ws_tester_request", + "ws_tester_request_delayed", + "ws_tester_trigger_persistence", + "ws_tester_request_all", + "ws_tester_broadcast_demo_trigger", + "ws_event_console_subscribe", + "ws_event_console_unsubscribe", + ] + + async def process_event( + self, event_type: str, data: Dict[str, Any], sid: str + ) -> dict[str, Any] | WebSocketResult | None: + if event_type == "ws_event_console_subscribe": + if not runtime.is_development(): + return self.result_error( + code="NOT_AVAILABLE", + message="Event console is available only in development mode", + ) + registered = self.manager.register_diagnostic_watcher(self.namespace, sid) + if not registered: + return self.result_error( + code="SUBSCRIBE_FAILED", + message="Unable to subscribe to diagnostics", + ) + return self.result_ok( + {"status": "subscribed", "timestamp": data.get("requestedAt")} + ) + + if event_type == "ws_event_console_unsubscribe": + self.manager.unregister_diagnostic_watcher(self.namespace, sid) + return self.result_ok({"status": "unsubscribed"}) + + if event_type == "ws_tester_emit": + message = data.get("message", "emit") + payload = { + "message": message, + "echo": True, + "timestamp": data.get("timestamp"), + } + await self.broadcast("ws_tester_broadcast", payload) + PrintStyle.info(f"Harness emit broadcasted message='{message}'") + return None + + if event_type == "ws_tester_request": + value = data.get("value") + response = { + "echo": value, + "handler": self.identifier, + "status": "ok", + } + PrintStyle.debug("Harness request responded with echo %s", value) + return self.result_ok( + response, + correlation_id=data.get("correlationId"), + ) + + if event_type == "ws_tester_request_delayed": + delay_ms = int(data.get("delay_ms", 0)) + await asyncio.sleep(delay_ms / 1000) + PrintStyle.warning( + "Harness delayed request finished after %s ms", delay_ms + ) + return self.result_ok( + { + "status": "delayed", + "delay_ms": delay_ms, + "handler": self.identifier, + }, + correlation_id=data.get("correlationId"), + ) + + if event_type == "ws_tester_trigger_persistence": + phase = data.get("phase", "unknown") + payload = { + "phase": phase, + "handler": self.identifier, + } + await self.emit_to(sid, "ws_tester_persistence", payload) + PrintStyle.info(f"Harness persistence event phase='{phase}' -> {sid}") + return None + + if event_type == "ws_tester_request_all": + marker = data.get("marker") + PrintStyle.debug( + "Harness requestAll invoked by %s marker='%s'", sid, marker + ) + exclude_handlers = data.get("excludeHandlers") + aggregated = await 
self.request_all( + "ws_tester_request", + data, + timeout_ms=2_000, + exclude_handlers=exclude_handlers, + ) + return self.result_ok( + {"results": aggregated}, + correlation_id=data.get("correlationId"), + ) + + if event_type == "ws_tester_broadcast_demo_trigger": + payload = { + "demo": True, + "requested_at": data.get("requested_at"), + } + await self.broadcast("ws_tester_broadcast_demo", payload) + PrintStyle.info("Harness broadcast demo event dispatched") + return None + + PrintStyle.warning(f"Harness received unknown event '{event_type}'") + return self.result_error( + code="HARNESS_UNKNOWN_EVENT", + message="Unhandled event", + details=event_type, + ) diff --git a/python/websocket_handlers/hello_handler.py b/python/websocket_handlers/hello_handler.py new file mode 100644 index 0000000000..28219ccdc3 --- /dev/null +++ b/python/websocket_handlers/hello_handler.py @@ -0,0 +1,19 @@ +from __future__ import annotations + +from python.helpers.print_style import PrintStyle +from python.helpers.websocket import WebSocketHandler + + +class HelloHandler(WebSocketHandler): + """Sample handler used for foundational testing.""" + + @classmethod + def get_event_types(cls) -> list[str]: + return ["hello_request"] + + async def process_event(self, event_type: str, data: dict, sid: str): + name = data.get("name") or "stranger" + PrintStyle.info(f"hello_request from {sid} ({name})") + return {"message": f"Hello, {name}!", "handler": self.identifier} + + diff --git a/python/websocket_handlers/state_sync_handler.py b/python/websocket_handlers/state_sync_handler.py new file mode 100644 index 0000000000..8d86620c84 --- /dev/null +++ b/python/websocket_handlers/state_sync_handler.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from python.helpers import runtime +from python.helpers.print_style import PrintStyle +from python.helpers.websocket import WebSocketHandler, WebSocketResult +from python.helpers.state_monitor import get_state_monitor +from python.helpers.state_snapshot import ( + StateRequestValidationError, + parse_state_request_payload, +) + + +class StateSyncHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["state_request"] + + async def on_connect(self, sid: str) -> None: + monitor = get_state_monitor() + monitor.bind_manager(self.manager, handler_id=self.identifier) + monitor.register_sid(self.namespace, sid) + PrintStyle.info(f"[StateSyncHandler] connect sid={sid}") + + async def on_disconnect(self, sid: str) -> None: + get_state_monitor().unregister_sid(self.namespace, sid) + PrintStyle.info(f"[StateSyncHandler] disconnect sid={sid}") + + async def process_event(self, event_type: str, data: dict, sid: str) -> dict | WebSocketResult | None: + correlation_id = data.get("correlationId") + try: + request = parse_state_request_payload(data) + except StateRequestValidationError as exc: + PrintStyle.warning( + f"[StateSyncHandler] INVALID_REQUEST sid={sid} reason={exc.reason} details={exc.details!r}" + ) + return self.result_error( + code="INVALID_REQUEST", + message=str(exc), + correlation_id=correlation_id, + ) + + PrintStyle.debug( + f"[StateSyncHandler] state_request sid={sid} context={request.context!r} " + f"log_from={request.log_from} notifications_from={request.notifications_from} timezone={request.timezone!r} " + f"correlation_id={correlation_id}" + ) + + # Baseline sequence must be reset on every state_request (new sync period). + # V1 policy: seq_base starts >0 to allow simple gating checks. 
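+        # e.g. with seq_base=1 the first state_push after this handshake carries seq=2,
+        # so a client can treat any push with seq <= seq_base as stale (assumed gating use).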
+ seq_base = 1 + monitor = get_state_monitor() + monitor.update_projection( + self.namespace, + sid, + request=request, + seq_base=seq_base, + ) + # INVARIANT.STATE.INITIAL_SNAPSHOT: schedule a full snapshot quickly after handshake. + monitor.mark_dirty( + self.namespace, + sid, + reason="state_sync_handler.StateSyncHandler.state_request", + ) + PrintStyle.debug(f"[StateSyncHandler] state_request accepted sid={sid} seq_base={seq_base}") + + return self.result_ok( + { + "runtime_epoch": runtime.get_runtime_id(), + "seq_base": seq_base, + }, + correlation_id=correlation_id, + ) diff --git a/requirements.txt b/requirements.txt index a14a853393..1d853e665d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ browser-use==0.5.11 docker==7.1.0 duckduckgo-search==6.1.12 faiss-cpu==1.11.0 -fastmcp==2.3.4 +fastmcp==2.13.1 fasta2a==0.5.0 flask[async]==3.0.3 flask-basicauth==0.2.0 @@ -19,7 +19,7 @@ langchain-unstructured[all-docs]==0.1.6 openai-whisper==20240930 lxml_html_clean==0.3.1 markdown==3.7 -mcp==1.13.1 +mcp==1.22.0 newspaper3k==0.2.8 paramiko==3.5.0 playwright==1.52.0 @@ -47,4 +47,7 @@ html2text>=2024.2.26 beautifulsoup4>=4.12.3 boto3>=1.35.0 exchangelib>=5.4.3 -pywinpty==3.0.2; sys_platform == "win32" \ No newline at end of file +pywinpty==3.0.2; sys_platform == "win32" +python-socketio>=5.14.2 +uvicorn>=0.38.0 +wsproto>=1.2.0 diff --git a/run_ui.py b/run_ui.py index 1691f69e74..5971c20fef 100644 --- a/run_ui.py +++ b/run_ui.py @@ -1,23 +1,32 @@ -import asyncio from datetime import timedelta import os import secrets -import hashlib import time import socket import struct from functools import wraps import threading + +import uvicorn from flask import Flask, request, Response, session, redirect, url_for, render_template_string from werkzeug.wrappers.response import Response as BaseResponse + import initialize -from python.helpers import files, git, mcp_server, fasta2a_server +from python.helpers import files, git, mcp_server, fasta2a_server, settings as settings_helper from python.helpers.files import get_abs_path from python.helpers import runtime, dotenv, process +from python.helpers.websocket import WebSocketHandler, validate_ws_origin from python.helpers.extract_tools import load_classes_from_folder from python.helpers.api import ApiHandler from python.helpers.print_style import PrintStyle from python.helpers import login +import socketio # type: ignore[import-untyped] +from socketio import ASGIApp, packet +from starlette.applications import Starlette +from starlette.routing import Mount +from uvicorn.middleware.wsgi import WSGIMiddleware +from python.helpers.websocket_manager import WebSocketManager +from python.helpers.websocket_namespace_discovery import discover_websocket_namespaces # disable logging import logging @@ -42,7 +51,24 @@ PERMANENT_SESSION_LIFETIME=timedelta(days=1) ) -lock = threading.Lock() +lock = threading.RLock() + +socketio_server = socketio.AsyncServer( + async_mode="asgi", + namespaces="*", + cors_allowed_origins=lambda _origin, environ: validate_ws_origin(environ)[0], + logger=False, + engineio_logger=False, + ping_interval=25, # explicit default to avoid future lib changes + ping_timeout=20, # explicit default to avoid future lib changes + max_http_buffer_size=50 * 1024 * 1024, +) + +websocket_manager = WebSocketManager(socketio_server, lock) +_settings = settings_helper.get_settings() +websocket_manager.set_server_restart_broadcast( + _settings.get("websocket_server_restart_enabled", True) +) # Set up basic authentication for UI and API but not 
MCP # basic_auth = BasicAuth(webapp) @@ -50,9 +76,9 @@ def is_loopback_address(address): loopback_checker = { - socket.AF_INET: lambda x: struct.unpack("!I", socket.inet_aton(x))[0] - >> (32 - 8) - == 127, + socket.AF_INET: lambda x: ( + struct.unpack("!I", socket.inet_aton(x))[0] >> (32 - 8) + ) == 127, socket.AF_INET6: lambda x: x == "::1", } address_type = "hostname" @@ -81,6 +107,7 @@ def is_loopback_address(address): return False return True + def requires_api_key(f): @wraps(f) async def decorated(*args, **kwargs): @@ -128,11 +155,12 @@ async def decorated(*args, **kwargs): if session.get('authentication') != user_pass_hash: return redirect(url_for('login_handler')) - + return await f(*args, **kwargs) return decorated + def csrf_protect(f): @wraps(f) async def decorated(*args, **kwargs): @@ -146,27 +174,30 @@ async def decorated(*args, **kwargs): return decorated + @webapp.route("/login", methods=["GET", "POST"]) async def login_handler(): error = None if request.method == 'POST': user = dotenv.get_dotenv_value("AUTH_LOGIN") password = dotenv.get_dotenv_value("AUTH_PASSWORD") - + if request.form['username'] == user and request.form['password'] == password: session['authentication'] = login.get_credentials_hash() return redirect(url_for('serve_index')) else: error = 'Invalid Credentials. Please try again.' - + login_page_content = files.read_file("webui/login.html") return render_template_string(login_page_content, error=error) + @webapp.route("/logout") async def logout_handler(): session.pop('authentication', None) return redirect(url_for('login_handler')) + # handle default address, load index @webapp.route("/", methods=["GET"]) @requires_auth @@ -183,31 +214,192 @@ async def serve_index(): index = files.replace_placeholders_text( _content=index, version_no=gitinfo["version"], - version_time=gitinfo["commit_time"] + version_time=gitinfo["commit_time"], + runtime_id=runtime.get_runtime_id(), + runtime_is_development=("true" if runtime.is_development() else "false"), ) return index -def run(): - PrintStyle().print("Initializing framework...") - # Suppress only request logs but keep the startup messages - from werkzeug.serving import WSGIRequestHandler - from werkzeug.serving import make_server - from werkzeug.middleware.dispatcher import DispatcherMiddleware - from a2wsgi import ASGIMiddleware +def _build_websocket_handlers_by_namespace( + socketio_server: socketio.AsyncServer, + lock: threading.RLock, +) -> dict[str, list[WebSocketHandler]]: + discoveries = discover_websocket_namespaces( + handlers_folder="python/websocket_handlers", + include_root_default=True, + ) + + handlers_by_namespace: dict[str, list[WebSocketHandler]] = {} + for discovery in discoveries: + namespace = discovery.namespace + for handler_cls in discovery.handler_classes: + handler = handler_cls.get_instance(socketio_server, lock) + handlers_by_namespace.setdefault(namespace, []).append(handler) + + return handlers_by_namespace + + +def configure_websocket_namespaces( + *, + webapp: Flask, + socketio_server: socketio.AsyncServer, + websocket_manager: WebSocketManager, + handlers_by_namespace: dict[str, list[WebSocketHandler]], +) -> set[str]: + namespace_map: dict[str, list[WebSocketHandler]] = { + namespace: list(handlers) for namespace, handlers in handlers_by_namespace.items() + } - PrintStyle().print("Starting server...") + # Always include the reserved root namespace. It is unhandled for application events by + # default, but request-style calls must resolve deterministically with NO_HANDLERS. 
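+    # Illustrative outcome: a request-style call on "/" for an event nothing handles
+    # acks results=[{"ok": False, "error": {"code": "NO_HANDLERS", ...}}] (see route_event).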
+ namespace_map.setdefault("/", []) + + websocket_manager.register_handlers(namespace_map) + + allowed_namespaces = set(namespace_map.keys()) + original_handle_connect = socketio_server._handle_connect # type: ignore[attr-defined] + + async def _handle_connect_with_namespace_gatekeeper(eio_sid, namespace, data): + requested = namespace or "/" + if requested not in allowed_namespaces: + await socketio_server._send_packet( + eio_sid, + socketio_server.packet_class( + packet.CONNECT_ERROR, + data={ + "message": "UNKNOWN_NAMESPACE", + "data": {"code": "UNKNOWN_NAMESPACE", "namespace": requested}, + }, + namespace=requested, + ), + ) + return + await original_handle_connect(eio_sid, namespace, data) + + socketio_server._handle_connect = _handle_connect_with_namespace_gatekeeper # type: ignore[assignment] + + def _register_namespace_handlers( + namespace: str, namespace_handlers: list[WebSocketHandler] + ) -> None: + # A namespace is the WebSocket equivalent of an API endpoint. + # Security requirements must be consistent within the namespace (no any()-based union). + auth_required = False + csrf_required = False + if namespace_handlers: + auth_required = bool(namespace_handlers[0].requires_auth()) + csrf_required = bool(namespace_handlers[0].requires_csrf()) + for handler in namespace_handlers[1:]: + if ( + bool(handler.requires_auth()) != auth_required + or bool(handler.requires_csrf()) != csrf_required + ): + raise ValueError( + f"WebSocket namespace {namespace!r} has mixed auth/csrf requirements across handlers" + ) + + @socketio_server.on("connect", namespace=namespace) + async def _connect( # type: ignore[override] + sid, + environ, + _auth, + _namespace: str = namespace, + _auth_required: bool = auth_required, + _csrf_required: bool = csrf_required, + ): + with webapp.request_context(environ): + origin_ok, origin_reason = validate_ws_origin(environ) + if not origin_ok: + PrintStyle.warning( + f"WebSocket origin validation failed for {_namespace} {sid}: {origin_reason or 'invalid'}" + ) + return False - class NoRequestLoggingWSGIRequestHandler(WSGIRequestHandler): - def log_request(self, code="-", size="-"): - pass # Override to suppress request logging + if _auth_required: + credentials_hash = login.get_credentials_hash() + if credentials_hash: + if session.get("authentication") != credentials_hash: + PrintStyle.warning( + f"WebSocket authentication failed for {_namespace} {sid}: session not valid" + ) + return False + else: + PrintStyle.debug( + "WebSocket authentication required but credentials not configured; proceeding" + ) + + if _csrf_required: + expected_token = session.get("csrf_token") + if not isinstance(expected_token, str) or not expected_token: + PrintStyle.warning( + f"WebSocket CSRF validation failed for {_namespace} {sid}: csrf_token not initialized" + ) + return False + + auth_token = None + if isinstance(_auth, dict): + auth_token = _auth.get("csrf_token") or _auth.get("csrfToken") + if not isinstance(auth_token, str) or not auth_token: + PrintStyle.warning( + f"WebSocket CSRF validation failed for {_namespace} {sid}: missing csrf_token in auth" + ) + return False + if auth_token != expected_token: + PrintStyle.warning( + f"WebSocket CSRF validation failed for {_namespace} {sid}: csrf_token mismatch" + ) + return False + + cookie_name = f"csrf_token_{runtime.get_runtime_id()}" + cookie_token = request.cookies.get(cookie_name) + if cookie_token != expected_token: + PrintStyle.warning( + f"WebSocket CSRF validation failed for {_namespace} {sid}: csrf cookie mismatch" + ) + 
return False + + user_id = session.get("user_id") or "single_user" + await websocket_manager.handle_connect(_namespace, sid, user_id=user_id) + return True + + @socketio_server.on("disconnect", namespace=namespace) + async def _disconnect(sid, _namespace: str = namespace): # type: ignore[override] + await websocket_manager.handle_disconnect(_namespace, sid) + + def _register_socketio_event(event_type: str) -> None: + @socketio_server.on(event_type, namespace=namespace) + async def _event_handler( + sid, + data, + _event_type: str = event_type, + _namespace: str = namespace, + ): + payload = data or {} + return await websocket_manager.route_event( + _namespace, _event_type, payload, sid + ) + + for _event_type in websocket_manager.iter_event_types(namespace): + _register_socketio_event(_event_type) + + @socketio_server.on("*", namespace=namespace) + async def _catch_all(event, sid, data, _namespace: str = namespace): + payload = data or {} + return await websocket_manager.route_event(_namespace, event, payload, sid) + + for namespace, namespace_handlers in namespace_map.items(): + _register_namespace_handlers(namespace, namespace_handlers) + + return allowed_namespaces + + +def run(): + PrintStyle().print("Initializing framework...") - # Get configuration from environment port = runtime.get_web_ui_port() host = ( runtime.get_arg("host") or dotenv.get_dotenv_value("WEB_UI_HOST") or "localhost" ) - server = None def register_api_handler(app, handler: type[ApiHandler]): name = handler.__module__.split(".")[-1] @@ -232,37 +424,73 @@ async def handler_wrap() -> BaseResponse: methods=handler.get_methods(), ) - # initialize and register API handlers handlers = load_classes_from_folder("python/api", "*.py", ApiHandler) for handler in handlers: register_api_handler(webapp, handler) - # add the webapp, mcp, and a2a to the app - middleware_routes = { - "/mcp": ASGIMiddleware(app=mcp_server.DynamicMcpProxy.get_instance()), # type: ignore - "/a2a": ASGIMiddleware(app=fasta2a_server.DynamicA2AProxy.get_instance()), # type: ignore - } + handlers_by_namespace = _build_websocket_handlers_by_namespace(socketio_server, lock) + configure_websocket_namespaces( + webapp=webapp, + socketio_server=socketio_server, + websocket_manager=websocket_manager, + handlers_by_namespace=handlers_by_namespace, + ) - app = DispatcherMiddleware(webapp, middleware_routes) # type: ignore + init_a0() - PrintStyle().debug(f"Starting server at http://{host}:{port} ...") + wsgi_app = WSGIMiddleware(webapp) + starlette_app = Starlette( + routes=[ + Mount("/mcp", app=mcp_server.DynamicMcpProxy.get_instance()), + Mount("/a2a", app=fasta2a_server.DynamicA2AProxy.get_instance()), + Mount("/", app=wsgi_app), + ] + ) + + asgi_app = ASGIApp(socketio_server, other_asgi_app=starlette_app) - server = make_server( + def flush_and_shutdown_callback() -> None: + """ + TODO(dev): add cleanup + flush-to-disk logic here. 
+ """ + return + flush_ran = False + + def _run_flush(reason: str) -> None: + nonlocal flush_ran + if flush_ran: + return + flush_ran = True + try: + flush_and_shutdown_callback() + except Exception as e: + PrintStyle.warning(f"Shutdown flush failed ({reason}): {e}") + + config = uvicorn.Config( + asgi_app, host=host, port=port, - app=app, - request_handler=NoRequestLoggingWSGIRequestHandler, - threaded=True, + log_level="error", + access_log=_settings.get("uvicorn_access_logs_enabled", False), + ws="wsproto", ) - process.set_server(server) - server.log_startup() + server = uvicorn.Server(config) - # Start init_a0 in a background thread when server starts - # threading.Thread(target=init_a0, daemon=True).start() - init_a0() + class _UvicornServerWrapper: + def __init__(self, server: uvicorn.Server): + self._server = server - # run the server - server.serve_forever() + def shutdown(self) -> None: + _run_flush("shutdown") + self._server.should_exit = True + + process.set_server(_UvicornServerWrapper(server)) + + PrintStyle().debug(f"Starting server at http://{host}:{port} ...") + try: + server.run() + finally: + _run_flush("server_exit") def init_a0(): @@ -278,9 +506,8 @@ def init_a0(): initialize.initialize_preload() - # run the internal server if __name__ == "__main__": runtime.initialize() dotenv.load_dotenv() - run() \ No newline at end of file + run() diff --git a/tests/test_multi_tab_isolation.py b/tests/test_multi_tab_isolation.py new file mode 100644 index 0000000000..c54f9e85f6 --- /dev/null +++ b/tests/test_multi_tab_isolation.py @@ -0,0 +1,158 @@ +import asyncio +import sys +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + + +@pytest.mark.asyncio +async def test_state_monitor_per_sid_isolation_independent_snapshots_seq_and_cursors(monkeypatch): + import python.helpers.state_monitor as state_monitor_module + from python.helpers.state_monitor import StateMonitor + from python.helpers.state_snapshot import StateRequestV1 + + snapshot_calls: list[dict[str, object]] = [] + emitted: list[dict[str, object]] = [] + + namespace = "/state_sync" + + async def fake_build_snapshot_from_request(*, request): + context = request.context + log_from = request.log_from + notifications_from = request.notifications_from + timezone = request.timezone + snapshot_calls.append( + { + "context": context, + "log_from": log_from, + "notifications_from": notifications_from, + "timezone": timezone, + } + ) + # Return poll-shaped keys that StateMonitor expects to advance cursors from. 
+ return { + "deselect_chat": False, + "context": context or "", + "contexts": [], + "tasks": [], + "logs": [], + "log_guid": "log-guid", + "log_version": int(log_from) + 1, + "log_progress": "", + "log_progress_active": False, + "paused": False, + "notifications": [], + "notifications_guid": "notifications-guid", + "notifications_version": int(notifications_from) + 1, + } + + class FakeManager: + def __init__(self, loop): + self._dispatcher_loop = loop + + async def emit_to(self, namespace, sid, event_type, payload, *, handler_id=None): + emitted.append( + { + "namespace": namespace, + "sid": sid, + "event_type": event_type, + "payload": payload, + "handler_id": handler_id, + } + ) + + monkeypatch.setattr( + state_monitor_module, + "build_snapshot_from_request", + fake_build_snapshot_from_request, + ) + + monitor = StateMonitor(debounce_seconds=60.0) + loop = asyncio.get_running_loop() + monitor.bind_manager(FakeManager(loop), handler_id="test.handler") + + monitor.register_sid(namespace, "sid-a") + monitor.register_sid(namespace, "sid-b") + + monitor.update_projection( + namespace, + "sid-a", + request=StateRequestV1(context="ctx-a", log_from=0, notifications_from=0, timezone="UTC"), + seq_base=10, + ) + monitor.update_projection( + namespace, + "sid-b", + request=StateRequestV1( + context="ctx-b", + log_from=40, + notifications_from=7, + timezone="Europe/Berlin", + ), + seq_base=100, + ) + + # Flush pushes directly to avoid relying on debounce scheduling. + await monitor._flush_push((namespace, "sid-a")) + await monitor._flush_push((namespace, "sid-b")) + + assert snapshot_calls == [ + {"context": "ctx-a", "log_from": 0, "notifications_from": 0, "timezone": "UTC"}, + {"context": "ctx-b", "log_from": 40, "notifications_from": 7, "timezone": "Europe/Berlin"}, + ] + + assert len(emitted) == 2 + assert {entry["sid"] for entry in emitted} == {"sid-a", "sid-b"} + assert all(entry["event_type"] == "state_push" for entry in emitted) + assert all(entry["handler_id"] == "test.handler" for entry in emitted) + assert all(entry["namespace"] == namespace for entry in emitted) + + payload_a = next(entry["payload"] for entry in emitted if entry["sid"] == "sid-a") + payload_b = next(entry["payload"] for entry in emitted if entry["sid"] == "sid-b") + + assert payload_a["seq"] == 11 # seq_base=10 -> first push increments to 11 + assert payload_b["seq"] == 101 # seq_base=100 -> first push increments to 101 + + assert payload_a["snapshot"]["context"] == "ctx-a" + assert payload_b["snapshot"]["context"] == "ctx-b" + + # Verify per-sid cursor advancement is independent. 
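+    # Expected from the fake builder: sid-a cursors 0 -> 1 / 0 -> 1, sid-b 40 -> 41 / 7 -> 8.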
+ assert monitor._projections[(namespace, "sid-a")].request.log_from == 1 + assert monitor._projections[(namespace, "sid-a")].request.notifications_from == 1 + assert monitor._projections[(namespace, "sid-b")].request.log_from == 41 + assert monitor._projections[(namespace, "sid-b")].request.notifications_from == 8 + + +@pytest.mark.asyncio +async def test_state_monitor_mark_dirty_for_context_scopes_to_active_context(): + from python.helpers.state_monitor import StateMonitor + from python.helpers.state_snapshot import StateRequestV1 + + monitor = StateMonitor(debounce_seconds=60.0) + namespace = "/state_sync" + monitor.register_sid(namespace, "sid-a") + monitor.register_sid(namespace, "sid-b") + + monitor.update_projection( + namespace, + "sid-a", + request=StateRequestV1(context="ctx-a", log_from=0, notifications_from=0, timezone="UTC"), + seq_base=10, + ) + monitor.update_projection( + namespace, + "sid-b", + request=StateRequestV1(context="ctx-b", log_from=0, notifications_from=0, timezone="UTC"), + seq_base=10, + ) + + monitor.mark_dirty_for_context("ctx-a") + assert (namespace, "sid-a") in monitor._debounce_handles + assert (namespace, "sid-b") not in monitor._debounce_handles + + monitor.unregister_sid(namespace, "sid-a") + monitor.unregister_sid(namespace, "sid-b") diff --git a/tests/test_persist_chat_log_ids.py b/tests/test_persist_chat_log_ids.py new file mode 100644 index 0000000000..9d30a4eca3 --- /dev/null +++ b/tests/test_persist_chat_log_ids.py @@ -0,0 +1,18 @@ +from __future__ import annotations + + +def test_deserialize_log_preserves_item_id() -> None: + from python.helpers.log import Log + from python.helpers.persist_chat import _deserialize_log, _serialize_log + + log = Log() + log.log(type="user", heading="User message", content="hello", id="msg-123") + log.log(type="assistant", heading="Assistant", content="hi") + + serialized = _serialize_log(log) + restored = _deserialize_log(serialized) + + assert restored.logs[0].type == "user" + assert restored.logs[0].id == "msg-123" + assert restored.logs[1].type == "assistant" + assert restored.logs[1].id is None diff --git a/tests/test_run_ui_config.py b/tests/test_run_ui_config.py new file mode 100644 index 0000000000..e2d3a4c59a --- /dev/null +++ b/tests/test_run_ui_config.py @@ -0,0 +1,16 @@ +import sys +from pathlib import Path + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + +import run_ui + + +def test_socketio_engine_configuration_defaults(): + server = run_ui.socketio_server.eio + + assert server.ping_interval == 25 + assert server.ping_timeout == 20 + assert server.max_http_buffer_size == 50 * 1024 * 1024 diff --git a/tests/test_settings_developer_sections.py b/tests/test_settings_developer_sections.py new file mode 100644 index 0000000000..1effaf6019 --- /dev/null +++ b/tests/test_settings_developer_sections.py @@ -0,0 +1,37 @@ +import sys +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + +from python.helpers import settings as settings_helper + + +def _build_sections(monkeypatch: pytest.MonkeyPatch, is_dev: bool): + monkeypatch.setattr(settings_helper.runtime, "is_development", lambda: is_dev) + output = settings_helper.convert_out(settings_helper.get_settings()) + return output["sections"] + + +def test_websocket_harness_section_only_in_development(monkeypatch: pytest.MonkeyPatch): + sections = 
_build_sections(monkeypatch, True) + section_ids = [section["id"] for section in sections] + assert "dev_testing" in section_ids + + dev_index = section_ids.index("dev") + testing_index = section_ids.index("dev_testing") + assert testing_index > dev_index + + prod_sections = _build_sections(monkeypatch, False) + prod_ids = [section["id"] for section in prod_sections] + assert "dev_testing" not in prod_ids + + +def test_websocket_harness_template_is_gated_by_runtime(): + template_path = PROJECT_ROOT / "webui" / "components" / "settings" / "developer" / "websocket-tester.html" + content = template_path.read_text(encoding="utf-8") + assert "window.runtimeInfo?.isDevelopment" in content + assert "$store.root?.isDevelopment" not in content diff --git a/tests/test_snapshot_parity.py b/tests/test_snapshot_parity.py new file mode 100644 index 0000000000..f46083d243 --- /dev/null +++ b/tests/test_snapshot_parity.py @@ -0,0 +1,77 @@ +import sys +import threading +from pathlib import Path + +import pytest +from flask import Flask + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + +from agent import AgentContext +from initialize import initialize_agent +from python.api.poll import Poll + + +@pytest.mark.asyncio +async def test_snapshot_builder_matches_poll_output_for_null_context(): + app = Flask("snapshot-parity-test") + app.secret_key = "test-secret" + lock = threading.RLock() + + poll = Poll(app, lock) + poll_payload = await poll.process( + { + "context": None, + "log_from": 0, + "notifications_from": 0, + "timezone": "UTC", + }, + None, # Poll.process does not access the flask Request object. + ) + + from python.helpers import state_snapshot as snapshot + + builder_payload = await snapshot.build_snapshot( + context=None, + log_from=0, + notifications_from=0, + timezone="UTC", + ) + + assert builder_payload == poll_payload + + +@pytest.mark.asyncio +async def test_snapshot_builder_active_context_includes_incremental_logs(): + ctxid = "ctx-snapshot-parity" + ctx = AgentContext(config=initialize_agent(), id=ctxid, set_current=False) + try: + ctx.log.log(type="user", heading="hi", content="hello") + first = await Poll(Flask("parity-active"), threading.RLock()).process( + { + "context": ctxid, + "log_from": 0, + "notifications_from": 0, + "timezone": "UTC", + }, + None, + ) + assert first["context"] == ctxid + assert first["logs"] + assert first["log_version"] == len(ctx.log.updates) + + from python.helpers import state_snapshot as snapshot + + second = await snapshot.build_snapshot( + context=ctxid, + log_from=first["log_version"], + notifications_from=0, + timezone="UTC", + ) + assert second["context"] == ctxid + assert second["logs"] == [] + assert second["log_version"] == first["log_version"] + finally: + AgentContext.remove(ctxid) diff --git a/tests/test_snapshot_schema_v1.py b/tests/test_snapshot_schema_v1.py new file mode 100644 index 0000000000..8e2b0b7d46 --- /dev/null +++ b/tests/test_snapshot_schema_v1.py @@ -0,0 +1,110 @@ +import sys +import threading +from pathlib import Path + +import pytest +from flask import Flask + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + +from python.api.poll import Poll + + +EXPECTED_SNAPSHOT_KEYS = { + "deselect_chat", + "context", + "contexts", + "tasks", + "logs", + "log_guid", + "log_version", + "log_progress", + "log_progress_active", + "paused", + "notifications", + "notifications_guid", 
+ "notifications_version", +} + + +@pytest.mark.asyncio +async def test_poll_snapshot_matches_contract_schema_key_set_null_context(): + app = Flask("poll-snapshot-schema-test") + app.secret_key = "test-secret" + lock = threading.RLock() + + poll = Poll(app, lock) + payload = await poll.process( + { + "context": None, + "log_from": 0, + "notifications_from": 0, + "timezone": "UTC", + }, + None, # Poll.process does not access the flask Request object. + ) + + assert set(payload.keys()) == EXPECTED_SNAPSHOT_KEYS + assert payload["deselect_chat"] is False + assert payload["context"] == "" + assert payload["logs"] == [] + assert payload["log_guid"] == "" + assert payload["log_version"] == 0 + assert payload["log_progress"] == 0 + assert payload["log_progress_active"] is False + assert payload["paused"] is False + + +@pytest.mark.asyncio +async def test_snapshot_builder_produces_contract_schema_key_set_and_defaults(): + from python.helpers import state_snapshot as snapshot + + payload = await snapshot.build_snapshot( + context=None, + log_from=0, + notifications_from=0, + timezone="UTC", + ) + + snapshot.validate_snapshot_schema_v1(payload) + assert set(payload.keys()) == EXPECTED_SNAPSHOT_KEYS + assert payload["deselect_chat"] is False + assert payload["context"] == "" + assert payload["logs"] == [] + assert payload["log_guid"] == "" + assert payload["log_version"] == 0 + assert payload["log_progress"] == 0 + assert payload["log_progress_active"] is False + assert payload["paused"] is False + assert isinstance(payload["contexts"], list) + assert isinstance(payload["tasks"], list) + assert isinstance(payload["notifications"], list) + assert isinstance(payload["notifications_guid"], str) + assert isinstance(payload["notifications_version"], int) + assert payload["notifications_version"] >= 0 + + +def test_snapshot_schema_rejects_unexpected_top_level_keys(): + from python.helpers import state_snapshot as snapshot + + payload = { + "deselect_chat": False, + "context": "", + "contexts": [], + "tasks": [], + "logs": [], + "log_guid": "", + "log_version": 0, + "log_progress": 0, + "log_progress_active": False, + "paused": False, + "notifications": [], + "notifications_guid": "guid", + "notifications_version": 0, + "api_key": "should-not-be-here", + } + + with pytest.raises(ValueError): + snapshot.validate_snapshot_schema_v1(payload) diff --git a/tests/test_socketio_library_semantics.py b/tests/test_socketio_library_semantics.py new file mode 100644 index 0000000000..f5f5c8dc55 --- /dev/null +++ b/tests/test_socketio_library_semantics.py @@ -0,0 +1,117 @@ +import asyncio +import contextlib +import socket +from typing import Any, AsyncIterator + +import pytest + + +@contextlib.asynccontextmanager +async def _run_asgi_app(app: Any) -> AsyncIterator[str]: + import uvicorn + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(("127.0.0.1", 0)) + sock.listen(128) + + port = sock.getsockname()[1] + + config = uvicorn.Config( + app, + host="127.0.0.1", + port=port, + log_level="warning", + access_log=False, + lifespan="off", + ) + server = uvicorn.Server(config) + server.install_signal_handlers = lambda: None # type: ignore[method-assign] + + task = asyncio.create_task(server.serve(sockets=[sock])) + try: + while not server.started: + await asyncio.sleep(0.01) + yield f"http://127.0.0.1:{port}" + finally: + server.should_exit = True + try: + await asyncio.wait_for(task, timeout=5) + finally: + sock.close() + + 
+@pytest.mark.asyncio +async def test_socketio_wildcard_handler_only_runs_for_unhandled_events() -> None: + import socketio + + handled_calls: list[tuple[str, Any]] = [] + wildcard_calls: list[str] = [] + + sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*") + + @sio.on("handled", namespace="/ns") + async def _handled(sid: str, data: Any) -> dict[str, Any]: + handled_calls.append((sid, data)) + return {"path": "handled"} + + @sio.on("*", namespace="/ns") + async def _wildcard(event: str, sid: str, data: Any) -> dict[str, Any]: + wildcard_calls.append(event) + return {"path": "wildcard", "event": event} + + app = socketio.ASGIApp(sio) + + async with _run_asgi_app(app) as base_url: + client = socketio.AsyncClient() + await client.connect(base_url, namespaces=["/ns"]) + try: + res = await client.call("handled", {"x": 1}, namespace="/ns", timeout=2) + assert res == {"path": "handled"} + assert wildcard_calls == [] + + res2 = await client.call("unhandled_event", {"x": 2}, namespace="/ns", timeout=2) + assert res2 == {"path": "wildcard", "event": "unhandled_event"} + assert wildcard_calls == ["unhandled_event"] + finally: + await client.disconnect() + + +@pytest.mark.asyncio +async def test_socketio_handler_return_values_ack_only_when_client_requests_ack() -> None: + import socketio + + sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*") + sent_packets: list[Any] = [] + + original_send_packet = sio._send_packet + + async def _record_send_packet(eio_sid: str, pkt: Any) -> None: + sent_packets.append(pkt) + await original_send_packet(eio_sid, pkt) + + sio._send_packet = _record_send_packet # type: ignore[assignment] + + @sio.on("returns_value", namespace="/ns") + async def _returns_value(_sid: str, _data: Any) -> dict[str, Any]: + return {"ok": True} + + app = socketio.ASGIApp(sio) + + async with _run_asgi_app(app) as base_url: + client = socketio.AsyncClient() + await client.connect(base_url, namespaces=["/ns"]) + try: + sent_packets.clear() + await client.emit("returns_value", {"x": 1}, namespace="/ns") + await asyncio.sleep(0.05) + ack_packets = [p for p in sent_packets if getattr(p, "packet_type", None) in (3, 6)] + assert ack_packets == [] + + sent_packets.clear() + res = await client.call("returns_value", {"x": 2}, namespace="/ns", timeout=2) + assert res == {"ok": True} + ack_packets = [p for p in sent_packets if getattr(p, "packet_type", None) in (3, 6)] + assert len(ack_packets) >= 1 + finally: + await client.disconnect() diff --git a/tests/test_socketio_unknown_namespace.py b/tests/test_socketio_unknown_namespace.py new file mode 100644 index 0000000000..74840e9800 --- /dev/null +++ b/tests/test_socketio_unknown_namespace.py @@ -0,0 +1,103 @@ +import asyncio +import contextlib +import socket +from typing import Any, AsyncIterator + +import pytest + + +@contextlib.asynccontextmanager +async def _run_asgi_app(app: Any) -> AsyncIterator[str]: + import uvicorn + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(("127.0.0.1", 0)) + sock.listen(128) + + port = sock.getsockname()[1] + + config = uvicorn.Config( + app, + host="127.0.0.1", + port=port, + log_level="warning", + access_log=False, + lifespan="off", + ) + server = uvicorn.Server(config) + server.install_signal_handlers = lambda: None # type: ignore[method-assign] + + task = asyncio.create_task(server.serve(sockets=[sock])) + try: + while not server.started: + await asyncio.sleep(0.01) + yield 
f"http://127.0.0.1:{port}" + finally: + server.should_exit = True + try: + await asyncio.wait_for(task, timeout=5) + finally: + sock.close() + + +@pytest.mark.asyncio +async def test_unknown_namespace_connect_error_can_be_made_deterministic() -> None: + """ + Library-semantics test: demonstrate a deterministic connect_error payload shape for + unknown namespaces using a server-side allowlist gatekeeper. + """ + + import socketio + from socketio import packet + + sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*", namespaces="*") + + allowed_namespaces = {"/known", "/"} + + original_handle_connect = sio._handle_connect + + async def _gatekeeper_handle_connect(eio_sid: str, namespace: str | None, data: Any) -> None: + namespace = namespace or "/" + if namespace not in allowed_namespaces: + await sio._send_packet( + eio_sid, + sio.packet_class( + packet.CONNECT_ERROR, + data={ + "message": "UNKNOWN_NAMESPACE", + "data": {"code": "UNKNOWN_NAMESPACE", "namespace": namespace}, + }, + namespace=namespace, + ), + ) + return + + await original_handle_connect(eio_sid, namespace, data) + + sio._handle_connect = _gatekeeper_handle_connect # type: ignore[assignment] + + app = socketio.ASGIApp(sio) + + async with _run_asgi_app(app) as base_url: + client = socketio.AsyncClient() + connect_error_fut: asyncio.Future[Any] = asyncio.get_running_loop().create_future() + + async def _on_connect_error(data: Any) -> None: + if not connect_error_fut.done(): + connect_error_fut.set_result(data) + + client.on("connect_error", _on_connect_error, namespace="/unknown") + + try: + with pytest.raises(socketio.exceptions.ConnectionError): + await client.connect(base_url, namespaces=["/unknown"]) + + err = await asyncio.wait_for(connect_error_fut, timeout=2) + assert err["message"] == "UNKNOWN_NAMESPACE" + assert err["data"] == {"code": "UNKNOWN_NAMESPACE", "namespace": "/unknown"} + finally: + try: + await client.disconnect() + except Exception: + pass diff --git a/tests/test_state_monitor.py b/tests/test_state_monitor.py new file mode 100644 index 0000000000..ef3b2e245e --- /dev/null +++ b/tests/test_state_monitor.py @@ -0,0 +1,103 @@ +import sys +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + + +@pytest.mark.asyncio +async def test_state_monitor_debounce_coalesces_without_postponing_and_cleanup_cancels_pending(): + from python.helpers.state_monitor import StateMonitor + from python.helpers.state_snapshot import StateRequestV1 + + namespace = "/state_sync" + monitor = StateMonitor(debounce_seconds=10.0) + monitor.register_sid(namespace, "sid-1") + monitor.bind_manager(type("FakeManager", (), {"_dispatcher_loop": None})()) + monitor.update_projection( + namespace, + "sid-1", + request=StateRequestV1(context=None, log_from=0, notifications_from=0, timezone="UTC"), + seq_base=1, + ) + + monitor.mark_dirty(namespace, "sid-1") + first = monitor._debounce_handles[(namespace, "sid-1")] + + monitor.mark_dirty(namespace, "sid-1") + second = monitor._debounce_handles[(namespace, "sid-1")] + + # Throttled coalescing: subsequent dirties keep the scheduled push instead of postponing it. 
+ assert first is second + assert not second.cancelled() + + monitor.unregister_sid(namespace, "sid-1") + assert second.cancelled() + assert (namespace, "sid-1") not in monitor._debounce_handles + + +@pytest.mark.asyncio +async def test_state_monitor_namespace_identity_prevents_cross_namespace_state_push(monkeypatch) -> None: + import asyncio + from unittest.mock import AsyncMock + + from python.helpers.state_monitor import StateMonitor + from python.helpers.state_snapshot import StateRequestV1 + + loop = asyncio.get_running_loop() + push_ready = asyncio.Event() + captured: list[tuple[str, str]] = [] + + async def _emit_to(namespace: str, sid: str, event_type: str, _payload: object, **_kwargs): + if event_type == "state_push": + captured.append((namespace, sid)) + push_ready.set() + + class FakeManager: + def __init__(self): + self._dispatcher_loop = loop + self.emit_to = AsyncMock(side_effect=_emit_to) + + monitor = StateMonitor(debounce_seconds=0.0) + manager = FakeManager() + monitor.bind_manager(manager, handler_id="tester") + + sid = "shared-sid" + ns_a = "/a" + ns_b = "/b" + monitor.register_sid(ns_a, sid) + monitor.register_sid(ns_b, sid) + monitor.update_projection( + ns_a, + sid, + request=StateRequestV1(context=None, log_from=0, notifications_from=0, timezone="UTC"), + seq_base=1, + ) + monitor.update_projection( + ns_b, + sid, + request=StateRequestV1(context=None, log_from=0, notifications_from=0, timezone="UTC"), + seq_base=1, + ) + + async def _fake_snapshot(**_kwargs): + return { + "log_version": 0, + "notifications_version": 0, + "logs": [], + "contexts": [], + "tasks": [], + "notifications": [], + } + + # Patch build_snapshot used by StateMonitor so this test stays lightweight. + monkeypatch.setattr("python.helpers.state_monitor.build_snapshot_from_request", _fake_snapshot) + + monitor.mark_dirty(ns_a, sid, reason="test") + await asyncio.wait_for(push_ready.wait(), timeout=1.0) + + assert captured + assert all(ns == ns_a for ns, _ in captured) diff --git a/tests/test_state_sync_handler.py b/tests/test_state_sync_handler.py new file mode 100644 index 0000000000..959acd2dc2 --- /dev/null +++ b/tests/test_state_sync_handler.py @@ -0,0 +1,168 @@ +import sys +import threading +from pathlib import Path + +import pytest +import asyncio +import time + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + +from python.helpers.websocket_manager import WebSocketManager + +NAMESPACE = "/state_sync" + + +class FakeSocketIOServer: + def __init__(self) -> None: + from unittest.mock import AsyncMock + + self.emit = AsyncMock() + self.disconnect = AsyncMock() + + +async def _create_manager() -> WebSocketManager: + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + from python.websocket_handlers.state_sync_handler import StateSyncHandler + from python.helpers.state_monitor import _reset_state_monitor_for_testing + + _reset_state_monitor_for_testing() + StateSyncHandler._reset_instance_for_testing() + handler = StateSyncHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-1") + return manager + + +async def _create_manager_with_socketio() -> tuple[WebSocketManager, FakeSocketIOServer]: + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + from python.websocket_handlers.state_sync_handler import StateSyncHandler + from 
python.helpers.state_monitor import _reset_state_monitor_for_testing + + _reset_state_monitor_for_testing() + StateSyncHandler._reset_instance_for_testing() + handler = StateSyncHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-1") + return manager, socketio + + +@pytest.mark.asyncio +async def test_state_request_success_returns_wire_level_shape_and_contract_payload(): + manager = await _create_manager() + + response = await manager.route_event( + NAMESPACE, + "state_request", + { + "correlationId": "client-1", + "ts": "2025-12-28T00:00:00.000Z", + "data": { + "context": None, + "log_from": 0, + "notifications_from": 0, + "timezone": "UTC", + }, + }, + "sid-1", + ) + + assert response["correlationId"] == "client-1" + assert isinstance(response.get("results"), list) + assert response["results"] + + first = response["results"][0] + assert first["ok"] is True + assert first["correlationId"] == "client-1" + assert isinstance(first.get("data"), dict) + assert set(first["data"].keys()) >= {"runtime_epoch", "seq_base"} + assert isinstance(first["data"]["runtime_epoch"], str) and first["data"]["runtime_epoch"] + assert isinstance(first["data"]["seq_base"], int) + + +@pytest.mark.asyncio +async def test_state_request_invalid_payload_returns_invalid_request_error(): + manager = await _create_manager() + + response = await manager.route_event( + NAMESPACE, + "state_request", + { + "correlationId": "client-2", + "ts": "2025-12-28T00:00:00.000Z", + "data": { + "context": None, + "log_from": -1, + "notifications_from": 0, + "timezone": "UTC", + }, + }, + "sid-1", + ) + + assert response["correlationId"] == "client-2" + assert response["results"] + first = response["results"][0] + assert first["ok"] is False + assert first["error"]["code"] == "INVALID_REQUEST" + + +@pytest.mark.asyncio +async def test_state_push_gating_and_initial_snapshot_delivery(): + from python.helpers.state_monitor import get_state_monitor + from python.helpers.state_snapshot import validate_snapshot_schema_v1 + + manager, socketio = await _create_manager_with_socketio() + + push_ready = asyncio.Event() + captured: dict[str, object] = {} + + async def _emit(event_type, envelope, **_kwargs): + if event_type == "state_push": + captured["envelope"] = envelope + push_ready.set() + + socketio.emit.side_effect = _emit + + # INVARIANT.STATE.GATING: no push before a successful state_request. 
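+    # A dirty mark before the handshake must be swallowed; the snapshot may
+    # only be pushed once state_request has succeeded for this sid.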
+ get_state_monitor().mark_dirty(NAMESPACE, "sid-1") + await asyncio.sleep(0.2) + assert not push_ready.is_set() + + start = time.monotonic() + await manager.route_event( + NAMESPACE, + "state_request", + { + "correlationId": "client-gating", + "ts": "2025-12-28T00:00:00.000Z", + "data": { + "context": None, + "log_from": 0, + "notifications_from": 0, + "timezone": "UTC", + }, + }, + "sid-1", + ) + + await asyncio.wait_for(push_ready.wait(), timeout=1.0) + assert (time.monotonic() - start) <= 1.0 + + envelope = captured.get("envelope") + assert isinstance(envelope, dict) + data = envelope.get("data") + assert isinstance(data, dict) + assert set(data.keys()) >= {"runtime_epoch", "seq", "snapshot"} + assert isinstance(data["runtime_epoch"], str) and data["runtime_epoch"] + assert isinstance(data["seq"], int) + assert isinstance(data["snapshot"], dict) + validate_snapshot_schema_v1(data["snapshot"]) + + await manager.handle_disconnect(NAMESPACE, "sid-1") diff --git a/tests/test_state_sync_welcome_screen.py b/tests/test_state_sync_welcome_screen.py new file mode 100644 index 0000000000..4e2a22bb75 --- /dev/null +++ b/tests/test_state_sync_welcome_screen.py @@ -0,0 +1,82 @@ +import asyncio +import sys +import threading +import time +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + +from python.helpers.websocket_manager import WebSocketManager + +NAMESPACE = "/state_sync" + + +class FakeSocketIOServer: + def __init__(self) -> None: + from unittest.mock import AsyncMock + + self.emit = AsyncMock() + self.disconnect = AsyncMock() + + +@pytest.mark.asyncio +async def test_state_sync_handshake_and_initial_snapshot_work_with_no_selected_context() -> None: + """ + Regression for Welcome screen: the UI has no selected context, so `state_request.context` + is null. We must still handshake and receive an initial `state_push` quickly (no hang). 
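+    The assertions mirror the gating test in test_state_sync_handler.py: the
+    push must arrive within the one-second bound and the snapshot must
+    validate against schema v1.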
+ """ + + from python.helpers.state_snapshot import validate_snapshot_schema_v1 + from python.helpers.state_monitor import _reset_state_monitor_for_testing + from python.websocket_handlers.state_sync_handler import StateSyncHandler + + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + _reset_state_monitor_for_testing() + StateSyncHandler._reset_instance_for_testing() + handler = StateSyncHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-1") + + push_ready = asyncio.Event() + captured: dict[str, object] = {} + + async def _emit(event_type, envelope, **_kwargs): + if event_type == "state_push": + captured["envelope"] = envelope + push_ready.set() + + socketio.emit.side_effect = _emit + + start = time.monotonic() + await manager.route_event( + NAMESPACE, + "state_request", + { + "correlationId": "client-welcome", + "ts": "2026-01-05T00:00:00.000Z", + "data": { + "context": None, # welcome screen (no selected chat) + "log_from": 0, + "notifications_from": 0, + "timezone": "UTC", + }, + }, + "sid-1", + ) + + await asyncio.wait_for(push_ready.wait(), timeout=1.0) + assert (time.monotonic() - start) <= 1.0 + + envelope = captured.get("envelope") + assert isinstance(envelope, dict) + data = envelope.get("data") + assert isinstance(data, dict) + assert set(data.keys()) >= {"runtime_epoch", "seq", "snapshot"} + assert isinstance(data["snapshot"], dict) + validate_snapshot_schema_v1(data["snapshot"]) diff --git a/tests/test_websocket_client_api_surface.py b/tests/test_websocket_client_api_surface.py new file mode 100644 index 0000000000..2d683bec3d --- /dev/null +++ b/tests/test_websocket_client_api_surface.py @@ -0,0 +1,41 @@ +import re +import sys +from pathlib import Path + + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + + +def _get_named_exports(source: str) -> set[str]: + exports: set[str] = set() + + exports.update(re.findall(r"^export\s+function\s+([A-Za-z0-9_]+)\s*\(", source, flags=re.M)) + exports.update(re.findall(r"^export\s+const\s+([A-Za-z0-9_]+)\s*=", source, flags=re.M)) + exports.update(re.findall(r"^export\s+class\s+([A-Za-z0-9_]+)\s*[\{:]", source, flags=re.M)) + + for m in re.findall(r"^export\s*\{([^}]+)\}\s*;?", source, flags=re.M): + for item in m.split(","): + item = item.strip() + if not item: + continue + # Handle: `foo as bar` + parts = item.split() + if len(parts) >= 3 and parts[-2] == "as": + exports.add(parts[-1]) + else: + exports.add(parts[0]) + + return exports + + +def test_websocket_js_exports_minimal_namespaced_api_surface() -> None: + source = (PROJECT_ROOT / "webui" / "js" / "websocket.js").read_text(encoding="utf-8") + exports = _get_named_exports(source) + + assert "createNamespacedClient" in exports + assert "getNamespacedClient" in exports + + assert "broadcast" not in exports + assert "requestAll" not in exports diff --git a/tests/test_websocket_csrf.py b/tests/test_websocket_csrf.py new file mode 100644 index 0000000000..45e50db058 --- /dev/null +++ b/tests/test_websocket_csrf.py @@ -0,0 +1,51 @@ +import sys +from pathlib import Path + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + +from python.helpers.websocket import validate_ws_origin + + +def test_validate_ws_origin_allows_same_origin_with_explicit_port(): + ok, reason = validate_ws_origin( + 
{ + "HTTP_ORIGIN": "http://localhost:5000", + "HTTP_HOST": "localhost:5000", + } + ) + assert ok is True + assert reason is None + + +def test_validate_ws_origin_allows_default_https_port_without_explicit_port(): + ok, reason = validate_ws_origin( + { + "HTTP_ORIGIN": "https://example.com", + "HTTP_HOST": "example.com", + } + ) + assert ok is True + assert reason is None + + +def test_validate_ws_origin_rejects_missing_origin(): + ok, reason = validate_ws_origin( + { + "HTTP_HOST": "localhost:5000", + } + ) + assert ok is False + assert reason == "missing_origin" + + +def test_validate_ws_origin_rejects_cross_origin(): + ok, reason = validate_ws_origin( + { + "HTTP_ORIGIN": "http://evil.test", + "HTTP_HOST": "localhost:5000", + } + ) + assert ok is False + assert reason == "origin_host_mismatch" diff --git a/tests/test_websocket_handlers.py b/tests/test_websocket_handlers.py new file mode 100644 index 0000000000..fb76f55ee0 --- /dev/null +++ b/tests/test_websocket_handlers.py @@ -0,0 +1,176 @@ +import sys +import threading +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + +from python.helpers.websocket import ( + WebSocketHandler, + WebSocketResult, + SingletonInstantiationError, +) + + +class _FakeSocketIO: + async def emit(self, *_args, **_kwargs): # pragma: no cover - helper stub + return None + + async def disconnect(self, *_args, **_kwargs): # pragma: no cover - helper stub + return None + + +class _TestHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["test_event"] + + async def process_event(self, event_type: str, data: dict, sid: str) -> None: + return None + + +def _make_handler() -> _TestHandler: + _TestHandler._reset_instance_for_testing() + return _TestHandler.get_instance(_FakeSocketIO(), threading.RLock()) + + +def test_websocket_result_ok_clones_payload(): + payload = {"value": 1} + result = WebSocketResult.ok(payload) + + assert result.as_result( + handler_id="handler", + fallback_correlation_id="corr", + )["data"] == payload + + payload["value"] = 2 + assert result.as_result( + handler_id="handler", + fallback_correlation_id="corr", + )["data"] == {"value": 1} + + +def test_websocket_result_error_contains_metadata(): + result = WebSocketResult.error( + code="E_TEST", + message="failure", + details="additional", + correlation_id="corr", + duration_ms=12.5, + ) + + as_payload = result.as_result(handler_id="handler", fallback_correlation_id=None) + assert as_payload["ok"] is False + assert as_payload["error"] == { + "code": "E_TEST", + "error": "failure", + "details": "additional", + } + assert as_payload["correlationId"] == "corr" + assert as_payload["durationMs"] == pytest.approx(12.5, rel=1e-3) + + +def test_websocket_result_applies_fallback_correlation_and_duration(): + result = WebSocketResult.ok(duration_ms=5.4321) + payload = result.as_result( + handler_id="handler", + fallback_correlation_id="corr-fallback", + ) + assert payload["correlationId"] == "corr-fallback" + assert payload["durationMs"] == pytest.approx(5.4321, rel=1e-3) + + +def test_handler_result_helpers_return_websocket_result_instances(): + handler = _make_handler() + + ok_result = handler.result_ok({"foo": "bar"}, correlation_id="cid") + assert isinstance(ok_result, WebSocketResult) + ok_payload = ok_result.as_result( + handler_id="handler", + fallback_correlation_id=None, + ) + assert ok_payload["ok"] is True + assert 
ok_payload["data"] == {"foo": "bar"} + assert ok_payload["correlationId"] == "cid" + + err_result = handler.result_error( + code="E_BAD", + message="boom", + details="missing", + correlation_id="err", + ) + assert isinstance(err_result, WebSocketResult) + err_payload = err_result.as_result( + handler_id="handler", + fallback_correlation_id=None, + ) + assert err_payload["ok"] is False + assert err_payload["error"] == { + "code": "E_BAD", + "error": "boom", + "details": "missing", + } + assert err_payload["correlationId"] == "err" + + +def test_result_error_requires_error_payload(): + with pytest.raises(ValueError): + WebSocketResult(ok=False) + + with pytest.raises(ValueError): + WebSocketResult.error(code="", message="boom") + + +def test_handler_direct_instantiation_disallowed(): + with pytest.raises(SingletonInstantiationError): + _TestHandler(_FakeSocketIO(), threading.RLock()) + + +def test_get_instance_returns_singleton(): + _TestHandler._reset_instance_for_testing() + socketio = _FakeSocketIO() + lock = threading.RLock() + first = _TestHandler.get_instance(socketio, lock) + second = _TestHandler.get_instance(None, None) + assert first is second + + +@pytest.mark.asyncio +async def test_state_sync_handler_registers_and_routes_state_request(): + from python.helpers.websocket_manager import WebSocketManager + from python.websocket_handlers.state_sync_handler import StateSyncHandler + from python.helpers.state_monitor import _reset_state_monitor_for_testing + + _reset_state_monitor_for_testing() + StateSyncHandler._reset_instance_for_testing() + + socketio = _FakeSocketIO() + lock = threading.RLock() + manager = WebSocketManager(socketio, lock) + handler = StateSyncHandler.get_instance(socketio, lock) + namespace = "/state_sync" + manager.register_handlers({namespace: [handler]}) + await manager.handle_connect(namespace, "sid-1") + + response = await manager.route_event( + namespace, + "state_request", + { + "correlationId": "smoke-1", + "ts": "2025-12-28T00:00:00.000Z", + "data": { + "context": None, + "log_from": 0, + "notifications_from": 0, + "timezone": "UTC", + }, + }, + "sid-1", + ) + + assert response["correlationId"] == "smoke-1" + assert response["results"] and response["results"][0]["ok"] is True + await manager.handle_disconnect(namespace, "sid-1") diff --git a/tests/test_websocket_harness.py b/tests/test_websocket_harness.py new file mode 100644 index 0000000000..9d56d62f95 --- /dev/null +++ b/tests/test_websocket_harness.py @@ -0,0 +1,173 @@ +import sys +import threading +from pathlib import Path +from typing import Any + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + +from python.helpers.websocket_manager import WebSocketManager +from python.websocket_handlers.dev_websocket_test_handler import ( + DevWebsocketTestHandler, +) + +NAMESPACE = "/dev_websocket_test" + + +class FakeSocketIOServer: + def __init__(self) -> None: + from unittest.mock import AsyncMock + + self.emit = AsyncMock() + self.disconnect = AsyncMock() + + +async def _create_manager() -> tuple[WebSocketManager, DevWebsocketTestHandler, FakeSocketIOServer]: + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + DevWebsocketTestHandler._reset_instance_for_testing() + handler = DevWebsocketTestHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-primary") + return manager, 
handler, socketio + + +@pytest.mark.asyncio +async def test_harness_emit_broadcasts_to_active_connections(): + manager, _handler, socketio = await _create_manager() + + await manager.route_event( + NAMESPACE, + "ws_tester_emit", + {"message": "emit-check", "timestamp": "2025-10-29T12:00:00Z"}, + "sid-primary", + ) + + socketio.emit.assert_awaited() + emit_calls = [(call.args, call.kwargs) for call in socketio.emit.await_args_list] + match = next((c for c in emit_calls if c[0] and c[0][0] == "ws_tester_broadcast"), None) + assert match is not None + args, kwargs = match + envelope = args[1] + assert envelope["handlerId"].endswith("DevWebsocketTestHandler") + assert envelope["data"]["message"] == "emit-check" + assert kwargs == {"to": "sid-primary", "namespace": NAMESPACE} + + +@pytest.mark.asyncio +async def test_harness_request_returns_per_handler_result(): + manager, _handler, _socketio = await _create_manager() + + response = await manager.route_event( + NAMESPACE, + "ws_tester_request", + {"value": 42}, + "sid-primary", + ) + + assert isinstance(response, dict) + assert response["results"] + first = response["results"][0] + assert first["ok"] is True + assert first["data"]["echo"] == 42 + assert response["correlationId"] + assert first["handlerId"].endswith("DevWebsocketTestHandler") + assert first["correlationId"] == response["correlationId"] + + +@pytest.mark.asyncio +async def test_harness_request_delayed_waits_for_sleep(monkeypatch): + manager, _handler, _socketio = await _create_manager() + + calls: list[float] = [] + + async def _fake_sleep(delay: float) -> None: # pragma: no cover - helper + calls.append(delay) + + monkeypatch.setattr( + "python.websocket_handlers.dev_websocket_test_handler.asyncio.sleep", + _fake_sleep, + ) + + await manager.route_event( + NAMESPACE, + "ws_tester_request_delayed", + {"delay_ms": 1500}, + "sid-primary", + ) + + assert calls == [1.5] + + +@pytest.mark.asyncio +async def test_harness_persistence_emit_targets_requesting_sid(): + manager, _handler, socketio = await _create_manager() + + await manager.route_event( + NAMESPACE, + "ws_tester_trigger_persistence", + {"phase": "after"}, + "sid-primary", + ) + + socketio.emit.assert_awaited() + emit_calls = [(call.args, call.kwargs) for call in socketio.emit.await_args_list] + match = next((c for c in emit_calls if c[0] and c[0][0] == "ws_tester_persistence"), None) + assert match is not None + args, kwargs = match + payload = args[1] + assert payload["handlerId"] == _handler.identifier + assert payload["data"] == {"phase": "after", "handler": _handler.identifier} + assert kwargs == {"to": "sid-primary", "namespace": NAMESPACE} + + +@pytest.mark.asyncio +async def test_harness_request_all_aggregates_all_connections(): + manager, _handler, _socketio = await _create_manager() + await manager.handle_connect(NAMESPACE, "sid-secondary") + + response = await manager.route_event( + NAMESPACE, + "ws_tester_request_all", + {"marker": "aggregate"}, + "sid-primary", + ) + + assert response["results"] and response["results"][0]["ok"] is True + data = response["results"][0]["data"] + aggregated = data.get("results") or data.get("result") + assert isinstance(aggregated, list) + by_sid: dict[str, Any] = {entry["sid"]: entry["results"] for entry in aggregated} + assert set(by_sid.keys()) == {"sid-primary", "sid-secondary"} + for results in by_sid.values(): + assert results and results[0]["ok"] is True + payload = results[0]["data"] + assert payload["handler"].endswith("DevWebsocketTestHandler") + assert 
results[0]["handlerId"].endswith("DevWebsocketTestHandler") + assert results[0]["correlationId"] == response["results"][0]["correlationId"] + assert response["correlationId"] + + +@pytest.mark.asyncio +async def test_harness_request_all_respects_exclude_handlers(): + manager, handler, _socketio = await _create_manager() + await manager.handle_connect(NAMESPACE, "sid-secondary") + + response = await manager.route_event( + NAMESPACE, + "ws_tester_request_all", + { + "marker": "exclude", + "excludeHandlers": [handler.identifier], + }, + "sid-primary", + ) + + assert response["correlationId"] + first = response["results"][0] + assert first["ok"] is False + assert first["error"]["code"] == "INVALID_FILTER" + assert "excludeHandlers" in first["error"]["error"] diff --git a/tests/test_websocket_manager.py b/tests/test_websocket_manager.py new file mode 100644 index 0000000000..9f3290c1df --- /dev/null +++ b/tests/test_websocket_manager.py @@ -0,0 +1,853 @@ +import asyncio +import sys +import threading +import time +from pathlib import Path +from typing import Any +from unittest.mock import AsyncMock, patch + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + +from python.helpers.websocket import ConnectionNotFoundError, WebSocketHandler, WebSocketResult +from python.helpers.websocket_manager import ( + WebSocketManager, + BUFFER_TTL, + DIAGNOSTIC_EVENT, + LIFECYCLE_CONNECT_EVENT, + LIFECYCLE_DISCONNECT_EVENT, +) + +NAMESPACE = "/test" + + +class FakeSocketIOServer: + def __init__(self): + self.emit = AsyncMock() + self.disconnect = AsyncMock() + + +class DummyHandler(WebSocketHandler): + def __init__(self, socketio, lock, results): + super().__init__(socketio, lock) + self.results = results + + @classmethod + def get_event_types(cls) -> list[str]: + return ["dummy"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + response = {"sid": sid, "data": data} + self.results.append(response) + return response + + +@pytest.mark.asyncio +async def test_connect_disconnect_updates_registry(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + await manager.handle_connect(NAMESPACE, "abc") + assert (NAMESPACE, "abc") in manager.connections + + await manager.handle_disconnect(NAMESPACE, "abc") + assert (NAMESPACE, "abc") not in manager.connections + + +@pytest.mark.asyncio +async def test_server_restart_broadcast_emitted_when_enabled(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + manager.set_server_restart_broadcast(True) + + await manager.handle_connect(NAMESPACE, "sid-restart") + + socketio.emit.assert_awaited() + args, kwargs = socketio.emit.await_args_list[0] + assert args[0] == "server_restart" + envelope = args[1] + assert envelope["handlerId"] == manager._identifier # noqa: SLF001 + assert envelope["data"]["runtimeId"] + assert kwargs == {"to": "sid-restart", "namespace": NAMESPACE} + + +@pytest.mark.asyncio +async def test_server_restart_broadcast_skipped_when_disabled(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + manager.set_server_restart_broadcast(False) + + await manager.handle_connect(NAMESPACE, "sid-no-restart") + + assert socketio.emit.await_count == 0 + + +@pytest.mark.asyncio +async def test_broadcast_performance_smoke(monkeypatch): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, 
threading.RLock()) + + for idx in range(50): + await manager.handle_connect(NAMESPACE, f"sid-{idx}") + + import time + + start = time.perf_counter() + await manager.broadcast(NAMESPACE, "perf_event", {"ok": True}) + duration_ms = (time.perf_counter() - start) * 1000 + + assert socketio.emit.await_count == 50 + assert duration_ms < 300 + + +@pytest.mark.asyncio +async def test_route_event_invokes_handler_and_ack(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + results = [] + DummyHandler._reset_instance_for_testing() + handler = DummyHandler.get_instance(socketio, threading.RLock(), results) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-1") + + response = await manager.route_event(NAMESPACE, "dummy", {"foo": "bar"}, "sid-1") + + assert results[0]["sid"] == "sid-1" + assert results[0]["data"]["foo"] == "bar" + assert "correlationId" in results[0]["data"] + + assert isinstance(response, dict) + assert "correlationId" in response + assert isinstance(response["results"], list) + entry = response["results"][0] + assert entry["ok"] is True + assert entry["data"]["sid"] == "sid-1" + assert entry["data"]["data"]["foo"] == "bar" + + +@pytest.mark.asyncio +async def test_route_event_no_handler_returns_standard_error(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + await manager.handle_connect(NAMESPACE, "sid-1") + + response = await manager.route_event(NAMESPACE, "missing", {}, "sid-1") + + assert len(response["results"]) == 1 + result = response["results"][0] + assert result["handlerId"].endswith("WebSocketManager") + assert result["ok"] is False + assert result["error"]["code"] == "NO_HANDLERS" + assert ( + result["error"]["error"] + == f"No handler for namespace '{NAMESPACE}' event 'missing'" + ) + + +@pytest.mark.asyncio +async def test_route_event_all_returns_empty_when_no_connections(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + results = await manager.route_event_all(NAMESPACE, "event", {}, timeout_ms=1000) + + assert results == [] + + +@pytest.mark.asyncio +async def test_route_event_all_aggregates_results(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + class EchoHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["multi"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + return {"sid": sid, "echo": data} + + EchoHandler._reset_instance_for_testing() + handler = EchoHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + + await manager.handle_connect(NAMESPACE, "sid-1") + await manager.handle_connect(NAMESPACE, "sid-2") + + aggregated = await manager.route_event_all( + NAMESPACE, "multi", {"value": 42}, timeout_ms=1000 + ) + + assert len(aggregated) == 2 + by_sid = {entry["sid"]: entry for entry in aggregated} + assert by_sid["sid-1"]["results"][0]["ok"] is True + payload_sid1 = by_sid["sid-1"]["results"][0]["data"] + assert payload_sid1["sid"] == "sid-1" + assert payload_sid1["echo"]["value"] == 42 + assert "correlationId" in payload_sid1["echo"] + assert by_sid["sid-2"]["results"][0]["ok"] is True + payload_sid2 = by_sid["sid-2"]["results"][0]["data"] + assert payload_sid2["sid"] == "sid-2" + assert payload_sid2["echo"]["value"] == 42 + assert by_sid["sid-1"]["correlationId"] + + +@pytest.mark.asyncio +async def 
test_route_event_all_timeout_marks_error(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + class SlowHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["slow"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + await asyncio.sleep(0.2) + return {"status": "done"} + + SlowHandler._reset_instance_for_testing() + handler = SlowHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-1") + + aggregated = await manager.route_event_all(NAMESPACE, "slow", {}, timeout_ms=50) + + assert len(aggregated) == 1 + first_entry = aggregated[0] + result = first_entry["results"][0] + assert result["ok"] is False + assert result["error"] == {"code": "TIMEOUT", "error": "Request timeout"} + assert first_entry["correlationId"] + + +@pytest.mark.asyncio +async def test_route_event_exception_standardizes_error_payload(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + class FailingHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["boom"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + raise RuntimeError("kaboom") + + FailingHandler._reset_instance_for_testing() + handler = FailingHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-1") + + response = await manager.route_event(NAMESPACE, "boom", {}, "sid-1") + + assert len(response["results"]) == 1 + result = response["results"][0] + assert result["handlerId"].endswith("FailingHandler") + assert result["ok"] is False + assert result["error"]["code"] == "HANDLER_ERROR" + assert result["error"]["error"] == "Internal server error" + assert "details" in result["error"] + + +@pytest.mark.asyncio +async def test_route_event_offloads_blocking_handlers(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + class BlockingHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["block"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + time.sleep(0.2) + return {"status": "done"} + + BlockingHandler._reset_instance_for_testing() + handler = BlockingHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-1") + + route_task = asyncio.create_task( + manager.route_event(NAMESPACE, "block", {}, "sid-1") + ) + await asyncio.sleep(0) + + t0 = time.perf_counter() + await asyncio.sleep(0.05) + elapsed = time.perf_counter() - t0 + assert elapsed < 0.15 + + response = await route_task + assert response["results"] + + +@pytest.mark.asyncio +async def test_route_event_unwraps_ts_data_envelope_and_preserves_correlation_id(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + results: list[dict[str, Any]] = [] + DummyHandler._reset_instance_for_testing() + handler = DummyHandler.get_instance(socketio, threading.RLock(), results) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-1") + + response = await manager.route_event( + NAMESPACE, + "dummy", + { + "correlationId": "client-1", + "ts": "2025-10-29T12:00:00.000Z", + "data": {"value": 123}, + }, + "sid-1", 
+ ) + + assert response["correlationId"] == "client-1" + assert len(results) == 1 + handler_payload = results[0]["data"] + assert handler_payload["value"] == 123 + assert handler_payload["correlationId"] == "client-1" + assert "ts" not in handler_payload + assert "data" not in handler_payload + + +@pytest.mark.asyncio +async def test_emit_to_unknown_sid_raises_error(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + with pytest.raises(ConnectionNotFoundError): + await manager.emit_to(NAMESPACE, "unknown", "event", {}) + + +@pytest.mark.asyncio +async def test_emit_to_known_disconnected_sid_buffers(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + await manager.handle_connect(NAMESPACE, "sid-1") + await manager.handle_disconnect(NAMESPACE, "sid-1") + + await manager.emit_to( + NAMESPACE, "sid-1", "event", {"a": 1}, correlation_id="corr-1" + ) + + assert (NAMESPACE, "sid-1") in manager.buffers + buffered = list(manager.buffers[(NAMESPACE, "sid-1")]) + assert len(buffered) == 1 + assert buffered[0].event_type == "event" + assert buffered[0].data == {"a": 1} + assert buffered[0].correlation_id == "corr-1" + + +@pytest.mark.asyncio +async def test_buffer_overflow_drops_oldest(monkeypatch): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + await manager.handle_connect(NAMESPACE, "offline") + await manager.handle_disconnect(NAMESPACE, "offline") + + monkeypatch.setattr("python.helpers.websocket_manager.BUFFER_MAX_SIZE", 2) + + await manager.emit_to(NAMESPACE, "offline", "event", {"idx": 0}) + await manager.emit_to(NAMESPACE, "offline", "event", {"idx": 1}) + await manager.emit_to(NAMESPACE, "offline", "event", {"idx": 2}) + + buffer = manager.buffers[(NAMESPACE, "offline")] + assert len(buffer) == 2 + assert buffer[0].data["idx"] == 1 + assert buffer[1].data["idx"] == 2 + + +@pytest.mark.asyncio +async def test_expired_buffer_entries_are_discarded(monkeypatch): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + await manager.handle_connect(NAMESPACE, "sid-expired") + await manager.handle_disconnect(NAMESPACE, "sid-expired") + + from datetime import timedelta, timezone, datetime + + past = datetime.now(timezone.utc) - (BUFFER_TTL + timedelta(seconds=5)) + future = past + BUFFER_TTL + timedelta(seconds=10) + + await manager.emit_to(NAMESPACE, "sid-expired", "event", {"a": 1}) + manager.buffers[(NAMESPACE, "sid-expired")][0].timestamp = past + + socketio.emit.reset_mock() + + monkeypatch.setattr( + "python.helpers.websocket_manager._utcnow", + lambda: future, + ) + await manager.handle_connect(NAMESPACE, "sid-expired") + + assert socketio.emit.await_count == 0 + assert (NAMESPACE, "sid-expired") not in manager.buffers + + +@pytest.mark.asyncio +async def test_flush_buffer_delivers_and_logs(monkeypatch): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + await manager.handle_connect(NAMESPACE, "sid-1") + await manager.handle_disconnect(NAMESPACE, "sid-1") + + await manager.emit_to(NAMESPACE, "sid-1", "event", {"a": 1}) + + await manager.handle_connect(NAMESPACE, "sid-1") + + assert len(socketio.emit.await_args_list) == 1 + awaited_call = socketio.emit.await_args_list[0] + assert awaited_call.args[0] == "event" + envelope = awaited_call.args[1] + assert envelope["data"] == {"a": 1} + assert "eventId" in envelope and "handlerId" in envelope and "ts" in envelope + assert 
awaited_call.kwargs == {"to": "sid-1", "namespace": NAMESPACE} + assert (NAMESPACE, "sid-1") not in manager.buffers + + +@pytest.mark.asyncio +async def test_broadcast_excludes_multiple_sids(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + for sid in ("sid-1", "sid-2", "sid-3"): + await manager.handle_connect(NAMESPACE, sid) + + await manager.broadcast( + NAMESPACE, + "event", + {"foo": "bar"}, + exclude_sids={"sid-1", "sid-3"}, + handler_id="custom.broadcast", + correlation_id="corr-b", + ) + + assert len(socketio.emit.await_args_list) == 1 + awaited_call = socketio.emit.await_args_list[0] + assert awaited_call.args[0] == "event" + envelope = awaited_call.args[1] + assert envelope["data"] == {"foo": "bar"} + assert envelope["handlerId"] == "custom.broadcast" + assert envelope["correlationId"] == "corr-b" + assert "eventId" in envelope and "ts" in envelope + assert awaited_call.kwargs == {"to": "sid-2", "namespace": NAMESPACE} + + +@pytest.mark.asyncio +async def test_emit_to_wraps_envelope_with_metadata(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + await manager.handle_connect(NAMESPACE, "sid-meta") + + await manager.emit_to( + NAMESPACE, + "sid-meta", + "meta_event", + {"payload": True}, + handler_id="custom.handler", + correlation_id="corr-meta", + ) + + socketio.emit.assert_awaited_once() + args, kwargs = socketio.emit.await_args_list[0] + assert args[0] == "meta_event" + envelope = args[1] + assert envelope["handlerId"] == "custom.handler" + assert envelope["correlationId"] == "corr-meta" + assert envelope["data"] == {"payload": True} + assert kwargs == {"to": "sid-meta", "namespace": NAMESPACE} + + +@pytest.mark.asyncio +async def test_timestamps_are_timezone_aware(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + await manager.handle_connect(NAMESPACE, "sid-utc") + info = manager.connections[(NAMESPACE, "sid-utc")] + + assert info.connected_at.tzinfo is not None + assert info.last_activity.tzinfo is not None + + with patch("python.helpers.websocket_manager._utcnow") as mocked_now: + mocked_now.return_value = info.last_activity + await manager.route_event(NAMESPACE, "unknown", {}, "sid-utc") + assert info.last_activity.tzinfo is not None + +class DuplicateHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["dup_event"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + return {"handledBy": self.identifier} + + +class AnotherDuplicateHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["dup_event"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + return {"handledBy": self.identifier} + + +def test_register_handlers_warns_on_duplicates(monkeypatch): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + warnings: list[str] = [] + + def capture_warning(message: str) -> None: + warnings.append(message) + + monkeypatch.setattr( + "python.helpers.print_style.PrintStyle.warning", staticmethod(capture_warning) + ) + + DuplicateHandler._reset_instance_for_testing() + AnotherDuplicateHandler._reset_instance_for_testing() + handler_a = DuplicateHandler.get_instance(socketio, threading.RLock()) + handler_b = AnotherDuplicateHandler.get_instance(socketio, threading.RLock()) + + manager.register_handlers({NAMESPACE: [handler_a, handler_b]}) + + assert 
any("Duplicate handler registration" in msg for msg in warnings) + + +class NonDictHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["non_dict"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + return "raw-value" + + +@pytest.mark.asyncio +async def test_route_event_standardizes_success_payload(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + NonDictHandler._reset_instance_for_testing() + handler = NonDictHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + + response = await manager.route_event(NAMESPACE, "non_dict", {}, "sid-123") + + assert len(response["results"]) == 1 + assert response["results"][0]["ok"] is True + assert response["results"][0]["data"] == {"result": "raw-value"} + + +class ErrorHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["boom"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + raise RuntimeError("BOOM") + + +class ResultHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: # pragma: no cover - simple declaration + return ["result_event", "result_error"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + if event_type == "result_event": + return WebSocketResult.ok({"sid": sid}, correlation_id="explicit", duration_ms=1.234) + return WebSocketResult.error( + code="E_RESULT", + message="boom", + details="test", + ) + + +@pytest.mark.asyncio +async def test_route_event_standardizes_error_payload(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + ErrorHandler._reset_instance_for_testing() + handler = ErrorHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + + response = await manager.route_event(NAMESPACE, "boom", {}, "sid-123") + + assert len(response["results"]) == 1 + payload = response["results"][0] + assert payload["ok"] is False + assert payload["error"]["code"] == "HANDLER_ERROR" + assert payload["error"]["error"] == "Internal server error" + + +@pytest.mark.asyncio +async def test_route_event_accepts_websocket_result_instances(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + ResultHandler._reset_instance_for_testing() + handler = ResultHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + + response = await manager.route_event(NAMESPACE, "result_event", {}, "sid-123") + + assert response["results"] + payload = response["results"][0] + assert payload["ok"] is True + assert payload["data"] == {"sid": "sid-123"} + assert payload["correlationId"] == "explicit" + assert payload["durationMs"] == pytest.approx(1.234, rel=1e-3) + + +@pytest.mark.asyncio +async def test_route_event_preserves_websocket_result_errors(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + ResultHandler._reset_instance_for_testing() + handler = ResultHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + + response = await manager.route_event(NAMESPACE, "result_error", {}, "sid-123") + + payload = response["results"][0] + assert payload["ok"] is False + assert payload["error"] == {"code": "E_RESULT", "error": "boom", "details": "test"} + + +class AlphaFilterHandler(WebSocketHandler): 
+ @classmethod + def get_event_types(cls) -> list[str]: + return ["filter_event"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + return {"handledBy": self.identifier, "sid": sid} + + +class BetaFilterHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["filter_event"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + return {"handledBy": self.identifier, "sid": sid} + + +@pytest.mark.asyncio +async def test_route_event_include_handlers_filters_results(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + AlphaFilterHandler._reset_instance_for_testing() + BetaFilterHandler._reset_instance_for_testing() + alpha = AlphaFilterHandler.get_instance(socketio, threading.RLock()) + beta = BetaFilterHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [alpha, beta]}) + await manager.handle_connect(NAMESPACE, "sid-filter") + + response = await manager.route_event( + NAMESPACE, + "filter_event", + { + "includeHandlers": [alpha.identifier], + "payload": True, + }, + "sid-filter", + ) + + assert response["correlationId"] + results = response["results"] + assert len(results) == 1 + assert results[0]["handlerId"] == alpha.identifier + assert results[0]["data"]["handledBy"] == alpha.identifier + + +@pytest.mark.asyncio +async def test_route_event_rejects_exclude_handlers_without_permission(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + AlphaFilterHandler._reset_instance_for_testing() + handler = AlphaFilterHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-exclude") + + response = await manager.route_event( + NAMESPACE, + "filter_event", + {"excludeHandlers": [handler.identifier]}, + "sid-exclude", + ) + + result = response["results"][0] + assert result["error"]["code"] == "INVALID_FILTER" + assert "excludeHandlers" in result["error"]["error"] + + +@pytest.mark.asyncio +async def test_route_event_all_respects_exclude_handlers(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + AlphaFilterHandler._reset_instance_for_testing() + BetaFilterHandler._reset_instance_for_testing() + alpha = AlphaFilterHandler.get_instance(socketio, threading.RLock()) + beta = BetaFilterHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({NAMESPACE: [alpha, beta]}) + + await manager.handle_connect(NAMESPACE, "sid-a") + await manager.handle_connect(NAMESPACE, "sid-b") + + aggregated = await manager.route_event_all( + NAMESPACE, + "filter_event", + {"excludeHandlers": [beta.identifier]}, + handler_id="test.manager", + ) + + assert aggregated + for entry in aggregated: + assert entry["correlationId"] + assert entry["results"] + assert all(result["handlerId"] == alpha.identifier for result in entry["results"]) + + +@pytest.mark.asyncio +async def test_route_event_preserves_correlation_id(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + results = [] + DummyHandler._reset_instance_for_testing() + handler = DummyHandler.get_instance(socketio, threading.RLock(), results) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-correlation") + + response = await manager.route_event( + NAMESPACE, + "dummy", + {"foo": "bar", 
"correlationId": "manual-correlation"}, + "sid-correlation", + ) + + assert response["correlationId"] == "manual-correlation" + result = response["results"][0] + assert result["correlationId"] == "manual-correlation" + + +@pytest.mark.asyncio +async def test_request_preserves_explicit_correlation_id(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + DummyHandler._reset_instance_for_testing() + handler = DummyHandler.get_instance(socketio, threading.RLock(), []) + manager.register_handlers({NAMESPACE: [handler]}) + await manager.handle_connect(NAMESPACE, "sid-request") + + response = await manager.request_for_sid( + namespace=NAMESPACE, + sid="sid-request", + event_type="dummy", + data={"payload": True, "correlationId": "req-correlation"}, + handler_id="tester", + ) + + assert response["correlationId"] == "req-correlation" + result = response["results"][0] + assert result["correlationId"] == "req-correlation" + + +@pytest.mark.asyncio +async def test_request_all_entries_include_correlation_id(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + DummyHandler._reset_instance_for_testing() + handler = DummyHandler.get_instance(socketio, threading.RLock(), []) + manager.register_handlers({NAMESPACE: [handler]}) + + await manager.handle_connect(NAMESPACE, "sid-1") + await manager.handle_connect(NAMESPACE, "sid-2") + + aggregated = await manager.route_event_all( + NAMESPACE, + "dummy", + {"value": 1, "correlationId": "agg-correlation"}, + ) + + assert aggregated + for entry in aggregated: + assert entry["correlationId"] == "agg-correlation" + assert entry["results"] + assert entry["results"][0]["correlationId"] == "agg-correlation" + + +def test_debug_logging_respects_runtime_flag(monkeypatch): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + logs: list[str] = [] + + def capture(message: str) -> None: + logs.append(message) + + monkeypatch.setattr("python.helpers.print_style.PrintStyle.debug", staticmethod(capture)) + monkeypatch.setattr("python.helpers.websocket_manager.runtime.is_development", lambda: False) + + manager._debug("should-not-log") # noqa: SLF001 + assert logs == [] + + monkeypatch.setattr("python.helpers.websocket_manager.runtime.is_development", lambda: True) + manager._debug("should-log") # noqa: SLF001 + assert logs == ["should-log"] + + +@pytest.mark.asyncio +async def test_diagnostic_event_emitted_for_inbound(): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + results: list[dict[str, Any]] = [] + DummyHandler._reset_instance_for_testing() + handler = DummyHandler.get_instance(socketio, threading.RLock(), results) + manager.register_handlers({NAMESPACE: [handler]}) + + await manager.handle_connect(NAMESPACE, "observer") + assert manager.register_diagnostic_watcher(NAMESPACE, "observer") is True + await manager.handle_connect(NAMESPACE, "sid-client") + + await manager.route_event(NAMESPACE, "dummy", {"payload": "value"}, "sid-client") + + emitted_events = [call.args[0] for call in socketio.emit.await_args_list] + assert DIAGNOSTIC_EVENT in emitted_events + + +@pytest.mark.asyncio +async def test_lifecycle_events_broadcast(monkeypatch): + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + broadcast_mock = AsyncMock() + monkeypatch.setattr(manager, "broadcast", broadcast_mock) + + await manager.handle_connect(NAMESPACE, "sid-life") + await 
asyncio.sleep(0) + await manager.handle_disconnect(NAMESPACE, "sid-life") + await asyncio.sleep(0) + + events = [call.args[1] for call in broadcast_mock.await_args_list] + assert LIFECYCLE_CONNECT_EVENT in events + assert LIFECYCLE_DISCONNECT_EVENT in events diff --git a/tests/test_websocket_namespace_discovery.py b/tests/test_websocket_namespace_discovery.py new file mode 100644 index 0000000000..aa25f2b201 --- /dev/null +++ b/tests/test_websocket_namespace_discovery.py @@ -0,0 +1,225 @@ +import asyncio +import contextlib +import socket +import sys +from pathlib import Path +from typing import Any, AsyncIterator + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + + +@contextlib.asynccontextmanager +async def _run_asgi_app(app: Any) -> AsyncIterator[str]: + import uvicorn + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(("127.0.0.1", 0)) + sock.listen(128) + + port = sock.getsockname()[1] + + config = uvicorn.Config( + app, + host="127.0.0.1", + port=port, + log_level="warning", + access_log=False, + lifespan="off", + ) + server = uvicorn.Server(config) + server.install_signal_handlers = lambda: None # type: ignore[method-assign] + + task = asyncio.create_task(server.serve(sockets=[sock])) + try: + while not server.started: + await asyncio.sleep(0.01) + yield f"http://127.0.0.1:{port}" + finally: + server.should_exit = True + try: + await asyncio.wait_for(task, timeout=5) + finally: + sock.close() + + +def _write_handler_module(path: Path, class_name: str, event_type: str) -> None: + path.write_text( + "\n".join( + [ + "from __future__ import annotations", + "", + "from typing import Any", + "", + "from python.helpers.websocket import WebSocketHandler", + "", + f"class {class_name}(WebSocketHandler):", + " @classmethod", + " def requires_auth(cls) -> bool:", + " return False", + "", + " @classmethod", + " def requires_csrf(cls) -> bool:", + " return False", + "", + " @classmethod", + " def get_event_types(cls) -> list[str]:", + f" return ['{event_type}']", + "", + " async def process_event(self, event_type: str, data: dict[str, Any], sid: str):", + " return {'ok': True}", + "", + ] + ), + encoding="utf-8", + ) + + +def test_discovery_supports_folder_entries_and_ignores_deeper_nesting(tmp_path: Path) -> None: + from python.helpers.websocket_namespace_discovery import discover_websocket_namespaces + + folder = tmp_path / "orders" + folder.mkdir() + _write_handler_module(folder / "orders.py", "OrdersHandler", "orders_request") + + # Deeper nesting must be ignored (and must not be imported). 
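+    # The sentinel module below raises at import time, so any accidental
+    # import of deeper nesting levels fails this test loudly.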
+ nested = folder / "nested" + nested.mkdir() + (nested / "boom.py").write_text("raise RuntimeError('should-not-import')\n", encoding="utf-8") + + discoveries = discover_websocket_namespaces(handlers_folder=str(tmp_path), include_root_default=False) + by_ns = {d.namespace: d for d in discoveries} + + assert "/orders" in by_ns + entry = by_ns["/orders"] + assert [cls.__name__ for cls in entry.handler_classes] == ["OrdersHandler"] + + +def test_discovery_folder_suffix_handler_stripped(tmp_path: Path) -> None: + from python.helpers.websocket_namespace_discovery import discover_websocket_namespaces + + folder = tmp_path / "sales_handler" + folder.mkdir() + _write_handler_module(folder / "main.py", "SalesHandler", "sales_request") + + discoveries = discover_websocket_namespaces(handlers_folder=str(tmp_path), include_root_default=False) + namespaces = {d.namespace for d in discoveries} + assert "/sales" in namespaces + + +def test_discovery_empty_folder_warns_and_treats_namespace_unregistered(tmp_path: Path, monkeypatch) -> None: + from flask import Flask + import socketio + + from python.helpers.websocket_manager import WebSocketManager + from python.helpers.websocket_namespace_discovery import discover_websocket_namespaces + from run_ui import configure_websocket_namespaces + + empty = tmp_path / "empty" + empty.mkdir() + (empty / "__init__.py").write_text("# init\n", encoding="utf-8") + + warnings: list[str] = [] + + def _warn(message: str) -> None: + warnings.append(message) + + monkeypatch.setattr("python.helpers.print_style.PrintStyle.warning", staticmethod(_warn)) + + discoveries = discover_websocket_namespaces(handlers_folder=str(tmp_path), include_root_default=False) + assert "/empty" not in {d.namespace for d in discoveries} + assert any("empty" in msg.lower() for msg in warnings) + + # Integration check: treat as unregistered -> UNKNOWN_NAMESPACE connect_error. 
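+    # Connecting to the skipped namespace must fail with the structured
+    # payload asserted below:
+    #   {"message": "UNKNOWN_NAMESPACE", "data": {"namespace": "/empty", ...}}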
+ app = Flask("test_empty_folder_unregistered") + app.secret_key = "test-secret" + sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*", namespaces="*") + lock = __import__("threading").RLock() + manager = WebSocketManager(sio, lock) + + handlers_by_namespace: dict[str, list[Any]] = {} + for discovery in discoveries: + handlers_by_namespace[discovery.namespace] = [ + cls.get_instance(sio, lock) for cls in discovery.handler_classes + ] + + configure_websocket_namespaces( + webapp=app, + socketio_server=sio, + websocket_manager=manager, + handlers_by_namespace=handlers_by_namespace, + ) + + asgi_app = socketio.ASGIApp(sio) + async def _run() -> None: + async with _run_asgi_app(asgi_app) as base_url: + client = socketio.AsyncClient() + connect_error_fut: asyncio.Future[Any] = asyncio.get_running_loop().create_future() + + async def _on_connect_error(data: Any) -> None: + if not connect_error_fut.done(): + connect_error_fut.set_result(data) + + client.on("connect_error", _on_connect_error, namespace="/empty") + try: + with pytest.raises(socketio.exceptions.ConnectionError): + await client.connect(base_url, namespaces=["/empty"]) + err = await asyncio.wait_for(connect_error_fut, timeout=2) + assert err["message"] == "UNKNOWN_NAMESPACE" + assert err["data"]["namespace"] == "/empty" + finally: + try: + await client.disconnect() + except Exception: + pass + + asyncio.run(_run()) + + +def test_discovery_invalid_modules_fail_fast_with_descriptive_errors(tmp_path: Path) -> None: + from python.helpers.websocket_namespace_discovery import discover_websocket_namespaces + + # 0 handlers in a *_handler.py module + (tmp_path / "bad_handler.py").write_text( + "class NotAHandler:\n pass\n", encoding="utf-8" + ) + with pytest.raises(RuntimeError) as excinfo: + discover_websocket_namespaces(handlers_folder=str(tmp_path), include_root_default=False) + assert "defines no WebSocketHandler subclasses" in str(excinfo.value) + + # 2+ handlers in a *_handler.py module + tmp_path.joinpath("bad_handler.py").unlink() + (tmp_path / "two_handler.py").write_text( + "\n".join( + [ + "from python.helpers.websocket import WebSocketHandler", + "class A(WebSocketHandler):", + " @classmethod", + " def requires_auth(cls): return False", + " @classmethod", + " def requires_csrf(cls): return False", + " @classmethod", + " def get_event_types(cls): return ['two_a']", + " async def process_event(self, event_type, data, sid): return {'ok': True}", + "class B(WebSocketHandler):", + " @classmethod", + " def requires_auth(cls): return False", + " @classmethod", + " def requires_csrf(cls): return False", + " @classmethod", + " def get_event_types(cls): return ['two_b']", + " async def process_event(self, event_type, data, sid): return {'ok': True}", + "", + ] + ), + encoding="utf-8", + ) + with pytest.raises(RuntimeError) as excinfo2: + discover_websocket_namespaces(handlers_folder=str(tmp_path), include_root_default=False) + message = str(excinfo2.value) + assert "defines multiple WebSocketHandler subclasses" in message + assert "A" in message and "B" in message diff --git a/tests/test_websocket_namespace_security.py b/tests/test_websocket_namespace_security.py new file mode 100644 index 0000000000..ed1127651e --- /dev/null +++ b/tests/test_websocket_namespace_security.py @@ -0,0 +1,243 @@ +import asyncio +import contextlib +import socket +import sys +import threading +from pathlib import Path +from typing import Any, AsyncIterator + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if 
str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + + +@contextlib.asynccontextmanager +async def _run_asgi_app(app: Any) -> AsyncIterator[str]: + import uvicorn + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(("127.0.0.1", 0)) + sock.listen(128) + + port = sock.getsockname()[1] + + config = uvicorn.Config( + app, + host="127.0.0.1", + port=port, + log_level="warning", + access_log=False, + lifespan="off", + ) + server = uvicorn.Server(config) + server.install_signal_handlers = lambda: None # type: ignore[method-assign] + + task = asyncio.create_task(server.serve(sockets=[sock])) + try: + while not server.started: + await asyncio.sleep(0.01) + yield f"http://127.0.0.1:{port}" + finally: + server.should_exit = True + try: + await asyncio.wait_for(task, timeout=5) + finally: + sock.close() + + +def _make_session_cookie(app: Any, data: dict[str, Any]) -> str: + from flask.sessions import SecureCookieSessionInterface + + serializer = SecureCookieSessionInterface().get_signing_serializer(app) + assert serializer is not None + return serializer.dumps(data) + + +@pytest.mark.asyncio +async def test_connect_security_is_computed_per_namespace_and_enforced(monkeypatch) -> None: + from flask import Flask + import socketio + + from python.helpers.websocket import WebSocketHandler + from python.helpers.websocket_manager import WebSocketManager + from python.helpers import runtime + from run_ui import configure_websocket_namespaces + + class OpenHandler(WebSocketHandler): + @classmethod + def requires_auth(cls) -> bool: + return False + + @classmethod + def requires_csrf(cls) -> bool: + return False + + @classmethod + def get_event_types(cls) -> list[str]: + return ["open_ping"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str) -> dict[str, Any]: + return {"ok": True} + + class SecureHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["secure_ping"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str) -> dict[str, Any]: + return {"ok": True} + + OpenHandler._reset_instance_for_testing() + SecureHandler._reset_instance_for_testing() + + monkeypatch.setattr("python.helpers.login.get_credentials_hash", lambda: "hash") + + webapp = Flask("test_websocket_namespace_security") + webapp.secret_key = "test-secret" + + sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*", namespaces="*") + lock = threading.RLock() + manager = WebSocketManager(sio, lock) + handlers_by_namespace = { + "/open": [OpenHandler.get_instance(sio, lock)], + "/secure": [SecureHandler.get_instance(sio, lock)], + } + + configure_websocket_namespaces( + webapp=webapp, + socketio_server=sio, + websocket_manager=manager, + handlers_by_namespace=handlers_by_namespace, + ) + + asgi_app = socketio.ASGIApp(sio) + + async with _run_asgi_app(asgi_app) as base_url: + # Open namespace should not require auth/csrf (but Origin validation is always enforced). 
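+        # Unhandled event names on an open namespace must still resolve with a
+        # NO_HANDLERS error item instead of hanging; both cases are exercised below.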
+ open_client = socketio.AsyncClient() + await open_client.connect( + base_url, + namespaces=["/open"], + headers={"Origin": base_url}, + wait_timeout=2, + ) + try: + res = await open_client.call("open_ping", {}, namespace="/open", timeout=2) + assert isinstance(res, dict) + assert res.get("results") + res_unhandled = await open_client.call("unhandled_event", {"x": 1}, namespace="/open", timeout=2) + assert res_unhandled["results"] + assert res_unhandled["results"][0]["ok"] is False + assert res_unhandled["results"][0]["error"]["code"] == "NO_HANDLERS" + finally: + await open_client.disconnect() + + # Secure namespace rejects without valid session+csrf when credentials are configured. + secure_client = socketio.AsyncClient() + with pytest.raises(socketio.exceptions.ConnectionError): + await secure_client.connect( + base_url, + namespaces=["/secure"], + headers={"Origin": base_url}, + wait_timeout=2, + ) + await secure_client.disconnect() + + # Secure namespace accepts valid session + auth csrf_token + runtime-scoped csrf cookie. + csrf_token = "csrf-1" + session_cookie = _make_session_cookie( + webapp, + { + "authentication": "hash", + "csrf_token": csrf_token, + "user_id": "u1", + }, + ) + session_cookie_name = webapp.config.get("SESSION_COOKIE_NAME", "session") + csrf_cookie_name = f"csrf_token_{runtime.get_runtime_id()}" + cookie_header = f"{session_cookie_name}={session_cookie}; {csrf_cookie_name}={csrf_token}" + + secure_client_ok = socketio.AsyncClient() + await secure_client_ok.connect( + base_url, + namespaces=["/secure"], + headers={"Origin": base_url, "Cookie": cookie_header}, + auth={"csrf_token": csrf_token}, + wait_timeout=2, + ) + try: + res2 = await secure_client_ok.call("secure_ping", {}, namespace="/secure", timeout=2) + assert isinstance(res2, dict) + assert res2.get("results") + finally: + await secure_client_ok.disconnect() + + +@pytest.mark.asyncio +async def test_unknown_namespace_rejected_with_deterministic_connect_error_payload() -> None: + from flask import Flask + import socketio + + from python.helpers.websocket import WebSocketHandler + from python.helpers.websocket_manager import WebSocketManager + from run_ui import configure_websocket_namespaces + + class OpenHandler(WebSocketHandler): + @classmethod + def requires_auth(cls) -> bool: + return False + + @classmethod + def requires_csrf(cls) -> bool: + return False + + @classmethod + def get_event_types(cls) -> list[str]: + return ["open_ping"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str) -> dict[str, Any]: + return {"ok": True} + + OpenHandler._reset_instance_for_testing() + + webapp = Flask("test_unknown_namespace_rejection") + webapp.secret_key = "test-secret" + + sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*", namespaces="*") + lock = threading.RLock() + manager = WebSocketManager(sio, lock) + + configure_websocket_namespaces( + webapp=webapp, + socketio_server=sio, + websocket_manager=manager, + handlers_by_namespace={"/open": [OpenHandler.get_instance(sio, lock)]}, + ) + + asgi_app = socketio.ASGIApp(sio) + + async with _run_asgi_app(asgi_app) as base_url: + client = socketio.AsyncClient() + connect_error_fut: asyncio.Future[Any] = asyncio.get_running_loop().create_future() + + async def _on_connect_error(data: Any) -> None: + if not connect_error_fut.done(): + connect_error_fut.set_result(data) + + client.on("connect_error", _on_connect_error, namespace="/unknown") + + try: + with pytest.raises(socketio.exceptions.ConnectionError): + await 
client.connect(base_url, namespaces=["/unknown"]) + + err = await asyncio.wait_for(connect_error_fut, timeout=2) + assert err["message"] == "UNKNOWN_NAMESPACE" + assert err["data"] == {"code": "UNKNOWN_NAMESPACE", "namespace": "/unknown"} + finally: + try: + await client.disconnect() + except Exception: + pass diff --git a/tests/test_websocket_namespaces.py b/tests/test_websocket_namespaces.py new file mode 100644 index 0000000000..f82329107c --- /dev/null +++ b/tests/test_websocket_namespaces.py @@ -0,0 +1,497 @@ +import asyncio +import contextlib +import socket +import sys +import threading +from pathlib import Path +from typing import Any, AsyncIterator +from unittest.mock import AsyncMock + +import pytest + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(PROJECT_ROOT)) + +from python.helpers.state_monitor import StateMonitor +from python.helpers.websocket_manager import WebSocketManager + + +class FakeSocketIOServer: + def __init__(self) -> None: + self.emit = AsyncMock() + self.disconnect = AsyncMock() + + +@contextlib.asynccontextmanager +async def _run_asgi_app(app: Any) -> AsyncIterator[str]: + import uvicorn + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(("127.0.0.1", 0)) + sock.listen(128) + + port = sock.getsockname()[1] + + config = uvicorn.Config( + app, + host="127.0.0.1", + port=port, + log_level="warning", + access_log=False, + lifespan="off", + ) + server = uvicorn.Server(config) + server.install_signal_handlers = lambda: None # type: ignore[method-assign] + + task = asyncio.create_task(server.serve(sockets=[sock])) + try: + while not server.started: + await asyncio.sleep(0.01) + yield f"http://127.0.0.1:{port}" + finally: + server.should_exit = True + try: + await asyncio.wait_for(task, timeout=5) + finally: + sock.close() + + +@pytest.mark.asyncio +async def test_manager_identity_is_namespace_and_sid_allows_same_sid_across_namespaces() -> None: + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + # Avoid flakiness from lifecycle broadcasts scheduled via asyncio.create_task. + manager._schedule_lifecycle_broadcast = lambda *_args, **_kwargs: None # type: ignore[assignment] + + sid = "shared-sid" + ns_a = "/a" + ns_b = "/b" + + await manager.handle_connect(ns_a, sid) + await manager.handle_connect(ns_b, sid) + + assert (ns_a, sid) in manager.connections + assert (ns_b, sid) in manager.connections + + await manager.handle_disconnect(ns_a, sid) + assert (ns_a, sid) not in manager.connections + assert (ns_b, sid) in manager.connections + + await manager.emit_to(ns_a, sid, "test_event", {"value": 1}, correlation_id="corr-1") + + assert (ns_a, sid) in manager.buffers + assert (ns_b, sid) not in manager.buffers + assert socketio.emit.await_count == 0 + + +def test_state_monitor_tracks_two_identities_for_same_sid_across_namespaces() -> None: + monitor = StateMonitor() + sid = "shared-sid" + monitor.register_sid("/a", sid) + monitor.register_sid("/b", sid) + + debug = monitor._debug_state() + assert ("/a", sid) in debug["identities"] + assert ("/b", sid) in debug["identities"] + + +@pytest.mark.asyncio +async def test_namespace_isolation_state_sync_vs_dev_websocket_test() -> None: + """ + CONTRACT.INVARIANT.NS.ISOLATION: no cross-namespace delivery for application events. + + Acceptance proof for `/state_sync` vs `/dev_websocket_test` namespaces. 
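+
+    A single client joins both namespaces over one connection; `state_push`
+    must arrive only on `/state_sync` and `ws_tester_broadcast` only on
+    `/dev_websocket_test`.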
+ """ + + from flask import Flask + import socketio + + from python.helpers.websocket import WebSocketHandler + from python.helpers.websocket_manager import WebSocketManager + from run_ui import configure_websocket_namespaces + + class StateHandler(WebSocketHandler): + @classmethod + def requires_auth(cls) -> bool: + return False + + @classmethod + def requires_csrf(cls) -> bool: + return False + + @classmethod + def get_event_types(cls) -> list[str]: + return ["state_request"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + await self.emit_to(sid, "state_push", {"source": "state_sync"}) + return {"ok": True} + + class DevHandler(WebSocketHandler): + @classmethod + def requires_auth(cls) -> bool: + return False + + @classmethod + def requires_csrf(cls) -> bool: + return False + + @classmethod + def get_event_types(cls) -> list[str]: + return ["ws_tester_emit"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + await self.broadcast("ws_tester_broadcast", {"source": "dev_websocket_test"}) + return None + + StateHandler._reset_instance_for_testing() + DevHandler._reset_instance_for_testing() + + webapp = Flask("test_namespace_isolation") + webapp.secret_key = "test-secret" + + sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*", namespaces="*") + lock = threading.RLock() + manager = WebSocketManager(sio, lock) + + configure_websocket_namespaces( + webapp=webapp, + socketio_server=sio, + websocket_manager=manager, + handlers_by_namespace={ + "/state_sync": [StateHandler.get_instance(sio, lock)], + "/dev_websocket_test": [DevHandler.get_instance(sio, lock)], + }, + ) + + asgi_app = socketio.ASGIApp(sio) + + async with _run_asgi_app(asgi_app) as base_url: + client = socketio.AsyncClient() + + state_push_state = asyncio.Event() + state_push_dev = asyncio.Event() + tester_broadcast_dev = asyncio.Event() + tester_broadcast_state = asyncio.Event() + + async def _on_state_push_state(_payload: Any) -> None: + state_push_state.set() + + async def _on_state_push_dev(_payload: Any) -> None: + state_push_dev.set() + + async def _on_tester_broadcast_dev(_payload: Any) -> None: + tester_broadcast_dev.set() + + async def _on_tester_broadcast_state(_payload: Any) -> None: + tester_broadcast_state.set() + + client.on("state_push", _on_state_push_state, namespace="/state_sync") + client.on("state_push", _on_state_push_dev, namespace="/dev_websocket_test") + client.on("ws_tester_broadcast", _on_tester_broadcast_dev, namespace="/dev_websocket_test") + client.on("ws_tester_broadcast", _on_tester_broadcast_state, namespace="/state_sync") + + await client.connect( + base_url, + namespaces=["/state_sync", "/dev_websocket_test"], + headers={"Origin": base_url}, + wait_timeout=2, + ) + try: + await client.call("state_request", {"context": None}, namespace="/state_sync", timeout=2) + await asyncio.wait_for(state_push_state.wait(), timeout=2) + await asyncio.sleep(0.05) + assert state_push_dev.is_set() is False + + await client.emit("ws_tester_emit", {"message": "hi"}, namespace="/dev_websocket_test") + await asyncio.wait_for(tester_broadcast_dev.wait(), timeout=2) + await asyncio.sleep(0.05) + assert tester_broadcast_state.is_set() is False + finally: + await client.disconnect() + + +@pytest.mark.asyncio +async def test_diagnostics_include_source_namespace_and_deliver_on_dev_namespace_only() -> None: + """ + CONTRACT.Diagnostics: dev console diagnostics are delivered on `/dev_websocket_test`, + but must include `sourceNamespace` 
identifying the origin namespace. + """ + + from python.helpers.websocket import WebSocketHandler + from python.helpers.websocket_manager import DIAGNOSTIC_EVENT, WebSocketManager + + class DummyHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["dummy_event"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + return {"ok": True} + + DummyHandler._reset_instance_for_testing() + + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + manager._schedule_lifecycle_broadcast = lambda *_args, **_kwargs: None # type: ignore[assignment] + + ns_state = "/state_sync" + ns_dev = "/dev_websocket_test" + + handler = DummyHandler.get_instance(socketio, threading.RLock()) + manager.register_handlers({ns_state: [handler]}) + + await manager.handle_connect(ns_dev, "sid-watcher") + await manager.handle_connect(ns_state, "sid-client") + assert manager.register_diagnostic_watcher(ns_dev, "sid-watcher") is True + + socketio.emit.reset_mock() + + await manager.route_event(ns_state, "dummy_event", {"payload": True}, "sid-client") + + calls = [(call.args, call.kwargs) for call in socketio.emit.await_args_list] + diagnostic = next((c for c in calls if c[0] and c[0][0] == DIAGNOSTIC_EVENT), None) + assert diagnostic is not None + + args, kwargs = diagnostic + envelope = args[1] + assert kwargs == {"to": "sid-watcher", "namespace": ns_dev} + assert envelope["data"]["sourceNamespace"] == ns_state + + +def test_namespace_discovery_maps_core_handlers_to_expected_namespaces() -> None: + """ + US1 regression: ensure discovery assigns core handlers to their dedicated namespaces + (no cross-registration). + """ + + from python.helpers.websocket_namespace_discovery import discover_websocket_namespaces + + discoveries = discover_websocket_namespaces( + handlers_folder="python/websocket_handlers", + include_root_default=True, + ) + by_namespace = {entry.namespace: entry for entry in discoveries} + + assert "/state_sync" in by_namespace + assert "/dev_websocket_test" in by_namespace + + state_cls_names = [cls.__name__ for cls in by_namespace["/state_sync"].handler_classes] + dev_cls_names = [cls.__name__ for cls in by_namespace["/dev_websocket_test"].handler_classes] + + assert state_cls_names == ["StateSyncHandler"] + assert dev_cls_names == ["DevWebsocketTestHandler"] + + +def test_run_ui_builds_namespace_handler_map_without_cross_registration() -> None: + from run_ui import _build_websocket_handlers_by_namespace + + handlers_by_namespace = _build_websocket_handlers_by_namespace(object(), threading.RLock()) + + assert "/state_sync" in handlers_by_namespace + assert "/dev_websocket_test" in handlers_by_namespace + + assert all( + handler.__class__.__name__ != "DevWebsocketTestHandler" + for handler in handlers_by_namespace["/state_sync"] + ) + assert all( + handler.__class__.__name__ != "StateSyncHandler" + for handler in handlers_by_namespace["/dev_websocket_test"] + ) + + +@pytest.mark.asyncio +async def test_route_event_dispatches_only_within_connected_namespace_and_results_are_scoped() -> None: + """ + CONTRACT.NS.ROUTING: inbound routing is restricted to handlers in the connected namespace. 
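+
+    Two handlers register the same event name in different namespaces; a
+    dispatch for a given sid must reach only the handler of the namespace that
+    sid is connected on, as the shared `calls` log proves.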
+ """ + + from python.helpers.websocket import WebSocketHandler + + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + manager._schedule_lifecycle_broadcast = lambda *_args, **_kwargs: None # type: ignore[assignment] + + ns_state = "/state_sync" + ns_dev = "/dev_websocket_test" + + calls: list[str] = [] + + class StatePingHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["route_test"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + calls.append(f"state:{sid}") + return {"ns": "state"} + + class DevPingHandler(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["route_test"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + calls.append(f"dev:{sid}") + return {"ns": "dev"} + + StatePingHandler._reset_instance_for_testing() + DevPingHandler._reset_instance_for_testing() + + state_handler = StatePingHandler.get_instance(socketio, threading.RLock()) + dev_handler = DevPingHandler.get_instance(socketio, threading.RLock()) + + manager.register_handlers({ns_state: [state_handler], ns_dev: [dev_handler]}) + await manager.handle_connect(ns_state, "sid-state") + await manager.handle_connect(ns_dev, "sid-dev") + + res_state = await manager.route_event(ns_state, "route_test", {"x": 1}, "sid-state") + assert {item["handlerId"] for item in res_state["results"]} == {state_handler.identifier} + assert res_state["results"][0]["data"]["ns"] == "state" + + res_dev = await manager.route_event(ns_dev, "route_test", {"x": 2}, "sid-dev") + assert {item["handlerId"] for item in res_dev["results"]} == {dev_handler.identifier} + assert res_dev["results"][0]["data"]["ns"] == "dev" + + assert calls == ["state:sid-state", "dev:sid-dev"] + + +@pytest.mark.asyncio +async def test_lifecycle_broadcasts_deliver_only_within_the_namespace() -> None: + """ + CONTRACT.NS.DELIVERY: lifecycle broadcasts are namespace-scoped. + """ + + from python.helpers.websocket_manager import ( + LIFECYCLE_CONNECT_EVENT, + LIFECYCLE_DISCONNECT_EVENT, + ) + + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + + ns_state = "/state_sync" + ns_dev = "/dev_websocket_test" + + # Connect events should broadcast only within their namespace. + await manager.handle_connect(ns_state, "sid-state-1") + await asyncio.sleep(0) + state_connect_calls = [ + call + for call in socketio.emit.await_args_list + if call.args and call.args[0] == LIFECYCLE_CONNECT_EVENT + ] + assert state_connect_calls + assert all(call.kwargs.get("namespace") == ns_state for call in state_connect_calls) + + socketio.emit.reset_mock() + await manager.handle_connect(ns_dev, "sid-dev-1") + await asyncio.sleep(0) + dev_connect_calls = [ + call + for call in socketio.emit.await_args_list + if call.args and call.args[0] == LIFECYCLE_CONNECT_EVENT + ] + assert dev_connect_calls + assert all(call.kwargs.get("namespace") == ns_dev for call in dev_connect_calls) + + # Disconnect broadcasts go to remaining peers in that namespace only. 
+ socketio.emit.reset_mock() + await manager.handle_connect(ns_state, "sid-state-2") + await manager.handle_connect(ns_dev, "sid-dev-2") + socketio.emit.reset_mock() + + await manager.handle_disconnect(ns_state, "sid-state-2") + await asyncio.sleep(0) + state_disconnect_calls = [ + call + for call in socketio.emit.await_args_list + if call.args and call.args[0] == LIFECYCLE_DISCONNECT_EVENT + ] + assert state_disconnect_calls + assert all(call.kwargs.get("namespace") == ns_state for call in state_disconnect_calls) + assert all(call.kwargs.get("to") == "sid-state-1" for call in state_disconnect_calls) + + +@pytest.mark.asyncio +async def test_request_semantics_no_handlers_and_timeouts_are_namespace_scoped_and_order_insensitive() -> None: + """ + CONTRACT.REQUEST.RESULTS + CONTRACT.REQUEST.RESULTS.ORDERING + CONTRACT.NS.ROUTING. + """ + + from python.helpers.websocket import WebSocketHandler + + socketio = FakeSocketIOServer() + manager = WebSocketManager(socketio, threading.RLock()) + manager._schedule_lifecycle_broadcast = lambda *_args, **_kwargs: None # type: ignore[assignment] + + ns_state = "/state_sync" + ns_dev = "/dev_websocket_test" + + class Alpha(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["multi", "slow"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + if event_type == "slow": + await asyncio.sleep(0.2) + return {"alpha": True} + return {"alpha": True} + + class Beta(WebSocketHandler): + @classmethod + def get_event_types(cls) -> list[str]: + return ["multi"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + return {"beta": True} + + Alpha._reset_instance_for_testing() + Beta._reset_instance_for_testing() + alpha = Alpha.get_instance(socketio, threading.RLock()) + beta = Beta.get_instance(socketio, threading.RLock()) + + manager.register_handlers({ns_state: [alpha, beta]}) + await manager.handle_connect(ns_state, "sid-a") + await manager.handle_connect(ns_state, "sid-b") + await manager.handle_connect(ns_dev, "sid-dev") + + # Unknown event name -> NO_HANDLERS (no hang), scoped to the namespace. + no_handler = await manager.route_event(ns_dev, "missing_event", {"x": 1}, "sid-dev") + assert no_handler["results"][0]["ok"] is False + assert no_handler["results"][0]["error"]["code"] == "NO_HANDLERS" + assert ns_dev in no_handler["results"][0]["error"]["error"] + + # Unknown event name in a namespace that *does* have other handlers -> NO_HANDLERS. + unhandled_in_state = await manager.route_event(ns_state, "unknown_event", {"x": 1}, "sid-a") + assert unhandled_in_state["results"][0]["ok"] is False + assert unhandled_in_state["results"][0]["error"]["code"] == "NO_HANDLERS" + assert ns_state in unhandled_in_state["results"][0]["error"]["error"] + + # Known event name in the wrong namespace -> NO_HANDLERS (no cross-namespace fallback). + wrong_namespace = await manager.route_event(ns_dev, "multi", {"x": 1}, "sid-dev") + assert wrong_namespace["results"][0]["ok"] is False + assert wrong_namespace["results"][0]["error"]["code"] == "NO_HANDLERS" + assert ns_dev in wrong_namespace["results"][0]["error"]["error"] + + # Order-insensitive results[]: both handlers must be present regardless of ordering. + multi = await manager.route_event(ns_state, "multi", {"x": 1}, "sid-a") + handler_ids = {item["handlerId"] for item in multi["results"]} + assert handler_ids == {alpha.identifier, beta.identifier} + + # Timeout results are represented as TIMEOUT items and scoped to the namespace. 
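+    # With timeout_ms=50 against a 200ms handler, each /state_sync connection
+    # yields one aggregated entry whose result is an error item with code TIMEOUT.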
+ aggregated = await manager.route_event_all(ns_state, "slow", {"x": 1}, timeout_ms=50) + assert len(aggregated) == 2 # only state namespace connections + assert {entry["sid"] for entry in aggregated} == {"sid-a", "sid-b"} + for entry in aggregated: + assert entry["results"] + assert entry["results"][0]["ok"] is False + assert entry["results"][0]["error"]["code"] == "TIMEOUT" + + # Allow the underlying slow route_event coroutines to complete so pytest's event loop + # teardown does not cancel them mid-flight (avoids noisy InvalidStateError callbacks). + await asyncio.sleep(0.3) diff --git a/tests/test_websocket_namespaces_integration.py b/tests/test_websocket_namespaces_integration.py new file mode 100644 index 0000000000..55b3da0020 --- /dev/null +++ b/tests/test_websocket_namespaces_integration.py @@ -0,0 +1,113 @@ +import asyncio +import contextlib +import socket +from typing import Any, AsyncIterator + +import pytest + + +@contextlib.asynccontextmanager +async def _run_asgi_app(app: Any) -> AsyncIterator[str]: + import uvicorn + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(("127.0.0.1", 0)) + sock.listen(128) + + port = sock.getsockname()[1] + + config = uvicorn.Config( + app, + host="127.0.0.1", + port=port, + log_level="warning", + access_log=False, + lifespan="off", + ) + server = uvicorn.Server(config) + server.install_signal_handlers = lambda: None # type: ignore[method-assign] + + task = asyncio.create_task(server.serve(sockets=[sock])) + try: + while not server.started: + await asyncio.sleep(0.01) + yield f"http://127.0.0.1:{port}" + finally: + server.should_exit = True + try: + await asyncio.wait_for(task, timeout=5) + finally: + sock.close() + + +@pytest.mark.asyncio +async def test_unregistered_namespace_connection_fails_with_unknown_namespace_connect_error() -> None: + """ + US5 integration: unregistered namespace connections fail deterministically with a structured + connect_error payload (UNKNOWN_NAMESPACE), independent of python-socketio defaults. 
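+
+    Expected payload (asserted verbatim below):
+        {"message": "UNKNOWN_NAMESPACE",
+         "data": {"code": "UNKNOWN_NAMESPACE", "namespace": "/unknown"}}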
+ """ + + from flask import Flask + import socketio + + from python.helpers.websocket import WebSocketHandler + from python.helpers.websocket_manager import WebSocketManager + from run_ui import configure_websocket_namespaces + + class OpenHandler(WebSocketHandler): + @classmethod + def requires_auth(cls) -> bool: + return False + + @classmethod + def requires_csrf(cls) -> bool: + return False + + @classmethod + def get_event_types(cls) -> list[str]: + return ["open_ping"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + return {"ok": True} + + OpenHandler._reset_instance_for_testing() + + webapp = Flask("test_ws_namespaces_integration") + webapp.secret_key = "test-secret" + + sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*", namespaces="*") + lock = __import__("threading").RLock() + manager = WebSocketManager(sio, lock) + + configure_websocket_namespaces( + webapp=webapp, + socketio_server=sio, + websocket_manager=manager, + handlers_by_namespace={"/open": [OpenHandler.get_instance(sio, lock)]}, + ) + + asgi_app = socketio.ASGIApp(sio) + + async with _run_asgi_app(asgi_app) as base_url: + client = socketio.AsyncClient() + connect_error_fut: asyncio.Future[Any] = asyncio.get_running_loop().create_future() + + async def _on_connect_error(data: Any) -> None: + if not connect_error_fut.done(): + connect_error_fut.set_result(data) + + client.on("connect_error", _on_connect_error, namespace="/unknown") + + try: + with pytest.raises(socketio.exceptions.ConnectionError): + await client.connect(base_url, namespaces=["/unknown"]) + + err = await asyncio.wait_for(connect_error_fut, timeout=2) + assert err["message"] == "UNKNOWN_NAMESPACE" + assert err["data"] == {"code": "UNKNOWN_NAMESPACE", "namespace": "/unknown"} + finally: + try: + await client.disconnect() + except Exception: + pass diff --git a/tests/test_websocket_root_namespace.py b/tests/test_websocket_root_namespace.py new file mode 100644 index 0000000000..53420fc2bf --- /dev/null +++ b/tests/test_websocket_root_namespace.py @@ -0,0 +1,183 @@ +import asyncio +import contextlib +import socket +from typing import Any, AsyncIterator + +import pytest + + +@contextlib.asynccontextmanager +async def _run_asgi_app(app: Any) -> AsyncIterator[str]: + import uvicorn + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(("127.0.0.1", 0)) + sock.listen(128) + + port = sock.getsockname()[1] + + config = uvicorn.Config( + app, + host="127.0.0.1", + port=port, + log_level="warning", + access_log=False, + lifespan="off", + ) + server = uvicorn.Server(config) + server.install_signal_handlers = lambda: None # type: ignore[method-assign] + + task = asyncio.create_task(server.serve(sockets=[sock])) + try: + while not server.started: + await asyncio.sleep(0.01) + yield f"http://127.0.0.1:{port}" + finally: + server.should_exit = True + try: + await asyncio.wait_for(task, timeout=5) + finally: + sock.close() + + +@pytest.mark.asyncio +async def test_root_namespace_request_style_calls_resolve_with_no_handlers() -> None: + """ + CONTRACT.INVARIANT.NS.ROOT.UNHANDLED: root (`/`) is reserved and unhandled for application + events by default, but request-style calls must not hang (NO_HANDLERS). 
+ """ + + from flask import Flask + import socketio + + from python.helpers.websocket import WebSocketHandler + from python.helpers.websocket_manager import WebSocketManager + from run_ui import configure_websocket_namespaces + + app = Flask("test_ws_root_namespace") + app.secret_key = "test-secret" + + calls: list[str] = [] + + class HelloHandler(WebSocketHandler): + @classmethod + def requires_auth(cls) -> bool: + return False + + @classmethod + def requires_csrf(cls) -> bool: + return False + + @classmethod + def get_event_types(cls) -> list[str]: + return ["hello_request"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + calls.append(sid) + return {"hello": True} + + HelloHandler._reset_instance_for_testing() + + sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*", namespaces="*") + lock = __import__("threading").RLock() + manager = WebSocketManager(sio, lock) + + configure_websocket_namespaces( + webapp=app, + socketio_server=sio, + websocket_manager=manager, + handlers_by_namespace={ + "/state_sync": [HelloHandler.get_instance(sio, lock)], + }, + ) + + asgi_app = socketio.ASGIApp(sio) + + async with _run_asgi_app(asgi_app) as base_url: + client = socketio.AsyncClient() + await client.connect( + base_url, + namespaces=["/"], + headers={"Origin": base_url}, + wait_timeout=2, + ) + try: + res_unknown = await client.call("unknown_event", {"x": 1}, namespace="/", timeout=2) + assert res_unknown["results"][0]["ok"] is False + assert res_unknown["results"][0]["error"]["code"] == "NO_HANDLERS" + + res_known_elsewhere = await client.call("hello_request", {"name": "x"}, namespace="/", timeout=2) + assert res_known_elsewhere["results"][0]["ok"] is False + assert res_known_elsewhere["results"][0]["error"]["code"] == "NO_HANDLERS" + assert calls == [] + finally: + await client.disconnect() + + +@pytest.mark.asyncio +async def test_root_namespace_fire_and_forget_does_not_invoke_application_handlers() -> None: + """ + Fire-and-forget emits on `/` must not invoke any application handler. 
+ """ + + from flask import Flask + import socketio + + from python.helpers.websocket import WebSocketHandler + from python.helpers.websocket_manager import WebSocketManager + from run_ui import configure_websocket_namespaces + + app = Flask("test_ws_root_fire_and_forget") + app.secret_key = "test-secret" + + calls: list[str] = [] + + class SideEffectHandler(WebSocketHandler): + @classmethod + def requires_auth(cls) -> bool: + return False + + @classmethod + def requires_csrf(cls) -> bool: + return False + + @classmethod + def get_event_types(cls) -> list[str]: + return ["hello_request"] + + async def process_event(self, event_type: str, data: dict[str, Any], sid: str): + calls.append(sid) + return {"ok": True} + + SideEffectHandler._reset_instance_for_testing() + + sio = socketio.AsyncServer(async_mode="asgi", cors_allowed_origins="*", namespaces="*") + lock = __import__("threading").RLock() + manager = WebSocketManager(sio, lock) + + configure_websocket_namespaces( + webapp=app, + socketio_server=sio, + websocket_manager=manager, + handlers_by_namespace={ + "/state_sync": [SideEffectHandler.get_instance(sio, lock)], + }, + ) + + asgi_app = socketio.ASGIApp(sio) + + async with _run_asgi_app(asgi_app) as base_url: + client = socketio.AsyncClient() + await client.connect( + base_url, + namespaces=["/"], + headers={"Origin": base_url}, + wait_timeout=2, + ) + try: + await client.emit("hello_request", {"name": "x"}, namespace="/") + await asyncio.sleep(0.1) + assert calls == [] + finally: + await client.disconnect() diff --git a/tests/websocket_namespace_test_utils.py b/tests/websocket_namespace_test_utils.py new file mode 100644 index 0000000000..f8252e97df --- /dev/null +++ b/tests/websocket_namespace_test_utils.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any +from unittest.mock import AsyncMock + + +ConnectionIdentity = tuple[str, str] # (namespace, sid) + + +def nsid(namespace: str, sid: str) -> ConnectionIdentity: + return (namespace, sid) + + +@dataclass(frozen=True) +class SocketIOCall: + args: tuple[Any, ...] + kwargs: dict[str, Any] + + @property + def namespace(self) -> str | None: + value = self.kwargs.get("namespace") + if value is None: + return None + if not isinstance(value, str): + raise TypeError(f"Expected namespace to be str, got {type(value).__name__}") + return value + + +class FakeSocketIOServer: + """ + Test double for python-socketio AsyncServer. + + Captures calls and surfaces the optional Socket.IO namespace dimension via recorded kwargs. + """ + + def __init__(self) -> None: + self._emit_calls: list[SocketIOCall] = [] + self._disconnect_calls: list[SocketIOCall] = [] + + self.emit = AsyncMock(side_effect=self._emit) + self.disconnect = AsyncMock(side_effect=self._disconnect) + + async def _emit(self, *args: Any, **kwargs: Any) -> None: + self._emit_calls.append(SocketIOCall(args=args, kwargs=dict(kwargs))) + + async def _disconnect(self, *args: Any, **kwargs: Any) -> None: + self._disconnect_calls.append(SocketIOCall(args=args, kwargs=dict(kwargs))) + + @property + def emit_calls(self) -> list[SocketIOCall]: + return self._emit_calls + + @property + def disconnect_calls(self) -> list[SocketIOCall]: + return self._disconnect_calls diff --git a/webui/components/chat/top-section/chat-top.html b/webui/components/chat/top-section/chat-top.html index 179495d6cc..32a4c9edb2 100644 --- a/webui/components/chat/top-section/chat-top.html +++ b/webui/components/chat/top-section/chat-top.html @@ -18,17 +18,7 @@
-
- - - - - - - -
+ @@ -40,4 +30,4 @@ - \ No newline at end of file + diff --git a/webui/components/notifications/notification-store.js b/webui/components/notifications/notification-store.js index dbe96a929a..f22dfaabde 100644 --- a/webui/components/notifications/notification-store.js +++ b/webui/components/notifications/notification-store.js @@ -592,10 +592,10 @@ const model = { // Add to bottom of stack (newest at bottom) this.toastStack.push(toast); - // Enforce max stack limit (remove oldest from top) - if (this.toastStack.length > this.maxToastStack) { - const removed = this.toastStack.shift(); // Remove from top - if (removed.autoRemoveTimer) { + // Enforce max stack limit (remove oldest). + while (this.toastStack.length > maxToasts) { + const removed = this.toastStack.shift(); + if (removed?.autoRemoveTimer) { clearTimeout(removed.autoRemoveTimer); } } @@ -646,7 +646,7 @@ const model = { console.log("Backend disconnected, showing as frontend-only toast"); } } - + // Fallback to frontend-only toast return this.addFrontendToastOnly( type, @@ -683,7 +683,8 @@ const model = { title = "Warning", display_time = 5, group = "", - priority = defaultPriority + priority = defaultPriority, + frontendOnly = false ) { return await this.addFrontendToast( NotificationType.WARNING, diff --git a/webui/components/settings/developer/websocket-event-console-store.js b/webui/components/settings/developer/websocket-event-console-store.js new file mode 100644 index 0000000000..de5d521a64 --- /dev/null +++ b/webui/components/settings/developer/websocket-event-console-store.js @@ -0,0 +1,247 @@ +import { createStore } from "/js/AlpineStore.js"; +import { getNamespacedClient } from "/js/websocket.js"; +import { store as notificationStore } from "/components/notifications/notification-store.js"; + +const websocket = getNamespacedClient("/dev_websocket_test"); + +const DIAGNOSTIC_EVENT = "ws_dev_console_event"; +const SUBSCRIBE_EVENT = "ws_event_console_subscribe"; +const UNSUBSCRIBE_EVENT = "ws_event_console_unsubscribe"; +const MAX_ENTRIES = 200; +const CAPTURE_ENABLED_KEY = "a0.websocket_event_console.capture_enabled"; + +const model = { + entries: [], + isEnabled: false, + captureEnabled: false, + subscriptionActive: false, + showHandledOnly: false, + lastError: null, + _consoleCallback: null, + _lifecycleBound: false, + _entrySeq: 0, + + init() { + this.isEnabled = Boolean(window.runtimeInfo?.isDevelopment); + if (!this.isEnabled) { + this.captureEnabled = false; + return; + } + + this._bindLifecycle(); + this.captureEnabled = this._loadCaptureEnabled(); + }, + + onOpen() { + // `init()` is called once when the store is registered; `onOpen()` is called + // every time the component is displayed (modal open). + this.init(); + if (!this.isEnabled) return; + if (this.captureEnabled) { + this.attach({ notify: false }); + } + }, + + _bindLifecycle() { + if (this._lifecycleBound) return; + this._lifecycleBound = true; + + websocket.onDisconnect(() => { + // Watcher subscriptions are per-sid and cleared server-side on disconnect. + this.subscriptionActive = false; + }); + + websocket.onConnect(() => { + if (!this.captureEnabled) return; + // Re-subscribe after reconnect (server watcher set is per-sid). 
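+      // notify=false keeps routine reconnects quiet (no toast per reconnect).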
+ this._subscribe({ notify: false }); + }); + }, + + _loadCaptureEnabled() { + try { + const raw = window.localStorage?.getItem(CAPTURE_ENABLED_KEY); + return raw === "1" || raw === "true"; + } catch (error) { + return false; + } + }, + + _persistCaptureEnabled(enabled) { + try { + window.localStorage?.setItem(CAPTURE_ENABLED_KEY, enabled ? "1" : "0"); + } catch (error) { + // Ignore storage failures (private mode, etc). + } + }, + + async startCapture() { + await this.setCaptureEnabled(true, { notify: true }); + }, + + async stopCapture() { + await this.setCaptureEnabled(false, { notify: true }); + }, + + async setCaptureEnabled(enabled, { notify = true } = {}) { + if (!this.isEnabled) return; + + const desired = Boolean(enabled); + if (this.captureEnabled === desired) { + if (desired) { + await this.attach({ notify: false }); + } + return; + } + + this.captureEnabled = desired; + this._persistCaptureEnabled(desired); + + if (desired) { + await this.attach({ notify }); + return; + } + + await this.detach({ notify }); + }, + + async _subscribe({ notify = true } = {}) { + if (!this.isEnabled) return; + if (this.subscriptionActive) return; + + try { + await websocket.request(SUBSCRIBE_EVENT, { + requestedAt: new Date().toISOString(), + }); + this.subscriptionActive = true; + this.lastError = null; + + if (notify) { + notificationStore.frontendInfo( + "WebSocket diagnostics capture enabled", + "Event Console", + 4, + ); + } + } catch (error) { + this.handleError(error); + throw error; + } + }, + + async attach({ notify = true } = {}) { + if (!this.isEnabled) return; + if (this.subscriptionActive && this._consoleCallback) return; + + try { + await websocket.connect(); + + if (!this._consoleCallback) { + this._consoleCallback = (envelope) => { + try { + this.addEntry(envelope); + } catch (error) { + this.handleError(error); + } + }; + + await websocket.on(DIAGNOSTIC_EVENT, this._consoleCallback); + } + + await this._subscribe({ notify }); + } catch (error) { + this.handleError(error); + throw error; + } + }, + + async detach({ notify = false } = {}) { + if (this._consoleCallback) { + websocket.off(DIAGNOSTIC_EVENT, this._consoleCallback); + this._consoleCallback = null; + } + if (this.subscriptionActive) { + try { + await websocket.request(UNSUBSCRIBE_EVENT, {}); + } catch (error) { + this.handleError(error); + } + } + this.subscriptionActive = false; + + if (notify) { + notificationStore.frontendInfo( + "WebSocket diagnostics capture disabled", + "Event Console", + 3, + ); + } + }, + + async reconnect() { + if (!this.isEnabled) return; + if (!this.captureEnabled) { + await this.startCapture(); + return; + } + + await this.detach({ notify: false }); + await this.attach({ notify: true }); + }, + + handleError(error) { + const message = error?.message || String(error || "Unknown error"); + this.lastError = message; + notificationStore.frontendError(message, "WebSocket Event Console", 6); + }, + + addEntry(envelope) { + const payload = envelope?.data || {}; + const entry = { + kind: payload.kind || "unknown", + sourceNamespace: payload.sourceNamespace || payload.namespace || null, + eventType: payload.eventType || payload.event || "unknown", + eventId: envelope?.eventId || null, + sid: payload.sid || null, + correlationId: payload.correlationId || envelope?.correlationId || null, + timestamp: payload.timestamp || envelope?.ts || new Date().toISOString(), + handlerId: payload.handlerId || envelope?.handlerId || "WebSocketManager", + resultSummary: payload.resultSummary || {}, + payloadSummary: 
payload.payloadSummary || {}, + delivered: payload.delivered ?? null, + buffered: payload.buffered ?? null, + targets: Array.isArray(payload.targets) ? payload.targets : [], + targetCount: payload.targetCount ?? null, + }; + if (!entry.eventId) { + this._entrySeq += 1; + entry.eventId = `evt_${this._entrySeq}`; + } + entry.hasHandlers = + (entry.resultSummary?.handlerCount ?? entry.resultSummary?.ok ?? 0) > 0; + + this.entries.push(entry); + if (this.entries.length > MAX_ENTRIES) { + this.entries.shift(); + } + }, + + filteredEntries() { + if (!this.showHandledOnly) { + return this.entries; + } + return this.entries.filter( + (entry) => + entry.kind !== "inbound" || + entry.hasHandlers || + entry.resultSummary?.error > 0, + ); + }, + + clear() { + this.entries = []; + }, +}; + +const store = createStore("websocketEventConsoleStore", model); +export { store }; diff --git a/webui/components/settings/developer/websocket-event-console.html b/webui/components/settings/developer/websocket-event-console.html new file mode 100644 index 0000000000..f7006e3d21 --- /dev/null +++ b/webui/components/settings/developer/websocket-event-console.html @@ -0,0 +1,271 @@ + + + WebSocket Event Console + + + +
+ + + +
+ + + + diff --git a/webui/components/settings/developer/websocket-test-store.js b/webui/components/settings/developer/websocket-test-store.js new file mode 100644 index 0000000000..d8868c9aac --- /dev/null +++ b/webui/components/settings/developer/websocket-test-store.js @@ -0,0 +1,924 @@ +import { createStore } from "/js/AlpineStore.js"; +import { + getNamespacedClient, + createCorrelationId, + validateServerEnvelope, +} from "/js/websocket.js"; +import { store as notificationStore } from "/components/notifications/notification-store.js"; +import { store as chatsStore } from "/components/sidebar/chats/chats-store.js"; +import { store as syncStore } from "/components/sync/sync-store.js"; + +const MAX_PAYLOAD_BYTES = 50 * 1024 * 1024; +const TOAST_DURATION = 5; + +const websocket = getNamespacedClient("/dev_websocket_test"); +const stateSocket = getNamespacedClient("/state_sync"); + +function now() { + return new Date().toISOString(); +} + +function payloadSize(value) { + try { + return new TextEncoder().encode(JSON.stringify(value ?? null)).length; + } catch (_error) { + return String(value ?? "").length * 2; + } +} + +function clientForEventType(eventType) { + if (typeof eventType === "string" && eventType.startsWith("state_")) { + return stateSocket; + } + return websocket; +} + +async function showToast(type, message, title) { + const normalized = (type || "info").toLowerCase(); + switch (normalized) { + case "error": + return notificationStore.addFrontendToastOnly( + "error", + message, + title || "Error", + TOAST_DURATION, + "ws-harness", + 10, + ); + case "success": + return notificationStore.addFrontendToastOnly( + "success", + message, + title || "Success", + TOAST_DURATION, + "ws-harness", + 10, + ); + case "warning": + return notificationStore.addFrontendToastOnly( + "warning", + message, + title || "Warning", + TOAST_DURATION, + "ws-harness", + 10, + ); + case "info": + default: + return notificationStore.addFrontendToastOnly( + "info", + message, + title || "Info", + TOAST_DURATION, + "ws-harness", + 10, + ); + } +} + +function withTimeout(promise, timeoutMs, label) { + const normalizedTimeout = Number(timeoutMs); + if (!Number.isFinite(normalizedTimeout) || normalizedTimeout <= 0) { + return Promise.resolve(promise); + } + return new Promise((resolve, reject) => { + const timer = setTimeout(() => { + reject(new Error(`${label} timed out after ${normalizedTimeout}ms`)); + }, normalizedTimeout); + Promise.resolve(promise).then( + (value) => { + clearTimeout(timer); + resolve(value); + }, + (error) => { + clearTimeout(timer); + reject(error); + }, + ); + }); +} + +const model = { + logs: "", + running: false, + manualRunning: false, + subscriptionCount: 0, + lastAggregated: null, + receivedBroadcasts: [], + isEnabled: false, + _serverRestartHandler: null, + _subscriptionHandlers: null, + _broadcastSeq: 0, + + init() { + this.isEnabled = Boolean(window.runtimeInfo?.isDevelopment); + }, + + onOpen() { + // `init()` is called once when the store is registered; `onOpen()` is called + // every time the component is displayed (modal open). + this.init(); + + if (this.isEnabled) { + this.appendLog("WebSocket tester harness ready."); + if (this._serverRestartHandler) { + websocket.off("server_restart", this._serverRestartHandler); + } + this._serverRestartHandler = (payload) => { + try { + const envelope = validateServerEnvelope(payload); + this.appendLog( + `server_restart received (runtimeId=${envelope.data.runtimeId ?? 
"unknown"})`, + ); + } catch (error) { + this.appendLog(`server_restart envelope invalid: ${error.message || error}`); + } + }; + websocket + .on("server_restart", this._serverRestartHandler) + .catch((error) => { + this.appendLog(`Failed to subscribe to server_restart: ${error.message || error}`); + }); + } else { + this.appendLog("WebSocket tester harness is available only in development runtime."); + } + }, + + detach() { + if (this._subscriptionHandlers && typeof this._subscriptionHandlers === "object") { + for (const [eventType, handler] of Object.entries(this._subscriptionHandlers)) { + if (typeof handler === "function") { + clientForEventType(eventType).off(eventType, handler); + } + } + this._subscriptionHandlers = null; + } + if (this._serverRestartHandler) { + websocket.off("server_restart", this._serverRestartHandler); + this._serverRestartHandler = null; + } else { + websocket.off("server_restart"); + } + // Legacy cleanup: ensure we do not leave stray tester handlers attached. + websocket.off("ws_tester_broadcast"); + websocket.off("ws_tester_persistence"); + websocket.off("ws_tester_broadcast_demo"); + stateSocket.off("state_push"); + }, + + appendLog(message) { + this.logs += `[${now()}] ${message}\n`; + }, + + clearLog() { + this.logs = ""; + this.appendLog("Log cleared."); + }, + + assertEnabled() { + if (!this.isEnabled) { + throw new Error("WebSocket harness is available only in development runtime."); + } + }, + + async ensureConnected() { + this.assertEnabled(); + if (!websocket.isConnected()) { + this.appendLog("Connecting WebSocket client..."); + await withTimeout(websocket.connect(), 5000, "websocket.connect"); + this.appendLog("Connected to WebSocket server."); + } + }, + + async _toast(type, message, title) { + try { + await showToast(type, message, title); + } catch (error) { + this.appendLog(`Toast failed: ${error.message || error}`); + } + }, + + async runAutomaticSuite() { + this.assertEnabled(); + if (this.running) return; + this.running = true; + this.lastAggregated = null; + this.receivedBroadcasts = []; + this._broadcastSeq = 0; + + const results = []; + + const steps = [ + this.testEmit.bind(this), + this.testRequest.bind(this), + this.testRequestTimeout.bind(this), + this.testSubscriptionPersistence.bind(this), + this.testRequestAll.bind(this), + this.testStateSyncNoPollHealthy.bind(this), + this.testContextSwitchNoLeak.bind(this), + this.testFallbackRecoveryDegraded.bind(this), + this.testResyncTriggersRuntimeEpochAndSeqGap.bind(this), + ]; + + try { + this.appendLog("Starting automatic WebSocket validation suite..."); + await this.ensureConnected(); + + for (const step of steps) { + const result = await step(); + results.push(result); + if (!result.ok) { + await this._toast("warning", `Automatic suite halted: ${result.label} failed`, "WebSocket Harness"); + this.appendLog(`Automatic suite halted on step: ${result.label} (${result.error || 'unknown error'})`); + this.running = false; + return; + } + } + + await this._toast("success", "Automatic WebSocket validation succeeded", "WebSocket Harness"); + this.appendLog("Automatic suite completed successfully."); + } catch (error) { + this.appendLog(`Automatic suite failed: ${error.message || error}`); + await this._toast("error", `Automatic suite failed: ${error.message || error}`, "WebSocket Harness"); + } finally { + this.running = false; + } + }, + + async manualStep(stepFn) { + this.assertEnabled(); + if (this.manualRunning) return; + this.manualRunning = true; + try { + await this.ensureConnected(); + 
const result = await stepFn(); + this.appendLog( + `${result.ok ? "PASS" : "FAIL"} - ${result.label}${result.error ? `: ${result.error}` : ""}`, + ); + if (result.ok) { + await this._toast("success", `${result.label} succeeded`, "WebSocket Harness"); + } else { + await this._toast("warning", `${result.label} failed: ${result.error}`, "WebSocket Harness"); + } + } catch (error) { + await this._toast("error", `${error.message || error}`, "WebSocket Harness"); + this.appendLog(`Manual step error: ${error.message || error}`); + } finally { + this.manualRunning = false; + } + }, + + async testEmit() { + const label = "Fire-and-forget emit"; + try { + this.appendLog("Testing fire-and-forget emit..."); + await this.ensureSubscribed("ws_tester_broadcast", true); + const emitOptions = { + correlationId: createCorrelationId("harness-emit"), + }; + await websocket.emit( + "ws_tester_emit", + { message: "emit-check", timestamp: now() }, + emitOptions, + ); + const received = await this.waitForEvent( + "ws_tester_broadcast", + (_data, envelope) => + envelope?.data?.message === "emit-check" && + typeof envelope?.handlerId === "string" && + typeof envelope?.eventId === "string" && + typeof envelope?.correlationId === "string" && + typeof envelope?.ts === "string", + ); + this.appendLog("Received broadcast echo with valid envelope metadata."); + return { ok: received, label, error: received ? undefined : "Envelope validation failed" }; + } catch (error) { + this.appendLog(`${label} failed: ${error.message || error}`); + return { ok: false, label, error: error.message || error }; + } + }, + + async testRequest() { + const label = "Request-response"; + try { + this.appendLog("Testing request-response..."); + const requestOptions = { + correlationId: createCorrelationId("harness-request"), + }; + const response = await websocket.request( + "ws_tester_request", + { value: 42 }, + { ...requestOptions }, + ); + const delayedResponse = await websocket.request( + "ws_tester_request_delayed", + { delay_ms: 750 }, + { correlationId: createCorrelationId("harness-request-no-timeout") }, + ); + const first = response.results?.[0]; + const ok = Boolean( + response?.correlationId && + Array.isArray(response.results) && + first?.ok === true && + first?.handlerId && + first?.correlationId === response.correlationId && + first?.data?.echo === 42, + ); + const delayedOk = Boolean( + Array.isArray(delayedResponse.results) && + delayedResponse.results[0]?.ok === true && + delayedResponse.results[0]?.data?.status === "delayed", + ); + this.appendLog(`Request-response result: ${JSON.stringify(response)}`); + this.appendLog(`Request-response (no-timeout) result: ${JSON.stringify(delayedResponse)}`); + return { + ok: ok && delayedOk, + label, + error: ok && delayedOk ? 
undefined : "Unexpected response payload or default timeout behaviour", + }; + } catch (error) { + this.appendLog(`${label} failed: ${error.message || error}`); + return { ok: false, label, error: error.message || error }; + } + }, + + async testRequestTimeout() { + const label = "Request timeout"; + try { + this.appendLog("Testing request timeout..."); + let threw = false; + try { + const timeoutOptions = { + correlationId: createCorrelationId("harness-timeout"), + }; + await websocket.request( + "ws_tester_request_delayed", + { delay_ms: 2000 }, + { timeoutMs: 500, ...timeoutOptions }, + ); + } catch (error) { + threw = error.message === "Request timeout"; + if (!threw) { + throw error; + } + } + if (threw) { + this.appendLog("Timeout correctly triggered."); + return { ok: true, label }; + } + this.appendLog("Timeout test failed: request resolved unexpectedly."); + return { ok: false, label, error: "Request resolved but should timeout" }; + } catch (error) { + this.appendLog(`${label} failed: ${error.message || error}`); + return { ok: false, label, error: error.message || error }; + } + }, + + async testSubscriptionPersistence() { + const label = "Subscription persistence"; + try { + this.appendLog("Testing subscription persistence across reconnect..."); + await this.ensureSubscribed("ws_tester_persistence", true); + const emitOptions = { + correlationId: createCorrelationId("harness-persistence"), + }; + await websocket.emit("ws_tester_trigger_persistence", { phase: "before" }, emitOptions); + await this.waitForEvent("ws_tester_persistence", (data) => data?.phase === "before"); + this.appendLog("Initial subscription event received."); + + websocket.socket.disconnect(); + this.appendLog("Disconnected socket manually."); + await websocket.connect(); + this.appendLog("Reconnected socket."); + + await websocket.emit( + "ws_tester_trigger_persistence", + { phase: "after" }, + emitOptions, + ); + const received = await this.waitForEvent("ws_tester_persistence", (data) => data?.phase === "after", 2000); + this.appendLog("Post-reconnect event received."); + return { ok: received, label, error: received ? undefined : "Callback not triggered after reconnect" }; + } catch (error) { + this.appendLog(`${label} failed: ${error.message || error}`); + return { ok: false, label, error: error.message || error }; + } + }, + + async testRequestAll() { + const label = "requestAll aggregation"; + try { + this.appendLog("Testing requestAll aggregation..."); + const options = { + correlationId: createCorrelationId("harness-requestAll"), + }; + const response = await websocket.request( + "ws_tester_request_all", + { marker: "aggregate" }, + { timeoutMs: 2000, ...options }, + ); + this.lastAggregated = response; + + const first = response?.results?.[0]; + const aggregated = first?.ok === true ? first?.data?.results : null; + const ok = + Array.isArray(aggregated) && + aggregated.length > 0 && + aggregated.every( + (entry) => + typeof entry?.sid === "string" && + typeof entry?.correlationId === "string" && + Array.isArray(entry.results) && + entry.results.length > 0, + ); + + this.appendLog(`ws_tester_request_all response: ${JSON.stringify(response)}`); + return { ok, label, error: ok ? 
undefined : "Aggregation payload missing expected metadata" }; + } catch (error) { + this.appendLog(`${label} failed: ${error.message || error}`); + return { ok: false, label, error: error.message || error }; + } + }, + + async testStateSyncNoPollHealthy() { + const label = "State sync (state_request/state_push + no poll when HEALTHY)"; + const originalPoll = globalThis.poll; + let pollCalls = 0; + try { + this.appendLog("Testing state_request/state_push contract and healthy-mode poll suppression..."); + + if (typeof originalPoll === "function") { + globalThis.poll = async (...args) => { + pollCalls += 1; + return await originalPoll(...args); + }; + } + + await this.ensureSubscribed("state_push", true); + this.appendLog("Subscribed to state_push."); + + const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone; + const response = await stateSocket.request( + "state_request", + { + context: globalThis.getContext ? globalThis.getContext() : null, + log_from: 0, + notifications_from: 0, + timezone, + }, + { timeoutMs: 2000, correlationId: createCorrelationId("harness-state-request") }, + ); + + const first = response?.results?.[0]; + const requestOk = Boolean( + response?.correlationId && + first?.ok === true && + typeof first?.data?.runtime_epoch === "string" && + typeof first?.data?.seq_base === "number", + ); + if (!requestOk) { + this.appendLog(`state_request response invalid: ${JSON.stringify(response)}`); + return { ok: false, label, error: "state_request did not return expected {runtime_epoch, seq_base}" }; + } + this.appendLog("state_request OK."); + + const start = Date.now(); + let pushOk = false; + while (Date.now() - start < 1000) { + const hit = this.receivedBroadcasts.find( + (entry) => + entry.eventType === "state_push" && + typeof entry?.payload?.data?.runtime_epoch === "string" && + typeof entry?.payload?.data?.seq === "number" && + entry?.payload?.data?.snapshot && + typeof entry?.payload?.data?.snapshot === "object" && + Array.isArray(entry?.payload?.data?.snapshot?.contexts) && + Array.isArray(entry?.payload?.data?.snapshot?.tasks) && + Array.isArray(entry?.payload?.data?.snapshot?.notifications), + ); + if (hit) { + pushOk = true; + break; + } + await new Promise((resolve) => setTimeout(resolve, 25)); + } + + if (!pushOk) { + return { ok: false, label, error: "Did not observe state_push within 1s after handshake" }; + } + this.appendLog("state_push observed."); + + // The sync store applies snapshots asynchronously; give it a moment to + // reach HEALTHY before asserting poll suppression. + const startedHealthyWait = Date.now(); + while (Date.now() - startedHealthyWait < 1000 && syncStore.mode !== "HEALTHY") { + await new Promise((resolve) => setTimeout(resolve, 25)); + } + if (syncStore.mode !== "HEALTHY") { + const mode = typeof syncStore.mode === "string" ? syncStore.mode : "missing"; + return { ok: false, label, error: `syncStore did not reach HEALTHY mode (mode=${mode})` }; + } + + // Reset count after the store is HEALTHY; then observe for >1 poll interval. 
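+      // While HEALTHY the fallback loop must not call poll() at all, so any
+      // counted invocation during the observation window is a failure.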
+ pollCalls = 0; + await new Promise((resolve) => setTimeout(resolve, 600)); + const noPoll = pollCalls === 0; + if (!noPoll) { + return { ok: false, label, error: `poll() invoked ${pollCalls}x while HEALTHY` }; + } + + return { ok: true, label }; + } catch (error) { + this.appendLog(`${label} failed: ${error.message || error}`); + return { ok: false, label, error: error.message || error }; + } finally { + if (typeof originalPoll === "function") { + globalThis.poll = originalPoll; + } + } + }, + + async testContextSwitchNoLeak() { + const label = "Context switching (state_request updates active context, no stale pushes)"; + const originalContext = typeof globalThis.getContext === "function" ? globalThis.getContext() : null; + try { + this.appendLog("Testing context switching does not leak or keep pushing stale contexts..."); + await this.ensureSubscribed("state_push", true); + + if (!Array.isArray(chatsStore.contexts)) { + return { ok: false, label, error: "chats store not available" }; + } + + const ids = chatsStore.contexts + .map((ctx) => ctx?.id) + .filter((id) => typeof id === "string" && id.length > 0); + const unique = Array.from(new Set(ids)); + if (unique.length < 2) { + return { ok: false, label, error: "Need at least 2 chats to validate switching" }; + } + + const current = typeof originalContext === "string" ? originalContext : null; + let first = unique[0]; + let second = unique[1]; + if (current && unique.includes(current)) { + const alternate = unique.find((id) => id !== current); + if (!alternate) { + return { ok: false, label, error: "Need at least 2 distinct chats to validate switching" }; + } + first = alternate; + second = current; + } + + const switchTo = async (ctxid) => { + if (typeof chatsStore.selectChat === "function") { + await chatsStore.selectChat(ctxid); + return; + } + if (typeof globalThis.setContext === "function") { + globalThis.setContext(ctxid); + return; + } + throw new Error("No chat selection function available"); + }; + + const waitForContextPush = async (ctxid, timeoutMs = 2000) => { + return await this.waitForEvent( + "state_push", + (data) => data?.snapshot?.context === ctxid, + timeoutMs, + ); + }; + + const waitFirst = waitForContextPush(first, 2500); + await switchTo(first); + const gotFirst = await waitFirst; + if (!gotFirst) { + return { ok: false, label, error: "Did not observe state_push for first context after switch" }; + } + + const switchedAt = Date.now(); + const waitSecond = waitForContextPush(second, 2500); + await switchTo(second); + const gotSecond = await waitSecond; + if (!gotSecond) { + return { ok: false, label, error: "Did not observe state_push for second context after switch" }; + } + + // After switching, we should not observe new pushes for the old context. 
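+      // Brief settle window: any state_push for the old context stamped after
+      // `switchedAt` is treated as a leak by the check below.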
+ await new Promise((resolve) => setTimeout(resolve, 300)); + const stale = this.receivedBroadcasts.find((entry) => { + if (entry.eventType !== "state_push") return false; + const timestamp = Date.parse(entry.timestamp); + if (!Number.isFinite(timestamp) || timestamp < switchedAt) return false; + return entry?.payload?.data?.snapshot?.context === first; + }); + if (stale) { + return { ok: false, label, error: "Observed state_push for previous context after switching" }; + } + + return { ok: true, label }; + } catch (error) { + this.appendLog(`${label} failed: ${error.message || error}`); + return { ok: false, label, error: error.message || error }; + } finally { + if (originalContext && typeof originalContext === "string") { + try { + if (typeof chatsStore.selectChat === "function") { + await chatsStore.selectChat(originalContext); + } else if (typeof globalThis.setContext === "function") { + globalThis.setContext(originalContext); + } + } catch (_error) { + // no-op + } + } + } + }, + + async testFallbackRecoveryDegraded() { + const label = "Fallback + recovery (DEGRADED polling, ignore pushes)"; + const originalPoll = globalThis.poll; + const originalRequest = stateSocket.request; + try { + if (typeof syncStore.sendStateRequest !== "function") { + return { ok: false, label, error: "syncStore.sendStateRequest not available" }; + } + + // Ensure we start from a known-good state. + await syncStore.sendStateRequest({ forceFull: true }); + if (syncStore.mode !== "HEALTHY") { + return { ok: false, label, error: `Expected HEALTHY before test, got ${syncStore.mode}` }; + } + + // Stub poll to avoid network side-effects and track calls. + let pollCalls = 0; + globalThis.poll = async () => { + pollCalls += 1; + return { ok: true, updated: false }; + }; + + // Simulate state_request failures to force DEGRADED mode. + stateSocket.request = async (eventType, payload, options) => { + if (eventType === "state_request") { + throw new Error("Request timeout"); + } + return await originalRequest.call(stateSocket, eventType, payload, options); + }; + + let threw = false; + try { + await syncStore.sendStateRequest({ forceFull: true }); + } catch (_error) { + threw = true; + } + if (!threw) { + return { ok: false, label, error: "Expected state_request failure but request succeeded" }; + } + + if (syncStore.mode !== "DEGRADED") { + return { ok: false, label, error: `Expected DEGRADED after failure, got ${syncStore.mode}` }; + } + this.appendLog("Entered DEGRADED mode after simulated state_request failure."); + + // Poll fallback should kick in quickly (1Hz idle); wait long enough for at least one tick. + await new Promise((resolve) => setTimeout(resolve, 1200)); + if (pollCalls < 1) { + return { ok: false, label, error: "poll() was not invoked while DEGRADED" }; + } + + // While DEGRADED, pushes should be ignored (single-writer arbitration). + const lastSeqBefore = typeof syncStore.lastSeq === "number" ? syncStore.lastSeq : 0; + await syncStore._handlePush({ + data: { + runtime_epoch: typeof syncStore.runtimeEpoch === "string" ? syncStore.runtimeEpoch : "test-epoch", + seq: lastSeqBefore + 1, + snapshot: { ignored: true }, + }, + }); + if (syncStore.lastSeq !== lastSeqBefore) { + return { ok: false, label, error: "state_push advanced seq while DEGRADED (should be ignored)" }; + } + this.appendLog("Verified state_push ignored while DEGRADED."); + + // Recover: restore request path and confirm we return to HEALTHY and polling stops. 
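+      // The same restore also runs in `finally`, so a failed assertion cannot
+      // leave the stubbed request/poll functions in place.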
+ stateSocket.request = originalRequest; + await syncStore.sendStateRequest({ forceFull: true }); + if (syncStore.mode !== "HEALTHY") { + return { ok: false, label, error: `Expected HEALTHY after recovery, got ${syncStore.mode}` }; + } + + pollCalls = 0; + await new Promise((resolve) => setTimeout(resolve, 600)); + if (pollCalls !== 0) { + return { ok: false, label, error: `poll() invoked ${pollCalls}x after recovery to HEALTHY` }; + } + + return { ok: true, label }; + } catch (error) { + this.appendLog(`${label} failed: ${error.message || error}`); + return { ok: false, label, error: error.message || error }; + } finally { + stateSocket.request = originalRequest; + globalThis.poll = originalPoll; + } + }, + + async testResyncTriggersRuntimeEpochAndSeqGap() { + const label = "Resync triggers (runtime_epoch mismatch + seq gap)"; + if (typeof syncStore._handlePush !== "function") { + return { ok: false, label, error: "syncStore._handlePush not available" }; + } + const originalSendStateRequest = syncStore.sendStateRequest; + let calls = []; + try { + if (typeof originalSendStateRequest !== "function") { + return { ok: false, label, error: "syncStore.sendStateRequest not available" }; + } + + syncStore.sendStateRequest = async (options = {}) => { + calls.push(options); + }; + + // Case 1: runtime_epoch mismatch should trigger resync. + calls = []; + syncStore.mode = "HEALTHY"; + syncStore.runtimeEpoch = "epoch-a"; + syncStore.lastSeq = 10; + await syncStore._handlePush({ data: { runtime_epoch: "epoch-b", seq: 11 } }); + const runtimeTriggered = calls.length === 1 && calls[0] && calls[0].forceFull === true; + if (!runtimeTriggered) { + return { ok: false, label, error: "runtime_epoch mismatch did not trigger state_request resync" }; + } + if (syncStore.mode !== "HANDSHAKE_PENDING") { + return { ok: false, label, error: "runtime_epoch resync did not set HANDSHAKE_PENDING" }; + } + + // Case 2: seq gap should trigger resync. + calls = []; + syncStore.mode = "HEALTHY"; + syncStore.runtimeEpoch = "epoch-a"; + syncStore.lastSeq = 10; + await syncStore._handlePush({ data: { runtime_epoch: "epoch-a", seq: 12 } }); + const seqTriggered = calls.length === 1 && calls[0] && calls[0].forceFull === true; + if (!seqTriggered) { + return { ok: false, label, error: "seq gap did not trigger state_request resync" }; + } + if (syncStore.mode !== "HANDSHAKE_PENDING") { + return { ok: false, label, error: "seq gap resync did not set HANDSHAKE_PENDING" }; + } + + return { ok: true, label }; + } catch (error) { + this.appendLog(`${label} failed: ${error.message || error}`); + return { ok: false, label, error: error.message || error }; + } finally { + syncStore.sendStateRequest = originalSendStateRequest; + } + }, + + async ensureSubscribed(eventType, reset = false) { + if (!this._subscriptionHandlers || typeof this._subscriptionHandlers !== "object") { + this._subscriptionHandlers = {}; + } + + const existing = this._subscriptionHandlers[eventType]; + if (reset && typeof existing === "function") { + clientForEventType(eventType).off(eventType, existing); + delete this._subscriptionHandlers[eventType]; + } else if (!reset && typeof existing === "function") { + return; + } + + const handler = (payload) => { + try { + const envelope = validateServerEnvelope(payload); + if (!Array.isArray(this.receivedBroadcasts)) { + this.receivedBroadcasts = []; + } + this._broadcastSeq = (this._broadcastSeq || 0) + 1; + const id = envelope?.eventId + ? 
`${eventType}-${envelope.eventId}` + : `${eventType}-${this._broadcastSeq}`; + this.receivedBroadcasts.push({ + id, + eventType, + payload: envelope, + timestamp: now(), + }); + } catch (error) { + this.appendLog(`Received invalid envelope for ${eventType}: ${error.message || error}`); + } + }; + + this._subscriptionHandlers[eventType] = handler; + await clientForEventType(eventType).on(eventType, handler); + }, + + waitForEvent(eventType, predicate, timeout = 1500) { + return new Promise((resolve) => { + const client = clientForEventType(eventType); + let timer; + let done = false; + let handler = null; + + const finish = (ok) => { + if (done) return; + done = true; + if (timer) clearTimeout(timer); + if (typeof handler === "function") { + client.off(eventType, handler); + } + resolve(ok); + }; + + handler = (data) => { + let envelope; + try { + envelope = validateServerEnvelope(data); + } catch (error) { + this.appendLog(`Skipping invalid envelope for ${eventType}: ${error.message || error}`); + return; + } + + if (predicate(envelope.data, envelope)) { + finish(true); + } + }; + + const onPromise = client.on(eventType, handler); + if (onPromise && typeof onPromise.then === "function") { + onPromise.catch((error) => { + this.appendLog(`Failed to subscribe to ${eventType}: ${error.message || error}`); + finish(false); + }); + } + + timer = setTimeout(() => { + finish(false); + }, timeout); + }); + }, + + async runManualEmit() { + await this.manualStep(this.testEmit.bind(this)); + }, + + async runManualRequest() { + await this.manualStep(this.testRequest.bind(this)); + }, + + async runManualRequestTimeout() { + await this.manualStep(this.testRequestTimeout.bind(this)); + }, + + async runManualPersistence() { + await this.manualStep(this.testSubscriptionPersistence.bind(this)); + }, + + async runManualRequestAll() { + await this.manualStep(this.testRequestAll.bind(this)); + }, + + async runManualStateSync() { + await this.manualStep(this.testStateSyncNoPollHealthy.bind(this)); + }, + + async runManualContextSwitch() { + await this.manualStep(this.testContextSwitchNoLeak.bind(this)); + }, + + async runManualFallbackRecovery() { + await this.manualStep(this.testFallbackRecoveryDegraded.bind(this)); + }, + + async runManualResyncTriggers() { + await this.manualStep(this.testResyncTriggersRuntimeEpochAndSeqGap.bind(this)); + }, + + async triggerBroadcastDemo() { + this.assertEnabled(); + try { + await this.ensureConnected(); + await this.ensureSubscribed("ws_tester_broadcast_demo"); + const options = { + correlationId: createCorrelationId("harness-demo"), + }; + await websocket.emit( + "ws_tester_broadcast_demo_trigger", + { requested_at: now() }, + options, + ); + await this._toast("info", "Broadcast demo triggered. Check log output.", "WebSocket Harness"); + } catch (error) { + await this._toast("error", `Broadcast demo failed: ${error.message || error}`, "WebSocket Harness"); + this.appendLog(`Broadcast demo failed: ${error.message || error}`); + } + }, + + payloadSizePreview(input) { + return payloadSize(input); + }, +}; + +const store = createStore("websocketTesterStore", model); +export { store }; diff --git a/webui/components/settings/developer/websocket-tester.html b/webui/components/settings/developer/websocket-tester.html new file mode 100644 index 0000000000..db87a6601a --- /dev/null +++ b/webui/components/settings/developer/websocket-tester.html @@ -0,0 +1,188 @@ + + + WebSocket Test Harness + + + +
+ + + + diff --git a/webui/components/sidebar/chats/chats-store.js b/webui/components/sidebar/chats/chats-store.js index 722c8e6606..133f0979f9 100644 --- a/webui/components/sidebar/chats/chats-store.js +++ b/webui/components/sidebar/chats/chats-store.js @@ -3,7 +3,6 @@ import { sendJsonData, getContext, setContext, - poll as triggerPoll, updateAfterScroll, toastFetchError, toast, @@ -42,6 +41,16 @@ const model = { this.contexts = contextsList.sort( (a, b) => (b.created_at || 0) - (a.created_at || 0) ); + + // Keep selectedContext in sync when the currently selected context's + // metadata changes (e.g. project activation/deactivation). + if (this.selected) { + const selectedId = this.selected; + const updated = this.contexts.find((ctx) => ctx.id === selectedId); + if (updated) { + this.selectedContext = updated; + } + } }, // Select a chat @@ -55,8 +64,22 @@ const model = { // Update selection state (will also persist to localStorage) this.setSelected(id); - // Trigger immediate poll - triggerPoll(); + // In push mode, context switching triggers a new `state_request` via setContext(). + // Keep polling only as a degraded-mode fallback. + try { + const syncStore = + globalThis.Alpine && typeof globalThis.Alpine.store === "function" + ? globalThis.Alpine.store("sync") + : null; + const mode = syncStore && typeof syncStore.mode === "string" ? syncStore.mode : null; + const shouldFallbackPoll = + Boolean(syncStore) && mode === "DEGRADED"; + if (shouldFallbackPoll && typeof globalThis.poll === "function") { + globalThis.poll(); + } + } catch (_e) { + // no-op + } // Update scroll updateAfterScroll(); @@ -125,12 +148,12 @@ const model = { await sendJsonData("/chat_reset", { context }); - + // Increment reset counter if (typeof globalThis.resetCounter === 'number') { globalThis.resetCounter = globalThis.resetCounter + 1; } - + updateAfterScroll(); } catch (e) { toastFetchError("Error resetting chat", e); @@ -284,47 +307,53 @@ const model = { // Restart the backend async restart() { - try { - // Check connection status - const connectionStatus = getConnectionStatus(); - if (connectionStatus === false) { - await notificationStore.frontendError( - "Backend disconnected, cannot restart.", - "Restart Error" - ); - return; - } - - // Try to initiate restart - const resp = await sendJsonData("/restart", {}); - } catch (e) { - // Show restarting message - await notificationStore.frontendInfo("Restarting...", "System Restart", 9999, "restart"); + // Check connection status (avoid spamming requests when already disconnected) + const connectionStatus = getConnectionStatus(); + if (connectionStatus === false) { + await notificationStore.frontendError( + "Backend disconnected, cannot restart.", + "Restart Error", + ); + return; + } - let retries = 0; - const maxRetries = 240; // 60 seconds with 250ms interval + // Create a backend notification first so other tabs have a chance to show it + // before the process is replaced. + const notificationId = await notificationStore.info( + "Restarting...", + "System Restart", + "", + 9999, + "restart", + ); - while (retries < maxRetries) { + // Best-effort: wait briefly for the notification to arrive via state sync so + // the initiating tab (and typically other tabs) renders the toast before restart. 
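+    // The wait below is bounded (~800ms) and purely cosmetic; the restart
+    // request is sent regardless of whether the toast was observed.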
+ if (notificationId) { + const deadline = Date.now() + 800; + while (Date.now() < deadline) { try { - const resp = await sendJsonData("/health", {}); - // Server is back up - await new Promise((resolve) => setTimeout(resolve, 250)); - await notificationStore.frontendSuccess("Restarted", "System Restart", 5, "restart"); - return; - } catch (e) { - // Server still down, keep waiting - retries++; - await new Promise((resolve) => setTimeout(resolve, 250)); + const store = + globalThis.Alpine && typeof globalThis.Alpine.store === "function" + ? globalThis.Alpine.store("notificationStore") + : null; + const stack = store && Array.isArray(store.toastStack) ? store.toastStack : null; + if (stack && stack.some((toast) => toast && toast.id === notificationId)) { + break; + } + } catch (_err) { + break; } + await new Promise((resolve) => setTimeout(resolve, 25)); } + } - // Restart failed or timed out - await notificationStore.frontendError( - "Restart timed out or failed", - "Restart Error", - 8, - "restart" - ); + // The restart endpoint usually drops the connection as the process is replaced. + // Do not wait on /health - recovery is driven by WebSocket CSRF preflight + reconnect. + try { + await sendJsonData("/restart", {}); + } catch (_e) { + // ignore } } }; diff --git a/webui/components/sync/sync-status.html b/webui/components/sync/sync-status.html new file mode 100644 index 0000000000..1a5d2fba58 --- /dev/null +++ b/webui/components/sync/sync-status.html @@ -0,0 +1,76 @@ + + + + Sync Status + + + + + + +
+ + + diff --git a/webui/components/sync/sync-store.js b/webui/components/sync/sync-store.js new file mode 100644 index 0000000000..ccce3d7c1a --- /dev/null +++ b/webui/components/sync/sync-store.js @@ -0,0 +1,316 @@ +import { createStore } from "/js/AlpineStore.js"; +import { getNamespacedClient } from "/js/websocket.js"; +import { applySnapshot, buildStateRequestPayload } from "/index.js"; +import { store as chatTopStore } from "/components/chat/top-section/chat-top-store.js"; +import { store as notificationStore } from "/components/notifications/notification-store.js"; + +const stateSocket = getNamespacedClient("/state_sync"); + +const SYNC_MODES = { + DISCONNECTED: "DISCONNECTED", + HANDSHAKE_PENDING: "HANDSHAKE_PENDING", + HEALTHY: "HEALTHY", + DEGRADED: "DEGRADED", +}; + +function isDevelopmentRuntime() { + return Boolean(globalThis.runtimeInfo?.isDevelopment); +} + +function isSyncDebugEnabled() { + if (!isDevelopmentRuntime()) return false; + try { + return globalThis.localStorage?.getItem("a0_debug_sync") === "true"; + } catch (_error) { + return false; + } +} + +function debug(...args) { + if (!isSyncDebugEnabled()) return; + // eslint-disable-next-line no-console + console.debug(...args); +} + +const model = { + mode: SYNC_MODES.DISCONNECTED, + initialized: false, + needsHandshake: false, + handshakePromise: null, + _handshakeQueued: false, + _queuedPayload: null, + _inFlightPayload: null, + _seenFirstConnect: false, + _pendingReconnectToast: null, + + runtimeEpoch: null, + seqBase: 0, + lastSeq: 0, + + _setMode(newMode, reason = "") { + const oldMode = this.mode; + if (oldMode === newMode) return; + this.mode = newMode; + debug("[syncStore] Mode transition:", oldMode, "→", newMode, reason ? `(${reason})` : ""); + }, + + async _flushPendingReconnectToast() { + const pending = this._pendingReconnectToast; + if (!pending) return; + this._pendingReconnectToast = null; + + try { + if (pending === "restart") { + await notificationStore.frontendSuccess( + "Restarted", + "System Restart", + 5, + "restart", + undefined, + true, + ); + return; + } + await notificationStore.frontendSuccess( + "Reconnected", + "Connection", + 3, + "reconnect", + undefined, + true, + ); + } catch (error) { + console.error("[syncStore] reconnect toast failed:", error); + } + }, + + async init() { + if (this.initialized) return; + this.initialized = true; + + try { + stateSocket.onConnect((info) => { + chatTopStore.connected = true; + debug("[syncStore] websocket connected", { needsHandshake: this.needsHandshake }); + + const firstConnect = Boolean(info && info.firstConnect); + if (firstConnect) { + this._seenFirstConnect = true; + } else if (this._seenFirstConnect) { + const runtimeChanged = Boolean(info && info.runtimeChanged); + this._pendingReconnectToast = runtimeChanged ? "restart" : "reconnect"; + } + + // Always re-handshake on every Socket.IO connect. + // + // The backend StateMonitor tracking is per-sid and starts with seq_base=0 on a + // newly connected sid. If a tab misses the 'disconnect' event (e.g. browser + // suspended overnight) it can look HEALTHY locally while never sending a + // fresh state_request, so pushes are gated and logs appear to stall. 
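+        // forceFull zeroes the log/notification cursors so the new sid is
+        // seeded with a complete snapshot instead of an incremental diff.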
+ this.sendStateRequest({ forceFull: true }).catch((error) => { + console.error("[syncStore] connect handshake failed:", error); + }); + }); + + stateSocket.onDisconnect(() => { + chatTopStore.connected = false; + this._setMode(SYNC_MODES.DISCONNECTED, "ws disconnect"); + this.needsHandshake = true; + debug("[syncStore] websocket disconnected"); + + // Tab-local UX: brief "Disconnected" toast. This intentionally does not go through + // the backend notification pipeline (no cross-tab intent, avoids request storms). + // Uses the same group as "Reconnected" so the reconnect toast replaces it if still visible. + if (this._seenFirstConnect) { + notificationStore + .frontendWarning("Disconnected", "Connection", 5, "reconnect", undefined, true) + .catch((error) => { + console.error("[syncStore] disconnected toast failed:", error); + }); + } + }); + + await stateSocket.on("state_push", (envelope) => { + this._handlePush(envelope).catch((error) => { + console.error("[syncStore] state_push handler failed:", error); + }); + }); + debug("[syncStore] subscribed to state_push"); + + await this.sendStateRequest({ forceFull: true }); + } catch (error) { + console.error("[syncStore] init failed:", error); + // Initialization failures often mean the socket can't connect; treat as disconnected. + this._setMode(SYNC_MODES.DISCONNECTED, "init failed"); + } + }, + + async sendStateRequest(options = {}) { + const { forceFull = false } = options || {}; + const payload = buildStateRequestPayload({ forceFull }); + return await this._sendStateRequestPayload(payload); + }, + + async _sendStateRequestPayload(payload) { + if (this.handshakePromise) { + const inFlight = this._inFlightPayload; + if ( + inFlight && + payload && + payload.context === inFlight.context && + typeof payload.log_from === "number" && + typeof payload.notifications_from === "number" && + typeof inFlight.log_from === "number" && + typeof inFlight.notifications_from === "number" + ) { + const stronger = + payload.log_from <= inFlight.log_from && + payload.notifications_from <= inFlight.notifications_from && + (payload.log_from < inFlight.log_from || + payload.notifications_from < inFlight.notifications_from); + if (!stronger) { + debug("[syncStore] state_request ignored (in-flight stronger/equal)", payload); + return await this.handshakePromise; + } + } + + // Coalesce repeated requests while a handshake is in-flight. This is important + // for fast context switching and resync flows where multiple requests can happen + // back-to-back with different contexts/offsets. + this._handshakeQueued = true; + const queued = this._queuedPayload; + if (!queued || !payload || payload.context !== queued.context) { + this._queuedPayload = payload; + } else if ( + typeof payload.log_from === "number" && + typeof payload.notifications_from === "number" && + typeof queued.log_from === "number" && + typeof queued.notifications_from === "number" + ) { + // Keep the "strongest" request: smaller offsets (0) mean a more complete resync. 
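+        // Example: a queued full resync (offsets 0/0) must not be replaced by
+        // a later incremental request with higher offsets.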
+ const queuedStrongerOrEqual = + queued.log_from <= payload.log_from && queued.notifications_from <= payload.notifications_from; + if (!queuedStrongerOrEqual) { + this._queuedPayload = payload; + } + } + debug("[syncStore] state_request coalesced (handshake in-flight)", payload); + return await this.handshakePromise; + } + + this._inFlightPayload = payload; + this.handshakePromise = (async () => { + this._setMode(SYNC_MODES.HANDSHAKE_PENDING, "sendStateRequest"); + + let response; + try { + debug("[syncStore] state_request sent", payload); + response = await stateSocket.request("state_request", payload, { timeoutMs: 2000 }); + } catch (error) { + this.needsHandshake = true; + // If the socket isn't connected, we are disconnected (poll may or may not work). + // If the socket is connected but the request failed/timed out, treat as degraded (poll fallback). + this._setMode( + stateSocket.isConnected() ? SYNC_MODES.DEGRADED : SYNC_MODES.DISCONNECTED, + "state_request failed", + ); + throw error; + } + + const first = response && Array.isArray(response.results) ? response.results[0] : null; + if (!first || first.ok !== true || !first.data) { + const code = + first && first.error && typeof first.error.code === "string" + ? first.error.code + : "HANDSHAKE_FAILED"; + this._setMode(SYNC_MODES.DEGRADED, `handshake failed: ${code}`); + this.needsHandshake = true; + throw new Error(`state_request failed: ${code}`); + } + + const data = first.data; + if (typeof data.runtime_epoch === "string") { + this.runtimeEpoch = data.runtime_epoch; + } + if (typeof data.seq_base === "number" && Number.isFinite(data.seq_base)) { + this.seqBase = data.seq_base; + this.lastSeq = data.seq_base; + } + + this.needsHandshake = false; + this._setMode(SYNC_MODES.HEALTHY, "handshake ok"); + })().finally(() => { + this.handshakePromise = null; + this._inFlightPayload = null; + + if (this._handshakeQueued) { + const queuedPayload = this._queuedPayload; + this._handshakeQueued = false; + this._queuedPayload = null; + if (queuedPayload) { + debug("[syncStore] sending queued state_request", queuedPayload); + Promise.resolve().then(() => { + this._sendStateRequestPayload(queuedPayload).catch((error) => { + console.error("[syncStore] queued state_request failed:", error); + }); + }); + } + } + }); + + return await this.handshakePromise; + }, + + async _handlePush(envelope) { + if (this.mode === SYNC_MODES.DEGRADED) { + debug("[syncStore] ignoring state_push while DEGRADED"); + return; + } + + const data = envelope && envelope.data ? 
envelope.data : null; + if (!data || typeof data !== "object") return; + + if (typeof data.runtime_epoch === "string") { + if (this.runtimeEpoch && this.runtimeEpoch !== data.runtime_epoch) { + debug("[syncStore] runtime_epoch mismatch -> resync", { + current: this.runtimeEpoch, + incoming: data.runtime_epoch, + }); + this._setMode(SYNC_MODES.HANDSHAKE_PENDING, "runtime_epoch mismatch"); + await this.sendStateRequest({ forceFull: true }); + return; + } + this.runtimeEpoch = data.runtime_epoch; + } + + if (typeof data.seq === "number" && Number.isFinite(data.seq)) { + const expected = this.lastSeq + 1; + if (this.lastSeq > 0 && data.seq !== expected) { + debug("[syncStore] seq gap/out-of-order -> resync", { + lastSeq: this.lastSeq, + expected, + incoming: data.seq, + }); + this._setMode(SYNC_MODES.HANDSHAKE_PENDING, "seq gap"); + await this.sendStateRequest({ forceFull: true }); + return; + } + this.lastSeq = data.seq; + } + + if (data.snapshot && typeof data.snapshot === "object") { + await applySnapshot(data.snapshot, { + onLogGuidReset: async () => { + debug("[syncStore] log_guid reset -> resync (forceFull)"); + await this.sendStateRequest({ forceFull: true }); + }, + }); + this._setMode(SYNC_MODES.HEALTHY, "push applied"); + await this._flushPendingReconnectToast(); + } + }, +}; + +const store = createStore("sync", model); + +export { store, SYNC_MODES }; diff --git a/webui/index.html b/webui/index.html index 82abf4b591..f6a765f38e 100644 --- a/webui/index.html +++ b/webui/index.html @@ -131,6 +131,13 @@ diff --git a/webui/index.js b/webui/index.js index d7db5fc6af..2a914e81a8 100644 --- a/webui/index.js +++ b/webui/index.js @@ -254,138 +254,170 @@ let lastLogVersion = 0; let lastLogGuid = ""; let lastSpokenNo = 0; -export async function poll() { - let updated = false; - try { - // Get timezone from navigator - const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone; +export function buildStateRequestPayload(options = {}) { + const { forceFull = false } = options || {}; + const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone; + return { + context: context || null, + log_from: forceFull ? 0 : lastLogVersion, + notifications_from: forceFull ? 
0 : notificationStore.lastNotificationVersion || 0, + timezone, + }; +} - const log_from = lastLogVersion; - const response = await sendJsonData("/poll", { - log_from: log_from, - notifications_from: notificationStore.lastNotificationVersion || 0, - context: context || null, - timezone: timezone, - }); +export async function applySnapshot(snapshot, options = {}) { + const { touchConnectionStatus = false, onLogGuidReset = null } = options || {}; - // Check if the response is valid - if (!response) { - console.error("Invalid response from poll endpoint"); - return false; - } + let updated = false; - // deselect chat if it is requested by the backend - if (response.deselect_chat) { - chatsStore.deselectChat(); - return - } + // Check if the snapshot is valid + if (!snapshot || typeof snapshot !== "object") { + console.error("Invalid snapshot payload"); + return { updated: false }; + } - if ( - response.context != context && - !(response.context === null && context === null) && - context !== null - ) { - return; - } + // deselect chat if it is requested by the backend + if (snapshot.deselect_chat) { + chatsStore.deselectChat(); + return { updated: false }; + } + + if ( + snapshot.context != context && + context !== null + ) { + return { updated: false }; + } - // if the chat has been reset, restart this poll as it may have been called with incorrect log_from - if (lastLogGuid != response.log_guid) { + // If the chat has been reset, reset cursors and request a resync from the caller. + // Note: on first snapshot after a context switch, lastLogGuid is intentionally empty, + // so the mismatch is expected and should not trigger a second state_request/poll. + if (lastLogGuid != snapshot.log_guid) { + if (lastLogGuid) { const chatHistoryEl = document.getElementById("chat-history"); if (chatHistoryEl) chatHistoryEl.innerHTML = ""; lastLogVersion = 0; - lastLogGuid = response.log_guid; - await poll(); - return; + lastLogGuid = snapshot.log_guid; + if (typeof onLogGuidReset === "function") { + await onLogGuidReset(); + } + return { updated: false, resynced: true }; } + // First guid observed for this context: accept it and continue applying snapshot. 
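+    // Start cursors at 0 so the logs carried in this same snapshot render
+    // instead of being skipped as already seen.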
+ lastLogVersion = 0; + lastLogGuid = snapshot.log_guid; + } - if (lastLogVersion != response.log_version) { - updated = true; - for (const log of response.logs) { - const messageId = log.id || log.no; // Use log.id if available - setMessage( - messageId, - log.type, - log.heading, - log.content, - log.temp, - log.kvps - ); - } - afterMessagesUpdate(response.logs); + if (lastLogVersion != snapshot.log_version) { + updated = true; + for (const log of snapshot.logs) { + const messageId = log.id || log.no; // Use log.id if available + setMessage( + messageId, + log.type, + log.heading, + log.content, + log.temp, + log.kvps + ); } + afterMessagesUpdate(snapshot.logs); + } - lastLogVersion = response.log_version; - lastLogGuid = response.log_guid; + lastLogVersion = snapshot.log_version; + lastLogGuid = snapshot.log_guid; - updateProgress(response.log_progress, response.log_progress_active); + updateProgress(snapshot.log_progress, snapshot.log_progress_active); - // Update notifications from response - notificationStore.updateFromPoll(response); + // Update notifications from snapshot + notificationStore.updateFromPoll(snapshot); - //set ui model vars from backend - inputStore.paused = response.paused; + // set ui model vars from backend + inputStore.paused = snapshot.paused; - // Update status icon state + // Optional: treat snapshot application as proof of connectivity (poll path) + if (touchConnectionStatus) { setConnectionStatus(true); + } - // Update chats list using store - let contexts = response.contexts || []; - chatsStore.applyContexts(contexts); - - // Update tasks list using store - let tasks = response.tasks || []; - tasksStore.applyTasks(tasks); + // Update chats list using store + let contexts = snapshot.contexts || []; + chatsStore.applyContexts(contexts); - // Make sure the active context is properly selected in both lists - if (context) { - // Update selection in both stores - chatsStore.setSelected(context); + // Update tasks list using store + let tasks = snapshot.tasks || []; + tasksStore.applyTasks(tasks); - const contextInChats = chatsStore.contains(context); - const contextInTasks = tasksStore.contains(context); + // Make sure the active context is properly selected in both lists + if (context) { + // Update selection in both stores + chatsStore.setSelected(context); - if (contextInTasks) { - tasksStore.setSelected(context); - } + const contextInChats = chatsStore.contains(context); + const contextInTasks = tasksStore.contains(context); - if (!contextInChats && !contextInTasks) { - if (chatsStore.contexts.length > 0) { - // If it doesn't exist in the list but other contexts do, fall back to the first - const firstChatId = chatsStore.firstId(); - if (firstChatId) { - setContext(firstChatId); - chatsStore.setSelected(firstChatId); - } - } else if (typeof deselectChat === "function") { - // No contexts remain – clear state so the welcome screen can surface - deselectChat(); - } - } - } else { - const welcomeStore = - globalThis.Alpine && typeof globalThis.Alpine.store === "function" - ? 
globalThis.Alpine.store("welcomeStore") - : null; - const welcomeVisible = Boolean(welcomeStore && welcomeStore.isVisible); + if (contextInTasks) { + tasksStore.setSelected(context); + } - // No context selected, try to select the first available item unless welcome screen is active - if (!welcomeVisible && contexts.length > 0) { + if (!contextInChats && !contextInTasks) { + if (chatsStore.contexts.length > 0) { + // If it doesn't exist in the list but other contexts do, fall back to the first const firstChatId = chatsStore.firstId(); if (firstChatId) { setContext(firstChatId); chatsStore.setSelected(firstChatId); } + } else if (typeof deselectChat === "function") { + // No contexts remain - clear state so the welcome screen can surface + deselectChat(); } } - lastLogVersion = response.log_version; - lastLogGuid = response.log_guid; + tasksStore.setSelected(context); + } else { + const welcomeStore = + globalThis.Alpine && typeof globalThis.Alpine.store === "function" + ? globalThis.Alpine.store("welcomeStore") + : null; + const welcomeVisible = Boolean(welcomeStore && welcomeStore.isVisible); + + // No context selected, try to select the first available item unless welcome screen is active + if (!welcomeVisible && contexts.length > 0) { + const firstChatId = chatsStore.firstId(); + if (firstChatId) { + setContext(firstChatId); + chatsStore.setSelected(firstChatId); + } + } + } + + return { updated }; +} + +export async function poll() { + try { + // Get timezone from navigator + const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone; + + const log_from = lastLogVersion; + const response = await sendJsonData("/poll", { + log_from: log_from, + notifications_from: notificationStore.lastNotificationVersion || 0, + context: context || null, + timezone: timezone, + }); + + const result = await applySnapshot(response, { + touchConnectionStatus: true, + onLogGuidReset: poll, + }); + return { ok: true, updated: Boolean(result && result.updated) }; } catch (error) { console.error("Error:", error); setConnectionStatus(false); + return { ok: false, updated: false }; } - - return updated; } globalThis.poll = poll; @@ -490,6 +522,22 @@ export const setContext = function (id) { chatsStore.setSelected(id); tasksStore.setSelected(id); + // Trigger a new WS handshake for the newly selected context (push-based sync). + // This keeps the UI current without needing /poll during healthy operation. + try { + const syncStore = + globalThis.Alpine && typeof globalThis.Alpine.store === "function" + ? 
globalThis.Alpine.store("sync") + : null; + if (syncStore && typeof syncStore.sendStateRequest === "function") { + syncStore.sendStateRequest({ forceFull: true }).catch((error) => { + console.error("[index] syncStore.sendStateRequest failed:", error); + }); + } + } catch (_error) { + // no-op: sync store may not be initialized yet + } + //skip one speech if enabled when switching context if (localStorage.getItem("speech") == "true") skipOneSpeech = true; }; @@ -574,19 +622,101 @@ globalThis.updateAfterScroll = updateAfterScroll; // setInterval(poll, 250); async function startPolling() { - const shortInterval = 25; - const longInterval = 250; - const shortIntervalPeriod = 100; + // Fallback polling cadence: + // - DISCONNECTED: do not poll (transport down, avoid request spam) + // - DEGRADED: 1Hz idle, burst to 4Hz briefly when activity is detected + const degradedIdleIntervalMs = 1000; + const degradedActiveIntervalMs = 250; + const degradedActiveWindowTicks = 8; // 8 * 250ms = 2s burst window let shortIntervalCount = 0; + let missingSyncSinceMs = null; + let consecutivePollFailures = 0; + let lastHandshakeKickMs = 0; + const startedAtMs = Date.now(); + const initialNoPollGraceMs = 2000; async function _doPoll() { - let nextInterval = longInterval; + let nextInterval = degradedIdleIntervalMs; try { - const result = await poll(); - if (result) shortIntervalCount = shortIntervalPeriod; // Reset the counter when the result is true + const syncStore = + globalThis.Alpine && typeof globalThis.Alpine.store === "function" + ? globalThis.Alpine.store("sync") + : null; + const syncMode = syncStore && typeof syncStore.mode === "string" ? syncStore.mode : null; + // Polling is a fallback. In V1: + // - DEGRADED: poll at fallback cadence to keep the UI usable while WS sync is unavailable. + // - DISCONNECTED: do not poll; rely on Socket.IO reconnect and avoid console/network spam. + // Safety net: if the sync store never loads, start polling after a short grace period. + if (!syncStore || !syncMode) { + if (missingSyncSinceMs == null) { + missingSyncSinceMs = Date.now(); + } + } else { + missingSyncSinceMs = null; + } + + const shouldPoll = + syncMode === "DEGRADED" || + (missingSyncSinceMs != null && Date.now() - missingSyncSinceMs > 2000); + if (!shouldPoll) { + setTimeout(_doPoll.bind(this), nextInterval); + return; + } + + // Avoid a “single poll on boot” while the websocket handshake is racing to take over. + if (Date.now() - startedAtMs < initialNoPollGraceMs && (!syncStore || !syncMode)) { + setTimeout(_doPoll.bind(this), nextInterval); + return; + } + + // Call through `globalThis.poll` so test harnesses (and future instrumentation) + // can wrap/spy on polling behaviour. Fall back to the module-local function + // if the global is unavailable. + const pollFn = typeof globalThis.poll === "function" ? globalThis.poll : poll; + const result = await pollFn(); + const pollOk = Boolean(result && result.ok); + const updated = Boolean(result && result.updated); + + if (!pollOk) { + consecutivePollFailures += 1; + } else { + consecutivePollFailures = 0; + } + + // If we are degraded but polling repeatedly fails, upgrade to DISCONNECTED. + if ( + syncStore && + syncMode === "DEGRADED" && + !pollOk && + consecutivePollFailures >= 3 + ) { + syncStore.mode = "DISCONNECTED"; + } + + // If we're polling and the backend responds, try to re-establish push sync immediately. + if (syncStore && pollOk) { + const now = Date.now(); + const modeNow = typeof syncStore.mode === "string" ? 
syncStore.mode : null; + const kickCooldownMs = modeNow === "DISCONNECTED" ? 0 : 3000; + const eligible = + (modeNow === "DISCONNECTED" || modeNow === "DEGRADED") && + typeof syncStore.sendStateRequest === "function" && + now - lastHandshakeKickMs >= kickCooldownMs; + if (eligible) { + lastHandshakeKickMs = now; + syncStore.sendStateRequest({ forceFull: true }).catch(() => {}); + } + } + + if (updated) shortIntervalCount = degradedActiveWindowTicks; // Reset the counter when updates were applied if (shortIntervalCount > 0) shortIntervalCount--; // Decrease the counter on each call - nextInterval = shortIntervalCount > 0 ? shortInterval : longInterval; + const effectiveMode = + syncStore && typeof syncStore.mode === "string" ? syncStore.mode : syncMode; + nextInterval = + effectiveMode === "DEGRADED" + ? (shortIntervalCount > 0 ? degradedActiveIntervalMs : degradedIdleIntervalMs) + : degradedIdleIntervalMs; } catch (error) { console.error("Error:", error); } diff --git a/webui/js/api.js b/webui/js/api.js index 33f8934447..6fd6bf4d7d 100644 --- a/webui/js/api.js +++ b/webui/js/api.js @@ -71,14 +71,23 @@ export async function fetchApi(url, request) { // csrf token stored locally let csrfToken = null; +let csrfTokenPromise = null; + +export function invalidateCsrfToken() { + csrfToken = null; + csrfTokenPromise = null; +} /** * Get the CSRF token for API requests * Caches the token after first request * @returns {Promise} The CSRF token */ -async function getCsrfToken() { +export async function getCsrfToken() { if (csrfToken) return csrfToken; + if (csrfTokenPromise) return await csrfTokenPromise; + + csrfTokenPromise = (async () => { const response = await fetch("/csrf_token", { credentials: "same-origin", }); @@ -89,11 +98,32 @@ async function getCsrfToken() { } const json = await response.json(); if (json.ok) { + const runtimeId = + typeof json.runtime_id === "string" && json.runtime_id.length > 0 + ? 
json.runtime_id + : null; + + window.runtimeInfo = window.runtimeInfo || { id: null, isDevelopment: false }; + if (runtimeId) { + window.runtimeInfo = { ...(window.runtimeInfo || {}), id: runtimeId }; + } + csrfToken = json.token; - document.cookie = `csrf_token_${json.runtime_id}=${csrfToken}; SameSite=Strict; Path=/`; + if (runtimeId) { + document.cookie = `csrf_token_${runtimeId}=${csrfToken}; SameSite=Strict; Path=/`; + } else { + console.warn("CSRF runtime id missing from response; skipping cookie name binding."); + } return csrfToken; } else { if (json.error) alert(json.error); throw new Error(json.error || "Failed to get CSRF token"); } + })(); + + try { + return await csrfTokenPromise; + } finally { + csrfTokenPromise = null; + } } diff --git a/webui/js/messages.js b/webui/js/messages.js index 70c46bf098..1fb376e3f6 100644 --- a/webui/js/messages.js +++ b/webui/js/messages.js @@ -450,7 +450,7 @@ export function drawMessageUser( messageDiv.appendChild(attachmentsContainer); } // Important: Clear existing attachments to re-render, preventing duplicates on update - attachmentsContainer.innerHTML = ""; + attachmentsContainer.innerHTML = ""; kvps.attachments.forEach((attachment) => { const attachmentDiv = document.createElement("div"); @@ -1007,4 +1007,4 @@ class Scroller { reApplyScroll() { if (this.wasAtBottom) this.element.scrollTop = this.element.scrollHeight; } -} \ No newline at end of file +} diff --git a/webui/js/settings.js b/webui/js/settings.js index 059d645410..c1d1337c8f 100644 --- a/webui/js/settings.js +++ b/webui/js/settings.js @@ -1,3 +1,5 @@ +window.runtimeInfo = window.runtimeInfo || { id: null, isDevelopment: false }; + const settingsModalProxy = { isOpen: false, settings: {}, @@ -86,6 +88,11 @@ const settingsModalProxy = { try { const set = await sendJsonData("/settings_get", null); + const isDevelopment = Boolean(window.runtimeInfo?.isDevelopment); + if (store) { + store.isDevelopment = isDevelopment; + } + // First load the settings data without setting the active tab const settings = { "title": "Settings", @@ -283,6 +290,10 @@ const settingsModalProxy = { openModal("settings/external/api-examples.html"); } else if (field.id === "memory_dashboard") { openModal("settings/memory/memory-dashboard.html"); + } else if (field.id === "websocket_tester") { + openModal("settings/developer/websocket-tester.html"); + } else if (field.id === "websocket_event_console") { + openModal("settings/developer/websocket-event-console.html"); } } }; @@ -309,6 +320,7 @@ document.addEventListener('alpine:init', function () { Alpine.store('root', { activeTab: localStorage.getItem('settingsActiveTab') || 'agent', isOpen: false, + isDevelopment: false, toggleSettings() { this.isOpen = !this.isOpen; @@ -365,6 +377,12 @@ document.addEventListener('alpine:init', function () { if (response.ok) { const data = await response.json(); if (data && data.settings) { + const isDevelopment = Boolean(window.runtimeInfo?.isDevelopment); + const rootStore = Alpine.store('root'); + if (rootStore) { + rootStore.isDevelopment = isDevelopment; + } + this.settingsData = data.settings; } else { console.error('Invalid settings data format'); diff --git a/webui/js/websocket.js b/webui/js/websocket.js new file mode 100644 index 0000000000..50fbf1def1 --- /dev/null +++ b/webui/js/websocket.js @@ -0,0 +1,683 @@ +import { io } from "/vendor/socket.io.esm.min.js"; +import { getCsrfToken, invalidateCsrfToken } from "/js/api.js"; + +const MAX_PAYLOAD_BYTES = 50 * 1024 * 1024; // 50MB hard cap per contract +const 
DEFAULT_TIMEOUT_MS = 0;
+
+const _UUID_HEX = [..."0123456789abcdef"];
+// Producer options accepted by normalizeProducerOptions (see typedef below).
+const _OPTION_KEYS = new Set([
+  "includeHandlers",
+  "excludeHandlers",
+  "excludeSids",
+  "correlationId",
+]);
+
+/**
+ * @param {unknown} value
+ * @param {string} fieldName
+ * @returns {Record<string, unknown>}
+ */
+function assertPlainObject(value, fieldName) {
+  if (!value || typeof value !== "object" || Array.isArray(value)) {
+    throw new Error(`${fieldName} must be a plain object`);
+  }
+  return /** @type {Record<string, unknown>} */ (value);
+}
+
+/**
+ * @returns {string}
+ */
+function generateUuid() {
+  if (typeof crypto !== "undefined" && typeof crypto.randomUUID === "function") {
+    return crypto.randomUUID();
+  }
+
+  const buffer = new Uint8Array(16);
+  if (typeof crypto !== "undefined" && typeof crypto.getRandomValues === "function") {
+    crypto.getRandomValues(buffer);
+  } else {
+    for (let i = 0; i < buffer.length; i += 1) {
+      buffer[i] = Math.floor(Math.random() * 256);
+    }
+  }
+
+  buffer[6] = (buffer[6] & 0x0f) | 0x40; // version 4
+  buffer[8] = (buffer[8] & 0x3f) | 0x80; // variant 10
+
+  let uuid = "";
+  for (let i = 0; i < buffer.length; i += 1) {
+    if (i === 4 || i === 6 || i === 8 || i === 10) {
+      uuid += "-";
+    }
+    uuid += _UUID_HEX[buffer[i] >> 4];
+    uuid += _UUID_HEX[buffer[i] & 0x0f];
+  }
+  return uuid;
+}
+
+/**
+ * @param {unknown} value
+ * @param {string} fieldName
+ * @param {{ allowEmpty?: boolean }} [options]
+ * @returns {string[] | undefined}
+ */
+function normalizeStringList(value, fieldName, options = {}) {
+  if (value == null) return undefined;
+  const raw = Array.isArray(value) ? value : [value];
+  const normalized = [];
+  for (const item of raw) {
+    if (typeof item !== "string" || item.trim().length === 0) {
+      throw new Error(`${fieldName} must contain non-empty strings`);
+    }
+    normalized.push(item.trim());
+  }
+  const deduped = Array.from(new Set(normalized));
+  if (!options.allowEmpty && deduped.length === 0) {
+    throw new Error(`${fieldName} must contain at least one value`);
+  }
+  return deduped.length > 0 ? deduped : undefined;
+}
+
+/**
+ * @param {unknown} value
+ * @returns {string[] | undefined}
+ */
+function normalizeSidList(value) {
+  return normalizeStringList(value, "excludeSids", { allowEmpty: true });
+}
+
+/**
+ * @param {unknown} value
+ * @returns {string | undefined}
+ */
+function normalizeCorrelationId(value) {
+  if (value == null) return undefined;
+  if (typeof value !== "string") {
+    throw new Error("correlationId must be a non-empty string");
+  }
+  const trimmed = value.trim();
+  if (!trimmed) {
+    throw new Error("correlationId must be a non-empty string");
+  }
+  return trimmed;
+}
+
+/**
+ * @param {unknown} value
+ * @returns {string}
+ */
+function normalizeNamespace(value) {
+  if (typeof value !== "string") {
+    throw new Error("namespace must be a non-empty string");
+  }
+  const trimmed = value.trim();
+  if (!trimmed) {
+    throw new Error("namespace must be a non-empty string");
+  }
+  if (trimmed === "/") {
+    return "/";
+  }
+  return trimmed.startsWith("/") ? trimmed : `/${trimmed}`;
+}
+
+/**
+ * Generate a correlation identifier using UUIDv4 semantics.
+ *
+ * @param {string} [prefix]
+ * @returns {string}
+ */
+export function createCorrelationId(prefix) {
+  const uuid = generateUuid();
+  if (typeof prefix !== "string" || prefix.trim().length === 0) {
+    return uuid;
+  }
+
+  const normalizedPrefix = prefix.trim();
+  const suffix = normalizedPrefix.endsWith("-") ?
"" : "-"; + return `${normalizedPrefix}${suffix}${uuid}`; +} + +/** + * @typedef {Object} NormalizedProducerOptions + * @property {string[]=} includeHandlers + * @property {string[]=} excludeHandlers + * @property {string[]=} excludeSids + * @property {string=} correlationId + */ + +/** + * Normalise producer options used for emit/request/broadcast helpers. + * + * @param {Record | undefined} options + * @returns {NormalizedProducerOptions} + */ +export function normalizeProducerOptions(options) { + if (options == null) return {}; + const source = assertPlainObject(options, "options"); + + const unknownKeys = Object.keys(source).filter((key) => !_OPTION_KEYS.has(key)); + if (unknownKeys.length > 0) { + throw new Error(`Unsupported producer option(s): ${unknownKeys.join(", ")}`); + } + + const normalized = {}; + + const includeHandlers = normalizeStringList(source.includeHandlers, "includeHandlers"); + if (includeHandlers) { + normalized.includeHandlers = includeHandlers; + } + + const excludeHandlers = normalizeStringList( + source.excludeHandlers, + "excludeHandlers", + { allowEmpty: true }, + ); + if (excludeHandlers && excludeHandlers.length > 0) { + normalized.excludeHandlers = excludeHandlers; + } + + const excludeSids = normalizeSidList(source.excludeSids); + if (excludeSids && excludeSids.length > 0) { + normalized.excludeSids = excludeSids; + } + + const correlationId = normalizeCorrelationId(source.correlationId); + if (correlationId) { + normalized.correlationId = correlationId; + } + + if (normalized.includeHandlers && normalized.excludeHandlers) { + throw new Error("includeHandlers and excludeHandlers cannot be used together"); + } + + return normalized; +} + +/** + * @typedef {Object} ServerDeliveryEnvelope + * @property {string} handlerId + * @property {string} eventId + * @property {string} correlationId + * @property {string} ts + * @property {Record} data + */ + +/** + * Validate a server-sent delivery envelope before invoking subscribers. 
+ * + * @param {unknown} envelope + * @returns {ServerDeliveryEnvelope} + */ +export function validateServerEnvelope(envelope) { + const value = assertPlainObject(envelope, "envelope"); + + const handlerId = normalizeCorrelationId(value.handlerId)?.trim(); + if (!handlerId) { + throw new Error("Server envelope missing handlerId"); + } + + const eventId = normalizeCorrelationId(value.eventId)?.trim(); + if (!eventId) { + throw new Error("Server envelope missing eventId"); + } + + const correlationId = normalizeCorrelationId(value.correlationId); + if (!correlationId) { + throw new Error("Server envelope missing correlationId"); + } + + if (typeof value.ts !== "string" || value.ts.trim().length === 0) { + throw new Error("Server envelope missing timestamp"); + } + const timestamp = value.ts.trim(); + if (Number.isNaN(Date.parse(timestamp))) { + throw new Error("Server envelope timestamp is invalid"); + } + + let data = value.data; + if (data == null) { + data = {}; + } else if (typeof data !== "object" || Array.isArray(data)) { + throw new Error("Server envelope data must be a plain object"); + } + + const normalized = { + handlerId, + eventId, + correlationId, + ts: timestamp, + data: Object.freeze({ ...data }), + }; + + return Object.freeze(normalized); +} + +class WebSocketClient { + constructor(namespace = "/") { + this.namespace = normalizeNamespace(namespace); + this.socket = null; + this.connected = false; + this.connecting = false; + this.connectPromise = null; + this.subscriptions = new Map(); // eventType -> { handler, callbacks: Set } + this.connectCallbacks = new Set(); + this.disconnectCallbacks = new Set(); + this.errorCallbacks = new Set(); + this.isDevelopment = Boolean(window.runtimeInfo?.isDevelopment); + this._manualDisconnect = false; + this._hasConnectedOnce = false; + this._lastRuntimeId = null; + this._csrfInvalidatedForConnectError = false; + this._connectErrorRetryTimer = null; + this._connectErrorRetryAttempt = 0; + } + + _clearConnectErrorRetryTimer() { + if (this._connectErrorRetryTimer) { + clearTimeout(this._connectErrorRetryTimer); + this._connectErrorRetryTimer = null; + } + } + + _scheduleConnectErrorRetry(reason) { + if (this._manualDisconnect) return; + if (this.connected) return; + if (!this.socket) return; + if (this.socket.connected) return; + if (this._connectErrorRetryTimer) return; + + const attempt = Math.max(0, Number(this._connectErrorRetryAttempt) || 0); + const baseMs = 250; + const capMs = 10000; + const delayMs = Math.min(capMs, baseMs * 2 ** attempt); + this._connectErrorRetryAttempt = attempt + 1; + + this.debugLog("schedule connect retry", { reason, attempt, delayMs }); + this._connectErrorRetryTimer = setTimeout(() => { + this._connectErrorRetryTimer = null; + if (this._manualDisconnect) return; + if (this.connected) return; + this.connect().catch(() => {}); + }, delayMs); + } + + buildPayload(data) { + const ts = new Date().toISOString(); + if (data == null) { + return { ts, data: {} }; + } + if (typeof data !== "object" || Array.isArray(data)) { + throw new Error("WebSocket payload must be a plain object"); + } + return { ts, data: { ...data } }; + } + + applyProducerOptions(payload, normalizedOptions, allowances) { + const result = payload; + + if (normalizedOptions.includeHandlers) { + if (!allowances.includeHandlers) { + throw new Error("This operation does not support includeHandlers"); + } + result.includeHandlers = [...normalizedOptions.includeHandlers]; + } + + if (normalizedOptions.excludeHandlers) { + if 
(!allowances.excludeHandlers) { + throw new Error("This operation does not support excludeHandlers"); + } + result.excludeHandlers = [...normalizedOptions.excludeHandlers]; + } + + if (normalizedOptions.excludeSids) { + if (!allowances.excludeSids) { + throw new Error("This operation does not support excludeSids"); + } + result.excludeSids = [...normalizedOptions.excludeSids]; + } + + if (normalizedOptions.correlationId) { + result.correlationId = normalizedOptions.correlationId; + } + + return result; + } + + setDevelopmentFlag(value) { + const normalized = Boolean(value); + this.isDevelopment = normalized; + window.runtimeInfo = { ...(window.runtimeInfo || {}), isDevelopment: normalized }; + } + + debugLog(...args) { + if (this.isDevelopment) { + console.debug(`[websocket:${this.namespace}]`, ...args); + } + } + + async connect() { + if (this.connected) return; + if (this.connectPromise) return this.connectPromise; + + this._manualDisconnect = false; + this.connecting = true; + this.connectPromise = (async () => { + if (!this.socket) { + this.initializeSocket(); + } + + if (this.socket.connected) return; + + // Ensure the current runtime-bound session + CSRF cookies exist before initiating + // the Engine.IO handshake. This is required for seamless reconnect after backend + // restarts that rotate runtime_id and session cookie names. + await getCsrfToken(); + + await new Promise((resolve, reject) => { + const onConnect = () => { + this.socket.off("connect_error", onError); + resolve(); + }; + const onError = (error) => { + this.socket.off("connect", onConnect); + reject(error instanceof Error ? error : new Error(String(error))); + }; + + this.socket.once("connect", onConnect); + this.socket.once("connect_error", onError); + this.socket.connect(); + }); + })() + .catch((error) => { + throw new Error(`WebSocket connection failed: ${error.message || error}`); + }) + .finally(() => { + this.connecting = false; + this.connectPromise = null; + }); + + return this.connectPromise; + } + + async disconnect() { + if (!this.socket) return; + this._manualDisconnect = true; + this.socket.disconnect(); + this.connected = false; + } + + isConnected() { + return this.connected; + } + + async emit(eventType, data, options = {}) { + const correlationId = + normalizeCorrelationId(options?.correlationId) || createCorrelationId("emit"); + const payload = this.buildPayload(data); + payload.correlationId = correlationId; + + this.debugLog("emit", { + eventType, + correlationId, + }); + this.ensurePayloadSize(payload); + await this.connect(); + if (!this.isConnected()) { + throw new Error("Not connected"); + } + this.socket.emit(eventType, payload); + } + + async request(eventType, data, options = {}) { + const correlationId = + normalizeCorrelationId(options?.correlationId) || + createCorrelationId("request"); + const payload = this.buildPayload(data); + payload.correlationId = correlationId; + + const timeoutMs = Number(options.timeoutMs ?? 
DEFAULT_TIMEOUT_MS); + this.debugLog("request", { eventType, correlationId, timeoutMs }); + if (!Number.isFinite(timeoutMs) || timeoutMs < 0) { + throw new Error("timeoutMs must be a non-negative number"); + } + this.ensurePayloadSize(payload); + await this.connect(); + if (!this.isConnected()) { + throw new Error("Not connected"); + } + + return new Promise((resolve, reject) => { + if (timeoutMs > 0) { + this.socket + .timeout(timeoutMs) + .emit(eventType, payload, (err, response) => { + if (err) { + reject(new Error("Request timeout")); + return; + } + resolve(this.normalizeRequestResponse(response)); + }); + return; + } + + this.socket.emit(eventType, payload, (response) => { + resolve(this.normalizeRequestResponse(response)); + }); + }); + } + + normalizeRequestResponse(response) { + if (!response || typeof response !== "object") { + return { correlationId: null, results: [] }; + } + const correlationId = + typeof response.correlationId === "string" && response.correlationId.trim().length > 0 + ? response.correlationId.trim() + : null; + const results = Array.isArray(response.results) ? response.results : []; + return { correlationId, results }; + } + + async on(eventType, callback) { + if (typeof callback !== "function") { + throw new Error("Callback must be a function"); + } + + await this.connect(); + + if (!this.subscriptions.has(eventType)) { + const handler = (payload) => { + const entry = this.subscriptions.get(eventType); + if (!entry) return; + let envelope; + try { + envelope = validateServerEnvelope(payload); + } catch (error) { + console.error("WebSocket envelope validation failed:", error); + this.invokeErrorCallbacks(error); + return; + } + + entry.callbacks.forEach((cb) => { + try { + cb(envelope); + } catch (error) { + console.error("WebSocket callback error:", error); + } + }); + }; + + this.subscriptions.set(eventType, { + handler, + callbacks: new Set(), + }); + + this.socket.on(eventType, handler); + } + + const entry = this.subscriptions.get(eventType); + entry.callbacks.add(callback); + } + + off(eventType, callback) { + const entry = this.subscriptions.get(eventType); + if (!entry) return; + + if (callback) { + entry.callbacks.delete(callback); + } else { + entry.callbacks.clear(); + } + + if (entry.callbacks.size === 0) { + if (this.socket) { + this.socket.off(eventType, entry.handler); + } + this.subscriptions.delete(eventType); + } + } + + onConnect(callback) { + if (typeof callback === "function") { + this.connectCallbacks.add(callback); + } + } + + onDisconnect(callback) { + if (typeof callback === "function") { + this.disconnectCallbacks.add(callback); + } + } + + onError(callback) { + if (typeof callback === "function") { + this.errorCallbacks.add(callback); + } + } + + initializeSocket() { + this.socket = io(this.namespace, { + autoConnect: false, + reconnection: true, + transports: ["websocket", "polling"], + withCredentials: true, + auth: (cb) => { + getCsrfToken() + .then((token) => cb({ csrf_token: token })) + .catch((error) => { + console.error("[websocket] failed to fetch CSRF token for connect", error); + cb({}); + }); + }, + }); + + this.socket.on("connect", () => { + this.connected = true; + this._csrfInvalidatedForConnectError = false; + this._connectErrorRetryAttempt = 0; + this._clearConnectErrorRetryTimer(); + + const runtimeId = window.runtimeInfo?.id || null; + const runtimeChanged = Boolean( + this._lastRuntimeId && + runtimeId && + this._lastRuntimeId !== runtimeId + ); + const firstConnect = !this._hasConnectedOnce; + 
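// These flags feed the onConnect callback payload below: runtimeChanged + // marks a backend restart that rotated runtime_id between connections, + // while firstConnect distinguishes the initial connect from automatic + // reconnects. + 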
this._hasConnectedOnce = true; + this._lastRuntimeId = runtimeId; + + this.debugLog("socket connected", { + sid: this.socket.id, + runtimeId, + runtimeChanged, + firstConnect, + }); + this.connectCallbacks.forEach((cb) => { + try { + cb({ runtimeId, runtimeChanged, firstConnect }); + } catch (error) { + console.error("WebSocket onConnect callback error:", error); + } + }); + }); + + this.socket.on("disconnect", (reason) => { + this.connected = false; + this.debugLog("socket disconnected", { reason }); + this.disconnectCallbacks.forEach((cb) => { + try { + cb(reason); + } catch (error) { + console.error("WebSocket onDisconnect callback error:", error); + } + }); + }); + + this.socket.on("connect_error", (error) => { + this.debugLog("socket connect_error", error); + this.invokeErrorCallbacks(error); + if (!this._csrfInvalidatedForConnectError) { + this._csrfInvalidatedForConnectError = true; + invalidateCsrfToken(); + } + this._scheduleConnectErrorRetry("connect_error"); + }); + + this.socket.on("error", (error) => { + this.debugLog("socket error", error); + this.invokeErrorCallbacks(error); + }); + } + + invokeErrorCallbacks(error) { + this.errorCallbacks.forEach((cb) => { + try { + cb(error); + } catch (err) { + console.error("WebSocket onError callback error:", err); + } + }); + } + + ensurePayloadSize(data) { + const size = this.calculatePayloadSize(data); + if (size > MAX_PAYLOAD_BYTES) { + throw new Error("Payload too large"); + } + } + + calculatePayloadSize(data) { + try { + return new TextEncoder().encode(JSON.stringify(data ?? null)).length; + } catch (_error) { + // Fallback: rough estimate if stringify fails + const stringified = String(data); + return stringified.length * 2; + } + } +} + +const _namespacedClients = new Map(); + +/** + * Create a new Socket.IO client bound to a specific namespace. + * + * @param {string} namespace + * @returns {WebSocketClient} + */ +export function createNamespacedClient(namespace) { + return new WebSocketClient(namespace); +} + +/** + * Return a cached Socket.IO client for the given namespace (one per browser tab/window). + * + * @param {string} namespace + * @returns {WebSocketClient} + */ +export function getNamespacedClient(namespace) { + const key = normalizeNamespace(namespace); + const existing = _namespacedClients.get(key); + if (existing) return existing; + const client = new WebSocketClient(key); + _namespacedClients.set(key, client); + return client; +} + +export const websocket = getNamespacedClient("/"); diff --git a/webui/public/dev_testing.svg b/webui/public/dev_testing.svg new file mode 100644 index 0000000000..a5efa45f5a --- /dev/null +++ b/webui/public/dev_testing.svg @@ -0,0 +1,2 @@ + + \ No newline at end of file diff --git a/webui/vendor/ace-min/ace.min.css b/webui/vendor/ace-min/ace.min.css new file mode 100644 index 0000000000..b81ecf41a2 --- /dev/null +++ b/webui/vendor/ace-min/ace.min.css @@ -0,0 +1,8 @@ +/** + * Minified by jsDelivr using clean-css v5.3.2. + * Original file: /npm/ace-builds@1.36.5/css/ace.css + * + * Do NOT use SRI with dynamically generated files! 
More information: https://www.jsdelivr.com/using-sri-with-dynamic-files + */ +.ace_editor>.ace_sb-h div,.ace_editor>.ace_sb-v div{position:absolute;background:rgba(128,128,128,.6);-moz-box-sizing:border-box;box-sizing:border-box;border:1px solid #bbb;border-radius:2px;z-index:8}.ace_editor>.ace_sb-h,.ace_editor>.ace_sb-v{position:absolute;z-index:6;background:0 0;overflow:hidden!important}.ace_editor>.ace_sb-v{z-index:6;right:0;top:0;width:12px}.ace_editor>.ace_sb-v div{z-index:8;right:0;width:100%}.ace_editor>.ace_sb-h{bottom:0;left:0;height:12px}.ace_editor>.ace_sb-h div{bottom:0;height:100%}.ace_editor>.ace_sb_grabbed{z-index:8;background:#000}.ace_br1{border-top-left-radius:3px}.ace_br2{border-top-right-radius:3px}.ace_br3{border-top-left-radius:3px;border-top-right-radius:3px}.ace_br4{border-bottom-right-radius:3px}.ace_br5{border-top-left-radius:3px;border-bottom-right-radius:3px}.ace_br6{border-top-right-radius:3px;border-bottom-right-radius:3px}.ace_br7{border-top-left-radius:3px;border-top-right-radius:3px;border-bottom-right-radius:3px}.ace_br8{border-bottom-left-radius:3px}.ace_br9{border-top-left-radius:3px;border-bottom-left-radius:3px}.ace_br10{border-top-right-radius:3px;border-bottom-left-radius:3px}.ace_br11{border-top-left-radius:3px;border-top-right-radius:3px;border-bottom-left-radius:3px}.ace_br12{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.ace_br13{border-top-left-radius:3px;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.ace_br14{border-top-right-radius:3px;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.ace_br15{border-top-left-radius:3px;border-top-right-radius:3px;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.ace_editor{position:relative;overflow:hidden;padding:0;font:12px/normal Monaco,Menlo,'Ubuntu Mono',Consolas,'Source Code Pro',source-code-pro,monospace;direction:ltr;text-align:left;-webkit-tap-highlight-color:transparent;forced-color-adjust:none}.ace_scroller{position:absolute;overflow:hidden;top:0;bottom:0;background-color:inherit;-ms-user-select:none;-moz-user-select:none;-webkit-user-select:none;user-select:none;cursor:text}.ace_content{position:absolute;box-sizing:border-box;min-width:100%;contain:style size layout;font-variant-ligatures:no-common-ligatures}.ace_keyboard-focus:focus{box-shadow:inset 0 0 0 2px #5e9ed6;outline:0}.ace_dragging .ace_scroller:before{position:absolute;top:0;left:0;right:0;bottom:0;content:'';background:rgba(250,250,250,.01);z-index:1000}.ace_dragging.ace_dark .ace_scroller:before{background:rgba(0,0,0,.01)}.ace_gutter{position:absolute;overflow:hidden;width:auto;top:0;bottom:0;left:0;cursor:default;z-index:4;-ms-user-select:none;-moz-user-select:none;-webkit-user-select:none;user-select:none;contain:style size layout}.ace_gutter-active-line{position:absolute;left:0;right:0}.ace_scroller.ace_scroll-left:after{content:"";position:absolute;top:0;right:0;bottom:0;left:0;box-shadow:17px 0 16px -16px rgba(0,0,0,.4) inset;pointer-events:none}.ace_gutter-cell,.ace_gutter-cell_svg-icons{position:absolute;top:0;left:0;right:0;padding-left:19px;padding-right:6px;background-repeat:no-repeat}.ace_gutter-cell_svg-icons .ace_gutter_annotation{margin-left:-14px;float:left}.ace_gutter-cell .ace_gutter_annotation{margin-left:-19px;float:left}.ace_gutter-cell.ace_error,.ace_gutter-cell.ace_security,.ace_icon.ace_error,.ace_icon.ace_error_fold,.ace_icon.ace_security,.ace_icon.ace_security_fold{background-image:url("main-1.png");background-repeat:no-repeat;background-position:2px 
center}.ace_gutter-cell.ace_warning,.ace_icon.ace_warning,.ace_icon.ace_warning_fold{background-image:url("main-2.png");background-repeat:no-repeat;background-position:2px center}.ace_gutter-cell.ace_hint,.ace_gutter-cell.ace_info,.ace_icon.ace_hint,.ace_icon.ace_info{background-image:url("main-3.png");background-repeat:no-repeat;background-position:2px center}.ace_dark .ace_gutter-cell.ace_hint,.ace_dark .ace_gutter-cell.ace_info,.ace_dark .ace_icon.ace_hint,.ace_dark .ace_icon.ace_info{background-image:url("main-4.png")}.ace_icon_svg.ace_error{-webkit-mask-image:url("main-5.svg");background-color:#dc143c}.ace_icon_svg.ace_security{-webkit-mask-image:url("main-6.svg");background-color:#dc143c}.ace_icon_svg.ace_warning{-webkit-mask-image:url("main-7.svg");background-color:#ff8c00}.ace_icon_svg.ace_info{-webkit-mask-image:url("main-8.svg");background-color:#4169e1}.ace_icon_svg.ace_hint{-webkit-mask-image:url("main-9.svg");background-color:silver}.ace_icon_svg.ace_error_fold{-webkit-mask-image:url("main-10.svg");background-color:#dc143c}.ace_icon_svg.ace_security_fold{-webkit-mask-image:url("main-11.svg");background-color:#dc143c}.ace_icon_svg.ace_warning_fold{-webkit-mask-image:url("main-12.svg");background-color:#ff8c00}.ace_scrollbar{contain:strict;position:absolute;right:0;bottom:0;z-index:6}.ace_scrollbar-inner{position:absolute;cursor:text;left:0;top:0}.ace_scrollbar-v{overflow-x:hidden;overflow-y:scroll;top:0}.ace_scrollbar-h{overflow-x:scroll;overflow-y:hidden;left:0}.ace_print-margin{position:absolute;height:100%}.ace_text-input{position:absolute;z-index:0;width:.5em;height:1em;opacity:0;background:0 0;-moz-appearance:none;appearance:none;border:none;resize:none;outline:0;overflow:hidden;font:inherit;padding:0 1px;margin:0 -1px;contain:strict;-ms-user-select:text;-moz-user-select:text;-webkit-user-select:text;user-select:text;white-space:pre!important}.ace_text-input.ace_composition{background:0 0;color:inherit;z-index:1000;opacity:1}.ace_composition_placeholder{color:transparent}.ace_composition_marker{border-bottom:1px solid;position:absolute;border-radius:0;margin-top:1px}[ace_nocontext=true]{transform:none!important;filter:none!important;clip-path:none!important;mask:none!important;contain:none!important;perspective:none!important;mix-blend-mode:initial!important;z-index:auto}.ace_layer{z-index:1;position:absolute;overflow:hidden;word-wrap:normal;white-space:pre;height:100%;width:100%;box-sizing:border-box;pointer-events:none}.ace_gutter-layer{position:relative;width:auto;text-align:right;pointer-events:auto;height:1000000px;contain:style size layout}.ace_text-layer{font:inherit!important;position:absolute;height:1000000px;width:1000000px;contain:style size layout}.ace_text-layer>.ace_line,.ace_text-layer>.ace_line_group{contain:style size layout;position:absolute;top:0;left:0;right:0}.ace_hidpi .ace_content,.ace_hidpi .ace_gutter,.ace_hidpi .ace_gutter-layer,.ace_hidpi .ace_text-layer{contain:strict}.ace_hidpi .ace_text-layer>.ace_line,.ace_hidpi .ace_text-layer>.ace_line_group{contain:strict}.ace_cjk{display:inline-block;text-align:center}.ace_cursor-layer{z-index:4}.ace_cursor{z-index:4;position:absolute;box-sizing:border-box;border-left:2px solid;transform:translatez(0)}.ace_multiselect .ace_cursor{border-left-width:1px}.ace_slim-cursors .ace_cursor{border-left-width:1px}.ace_overwrite-cursors .ace_cursor{border-left-width:0;border-bottom:1px solid}.ace_hidden-cursors .ace_cursor{opacity:.2}.ace_hasPlaceholder .ace_hidden-cursors .ace_cursor{opacity:0}.ace_smooth-blinking 
.ace_cursor{transition:opacity .18s}.ace_animate-blinking .ace_cursor{animation-duration:1s;animation-timing-function:step-end;animation-name:blink-ace-animate;animation-iteration-count:infinite}.ace_animate-blinking.ace_smooth-blinking .ace_cursor{animation-duration:1s;animation-timing-function:ease-in-out;animation-name:blink-ace-animate-smooth}@keyframes blink-ace-animate{from,to{opacity:1}60%{opacity:0}}@keyframes blink-ace-animate-smooth{from,to{opacity:1}45%{opacity:1}60%{opacity:0}85%{opacity:0}}.ace_marker-layer .ace_stack,.ace_marker-layer .ace_step{position:absolute;z-index:3}.ace_marker-layer .ace_selection{position:absolute;z-index:5}.ace_marker-layer .ace_bracket{position:absolute;z-index:6}.ace_marker-layer .ace_error_bracket{position:absolute;border-bottom:1px solid #de5555;border-radius:0}.ace_marker-layer .ace_active-line{position:absolute;z-index:2}.ace_marker-layer .ace_selected-word{position:absolute;z-index:4;box-sizing:border-box}.ace_line .ace_fold{box-sizing:border-box;display:inline-block;height:11px;margin-top:-2px;vertical-align:middle;background-image:url("main-13.png"),url("main-14.png");background-repeat:no-repeat,repeat-x;background-position:center center,top left;color:transparent;border:1px solid #000;border-radius:2px;cursor:pointer;pointer-events:auto}.ace_fold:hover{background-image:url("main-15.png"),url("main-16.png")}.ace_tooltip{background-color:#f5f5f5;border:1px solid gray;border-radius:1px;box-shadow:0 1px 2px rgba(0,0,0,.3);color:#000;max-width:100%;padding:3px 4px;position:fixed;z-index:999999;box-sizing:border-box;cursor:default;white-space:pre-wrap;word-wrap:break-word;line-height:normal;font-style:normal;font-weight:400;letter-spacing:normal;pointer-events:none;overflow:auto;max-width:min(60em,66vw);overscroll-behavior:contain}.ace_tooltip pre{white-space:pre-wrap}.ace_tooltip.ace_dark{background-color:#636363;color:#fff}.ace_tooltip:focus{outline:1px solid #5E9ED6}.ace_icon{display:inline-block;width:18px;vertical-align:top}.ace_icon_svg{display:inline-block;width:12px;vertical-align:top;-webkit-mask-repeat:no-repeat;-webkit-mask-size:12px;-webkit-mask-position:center}.ace_folding-enabled>.ace_gutter-cell,.ace_folding-enabled>.ace_gutter-cell_svg-icons{padding-right:13px}.ace_fold-widget{box-sizing:border-box;margin:0 -12px 0 1px;display:none;width:11px;vertical-align:top;background-image:url("main-17.png");background-repeat:no-repeat;background-position:center;border-radius:3px;border:1px solid transparent;cursor:pointer}.ace_folding-enabled .ace_fold-widget{display:inline-block}.ace_fold-widget.ace_end{background-image:url("main-18.png")}.ace_fold-widget.ace_closed{background-image:url("main-19.png")}.ace_fold-widget:hover{border:1px solid rgba(0,0,0,.3);background-color:rgba(255,255,255,.2);box-shadow:0 1px 1px rgba(255,255,255,.7)}.ace_fold-widget:active{border:1px solid rgba(0,0,0,.4);background-color:rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(255,255,255,.8)}.ace_dark .ace_fold-widget{background-image:url("main-20.png")}.ace_dark .ace_fold-widget.ace_end{background-image:url("main-21.png")}.ace_dark .ace_fold-widget.ace_closed{background-image:url("main-22.png")}.ace_dark .ace_fold-widget:hover{box-shadow:0 1px 1px rgba(255,255,255,.2);background-color:rgba(255,255,255,.1)}.ace_dark .ace_fold-widget:active{box-shadow:0 1px 1px rgba(255,255,255,.2)}.ace_inline_button{border:1px solid #d3d3d3;display:inline-block;margin:-1px 8px;padding:0 
5px;pointer-events:auto;cursor:pointer}.ace_inline_button:hover{border-color:gray;background:rgba(200,200,200,.2);display:inline-block;pointer-events:auto}.ace_fold-widget.ace_invalid{background-color:#ffb4b4;border-color:#de5555}.ace_fade-fold-widgets .ace_fold-widget{transition:opacity .4s ease 50ms;opacity:0}.ace_fade-fold-widgets:hover .ace_fold-widget{transition:opacity 50ms ease 50ms;opacity:1}.ace_underline{text-decoration:underline}.ace_bold{font-weight:700}.ace_nobold .ace_bold{font-weight:400}.ace_italic{font-style:italic}.ace_error-marker{background-color:rgba(255,0,0,.2);position:absolute;z-index:9}.ace_highlight-marker{background-color:rgba(255,255,0,.2);position:absolute;z-index:8}.ace_mobile-menu{position:absolute;line-height:1.5;border-radius:4px;-ms-user-select:none;-moz-user-select:none;-webkit-user-select:none;user-select:none;background:#fff;box-shadow:1px 3px 2px grey;border:1px solid #dcdcdc;color:#000}.ace_dark>.ace_mobile-menu{background:#333;color:#ccc;box-shadow:1px 3px 2px grey;border:1px solid #444}.ace_mobile-button{padding:2px;cursor:pointer;overflow:hidden}.ace_mobile-button:hover{background-color:#eee;opacity:1}.ace_mobile-button:active{background-color:#ddd}.ace_placeholder{position:relative;font-family:arial;transform:scale(.9);transform-origin:left;white-space:pre;opacity:.7;margin:0 10px;z-index:1}.ace_ghost_text{opacity:.5;font-style:italic}.ace_ghost_text_container>div{white-space:pre}.ghost_text_line_wrapped::after{content:"↩";position:absolute}.ace_lineWidgetContainer.ace_ghost_text{margin:0 4px}.ace_screenreader-only{position:absolute;left:-10000px;top:auto;width:1px;height:1px;overflow:hidden}.ace_hidden_token{display:none}.ace-tm .ace_gutter{background:#f0f0f0;color:#333}.ace-tm .ace_print-margin{width:1px;background:#e8e8e8}.ace-tm .ace_fold{background-color:#6b72e6}.ace-tm{background-color:#fff;color:#000}.ace-tm .ace_cursor{color:#000}.ace-tm .ace_invisible{color:#bfbfbf}.ace-tm .ace_keyword,.ace-tm .ace_storage{color:#00f}.ace-tm .ace_constant{color:#c5060b}.ace-tm .ace_constant.ace_buildin{color:#5848f6}.ace-tm .ace_constant.ace_language{color:#585cf6}.ace-tm .ace_constant.ace_library{color:#06960e}.ace-tm .ace_invalid{background-color:rgba(255,0,0,.1);color:red}.ace-tm .ace_support.ace_function{color:#3c4c72}.ace-tm .ace_support.ace_constant{color:#06960e}.ace-tm .ace_support.ace_class,.ace-tm .ace_support.ace_type{color:#6d79de}.ace-tm .ace_keyword.ace_operator{color:#687687}.ace-tm .ace_string{color:#036a07}.ace-tm .ace_comment{color:#4c886b}.ace-tm .ace_comment.ace_doc{color:#06f}.ace-tm .ace_comment.ace_doc.ace_tag{color:#809fbf}.ace-tm .ace_constant.ace_numeric{color:#0000cd}.ace-tm .ace_variable{color:#318495}.ace-tm .ace_xml-pe{color:#68685b}.ace-tm .ace_entity.ace_name.ace_function{color:#0000a2}.ace-tm .ace_heading{color:#0c07ff}.ace-tm .ace_list{color:#b90690}.ace-tm .ace_meta.ace_tag{color:#00168e}.ace-tm .ace_string.ace_regex{color:red}.ace-tm .ace_marker-layer .ace_selection{background:#b5d5ff}.ace-tm.ace_multiselect .ace_selection.ace_start{box-shadow:0 0 3px 0 #fff}.ace-tm .ace_marker-layer .ace_step{background:#fcff00}.ace-tm .ace_marker-layer .ace_stack{background:#a4e565}.ace-tm .ace_marker-layer .ace_bracket{margin:-1px 0 0 -1px;border:1px solid silver}.ace-tm .ace_marker-layer .ace_active-line{background:rgba(0,0,0,.07)}.ace-tm .ace_gutter-active-line{background-color:#dcdcdc}.ace-tm .ace_marker-layer .ace_selected-word{background:#fafaff;border:1px solid #c8c8fa}.ace-tm .ace_indent-guide{background:url("main-23.png") 
right repeat-y}.ace-tm .ace_indent-guide-active{background:url("main-24.png") right repeat-y}.error_widget_wrapper{background:inherit;color:inherit;border:none}.error_widget{border-top:solid 2px;border-bottom:solid 2px;margin:5px 0;padding:10px 40px;white-space:pre-wrap}.error_widget.ace_error,.error_widget_arrow.ace_error{border-color:#ff5a5a}.error_widget.ace_warning,.error_widget_arrow.ace_warning{border-color:#f1d817}.error_widget.ace_info,.error_widget_arrow.ace_info{border-color:#5a5a5a}.error_widget.ace_ok,.error_widget_arrow.ace_ok{border-color:#5aaa5a}.error_widget_arrow{position:absolute;border:solid 5px;border-top-color:transparent!important;border-right-color:transparent!important;border-left-color:transparent!important;top:-5px}.ace_codeLens{position:absolute;color:#aaa;font-size:88%;background:inherit;width:100%;display:flex;align-items:flex-end;pointer-events:none}.ace_codeLens>a{cursor:pointer;pointer-events:auto}.ace_codeLens>a:hover{color:#00f;text-decoration:underline}.ace_dark>.ace_codeLens>a:hover{color:#4e94ce}.ace_tooltip.command_bar_tooltip_wrapper{padding:0}.ace_tooltip .command_bar_tooltip{padding:1px 5px;display:flex;pointer-events:auto}.ace_tooltip .command_bar_tooltip.tooltip_more_options{padding:1px;flex-direction:column}div.command_bar_tooltip_button{display:inline-flex;cursor:pointer;margin:1px;border-radius:2px;padding:2px 5px;align-items:center}div.command_bar_tooltip_button.ace_selected,div.command_bar_tooltip_button:hover:not(.ace_disabled){background-color:rgba(0,0,0,.1)}div.command_bar_tooltip_button.ace_disabled{color:#777;pointer-events:none}div.command_bar_tooltip_button .ace_icon_svg{height:12px;background-color:#000}div.command_bar_tooltip_button.ace_disabled .ace_icon_svg{background-color:#777}.command_bar_tooltip.tooltip_more_options .command_bar_tooltip_button{display:flex}.command_bar_tooltip.command_bar_button_value{display:none}.command_bar_tooltip.tooltip_more_options .command_bar_button_value{display:inline-block;width:12px}.command_bar_button_caption{display:inline-block}.command_bar_keybinding{margin:0 2px;display:inline-block;font-size:8px}.command_bar_tooltip.tooltip_more_options .command_bar_keybinding{margin-left:auto}.command_bar_keybinding div{display:inline-block;min-width:8px;padding:2px;margin:0 1px;border-radius:2px;background-color:#ccc;text-align:center}.ace_dark.ace_tooltip .command_bar_tooltip{background-color:#373737;color:#eee}.ace_dark div.command_bar_tooltip_button.ace_disabled{color:#979797}.ace_dark div.command_bar_tooltip_button.ace_selected,.ace_dark div.command_bar_tooltip_button:hover:not(.ace_disabled){background-color:rgba(255,255,255,.1)}.ace_dark div.command_bar_tooltip_button .ace_icon_svg{background-color:#eee}.ace_dark div.command_bar_tooltip_button.ace_disabled .ace_icon_svg{background-color:#979797}.ace_dark .command_bar_tooltip_button.ace_disabled{color:#979797}.ace_dark .command_bar_keybinding div{background-color:#575757}.ace_checkmark::before{content:'✓'}.ace_snippet-marker{-moz-box-sizing:border-box;box-sizing:border-box;background:rgba(194,193,208,.09);border:1px dotted rgba(211,208,235,.62);position:absolute}.ace_editor.ace_autocomplete .ace_marker-layer .ace_active-line{background-color:#cad6fa;z-index:1}.ace_dark.ace_editor.ace_autocomplete .ace_marker-layer .ace_active-line{background-color:#3a674e}.ace_editor.ace_autocomplete .ace_line-hover{border:1px solid #abbffe;margin-top:-1px;background:rgba(233,233,253,.4);position:absolute;z-index:2}.ace_dark.ace_editor.ace_autocomplete 
.ace_line-hover{border:1px solid rgba(109,150,13,.8);background:rgba(58,103,78,.62)}.ace_completion-meta{opacity:.5;margin-left:.9em}.ace_completion-message{margin-left:.9em;color:#00f}.ace_editor.ace_autocomplete .ace_completion-highlight{color:#2d69c7}.ace_dark.ace_editor.ace_autocomplete .ace_completion-highlight{color:#93ca12}.ace_editor.ace_autocomplete{width:300px;z-index:200000;border:1px #d3d3d3 solid;position:fixed;box-shadow:2px 3px 5px rgba(0,0,0,.2);line-height:1.4;background:#fefefe;color:#111}.ace_dark.ace_editor.ace_autocomplete{border:1px #484747 solid;box-shadow:2px 3px 5px rgba(0,0,0,.51);line-height:1.4;background:#25282c;color:#c1c1c1}.ace_autocomplete .ace_text-layer{width:calc(100% - 8px)}.ace_autocomplete .ace_line{display:flex;align-items:center}.ace_autocomplete .ace_line>*{min-width:0;flex:0 0 auto}.ace_autocomplete .ace_line .ace_{flex:0 1 auto;overflow:hidden;text-overflow:ellipsis}.ace_autocomplete .ace_completion-spacer{flex:1}.ace_autocomplete.ace_loading:after{content:"";position:absolute;top:0;height:2px;width:8%;background:#00f;z-index:100;animation:ace_progress 3s infinite linear;animation-delay:.3s;transform:translateX(-100%) scaleX(1)}@keyframes ace_progress{0%{transform:translateX(-100%) scaleX(1)}50%{transform:translateX(625%) scaleX(2)}100%{transform:translateX(1500%) scaleX(3)}}@media (prefers-reduced-motion){.ace_autocomplete.ace_loading:after{transform:translateX(625%) scaleX(2);animation:none}}.ace_icon_svg.ace_arrow,.ace_icon_svg.ace_arrow_rotated{-webkit-mask-image:url("main-25.svg")}.ace_icon_svg.ace_arrow_rotated{transform:rotate(180deg)}div.command_bar_tooltip_button.completion_position{padding:0}#ace_settingsmenu,#kbshortcutmenu{background-color:#f7f7f7;color:#000;box-shadow:-5px 4px 5px rgba(126,126,126,.55);padding:1em .5em 2em 1em;overflow:auto;position:absolute;margin:0;bottom:0;right:0;top:0;z-index:9991;cursor:default}.ace_dark #ace_settingsmenu,.ace_dark #kbshortcutmenu{box-shadow:-20px 10px 25px rgba(126,126,126,.25);background-color:rgba(255,255,255,.6);color:#000}.ace_optionsMenuEntry:hover{background-color:rgba(100,100,100,.1);transition:all .3s}.ace_closeButton{background:rgba(245,146,146,.5);border:1px solid #f48a8a;border-radius:50%;padding:7px;position:absolute;right:-8px;top:-8px;z-index:100000}.ace_closeButton{background:rgba(245,146,146,.9)}.ace_optionsMenuKey{color:#483d8b;font-weight:700}.ace_optionsMenuCommand{color:#008b8b;font-weight:400}.ace_optionsMenuEntry button,.ace_optionsMenuEntry input{vertical-align:middle}.ace_optionsMenuEntry button[ace_selected_button=true]{background:#e7e7e7;box-shadow:1px 0 2px 0 #adadad inset;border-color:#adadad}.ace_optionsMenuEntry button{background:#fff;border:1px solid #d3d3d3;margin:0}.ace_optionsMenuEntry button:hover{background:#f0f0f0}.ace_prompt_container{max-width:603px;width:100%;margin:20px auto;padding:3px;background:#fff;border-radius:2px;box-shadow:0 2px 3px 0 #555}.ace_search{background-color:#ddd;color:#666;border:1px solid #cbcbcb;border-top:0 none;overflow:hidden;margin:0;padding:4px 6px 0 4px;position:absolute;top:0;z-index:99;white-space:normal}.ace_search.left{border-left:0 none;border-radius:0 0 5px 0;left:0}.ace_search.right{border-radius:0 0 0 5px;border-right:0 none;right:0}.ace_replace_form,.ace_search_form{margin:0 20px 4px 0;overflow:hidden;line-height:1.9}.ace_replace_form{margin-right:0}.ace_search_form.ace_nomatch{outline:1px solid red}.ace_search_field{border-radius:3px 0 0 3px;background-color:#fff;color:#000;border:1px solid #cbcbcb;border-right:0 
none;outline:0;padding:0;font-size:inherit;margin:0;line-height:inherit;padding:0 6px;min-width:17em;vertical-align:top;min-height:1.8em;box-sizing:content-box}.ace_searchbtn{border:1px solid #cbcbcb;line-height:inherit;display:inline-block;padding:0 6px;background:#fff;border-right:0 none;border-left:1px solid #dcdcdc;cursor:pointer;margin:0;position:relative;color:#666}.ace_searchbtn:last-child{border-radius:0 3px 3px 0;border-right:1px solid #cbcbcb}.ace_searchbtn:disabled{background:0 0;cursor:default}.ace_searchbtn:hover{background-color:#eef1f6}.ace_searchbtn.next,.ace_searchbtn.prev{padding:0 .7em}.ace_searchbtn.next:after,.ace_searchbtn.prev:after{content:"";border:solid 2px #888;width:.5em;height:.5em;border-width:2px 0 0 2px;display:inline-block;transform:rotate(-45deg)}.ace_searchbtn.next:after{border-width:0 2px 2px 0}.ace_searchbtn_close{background:url("main-26.png") no-repeat 50% 0;border-radius:50%;border:0 none;color:#656565;cursor:pointer;font:16px/16px Arial;padding:0;height:14px;width:14px;top:9px;right:7px;position:absolute}.ace_searchbtn_close:hover{background-color:#656565;background-position:50% 100%;color:#fff}.ace_button{margin-left:2px;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-o-user-select:none;-ms-user-select:none;user-select:none;overflow:hidden;opacity:.7;border:1px solid rgba(100,100,100,.23);padding:1px;box-sizing:border-box!important;color:#000}.ace_button:hover{background-color:#eee;opacity:1}.ace_button:active{background-color:#ddd}.ace_button.checked{border-color:#39f;opacity:1}.ace_search_options{margin-bottom:3px;text-align:right;-webkit-user-select:none;-moz-user-select:none;-o-user-select:none;-ms-user-select:none;user-select:none;clear:both}.ace_search_counter{float:left;font-family:arial;padding:0 8px}.ace_occur-highlight{border-radius:4px;background-color:rgba(87,255,8,.25);position:absolute;z-index:4;box-sizing:border-box;box-shadow:0 0 4px #5bff32}.ace_dark .ace_occur-highlight{background-color:#508c55;box-shadow:0 0 4px #3c7846}.ace_marker-layer .ace_isearch-result{position:absolute;z-index:6;box-sizing:border-box}div.ace_isearch-result{border-radius:4px;background-color:rgba(255,200,0,.5);box-shadow:0 0 4px #ffc800}.ace_dark div.ace_isearch-result{background-color:#646ea0;box-shadow:0 0 4px #505a8c}.emacs-mode .ace_cursor{border:1px rgba(50,250,50,.8) solid!important;box-sizing:border-box!important;background-color:rgba(0,250,0,.9);opacity:.5}.emacs-mode .ace_hidden-cursors .ace_cursor{opacity:1;background-color:transparent}.emacs-mode .ace_overwrite-cursors .ace_cursor{opacity:1;background-color:transparent;border-width:0 0 2px 2px!important}.emacs-mode .ace_text-layer{z-index:4}.emacs-mode .ace_cursor-layer{z-index:2}.normal-mode .ace_cursor{border:none;background-color:rgba(255,0,0,.5)}.normal-mode .ace_hidden-cursors .ace_cursor{background-color:transparent;border:1px solid red;opacity:.7}.ace_dialog{position:absolute;left:0;right:0;background:inherit;z-index:15;padding:.1em .8em;overflow:hidden;color:inherit}.ace_dialog-top{border-bottom:1px solid #444;top:0}.ace_dialog-bottom{border-top:1px solid #444;bottom:0}.ace_dialog input{border:none;outline:0;background:0 0;width:20em;color:inherit;font-family:monospace} +/*# sourceMappingURL=/sm/51590284bb802d5fb686f3a4c22ec5a8f0eefefb51088aaa9b3816f6c7e618c5.map */ \ No newline at end of file diff --git a/webui/vendor/socket.io.esm.min.js b/webui/vendor/socket.io.esm.min.js new file mode 100644 index 0000000000..53b3941c66 --- /dev/null +++ 
b/webui/vendor/socket.io.esm.min.js @@ -0,0 +1,7 @@ +/*! + * Socket.IO v4.8.1 + * (c) 2014-2024 Guillermo Rauch + * Released under the MIT License. + */ +const t=Object.create(null);t.open="0",t.close="1",t.ping="2",t.pong="3",t.message="4",t.upgrade="5",t.noop="6";const s=Object.create(null);Object.keys(t).forEach((i=>{s[t[i]]=i}));const i={type:"error",data:"parser error"},e="function"==typeof Blob||"undefined"!=typeof Blob&&"[object BlobConstructor]"===Object.prototype.toString.call(Blob),n="function"==typeof ArrayBuffer,r=t=>"function"==typeof ArrayBuffer.isView?ArrayBuffer.isView(t):t&&t.buffer instanceof ArrayBuffer,o=({type:s,data:i},o,c)=>e&&i instanceof Blob?o?c(i):h(i,c):n&&(i instanceof ArrayBuffer||r(i))?o?c(i):h(new Blob([i]),c):c(t[s]+(i||"")),h=(t,s)=>{const i=new FileReader;return i.onload=function(){const t=i.result.split(",")[1];s("b"+(t||""))},i.readAsDataURL(t)};function c(t){return t instanceof Uint8Array?t:t instanceof ArrayBuffer?new Uint8Array(t):new Uint8Array(t.buffer,t.byteOffset,t.byteLength)}let a;const u="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",f="undefined"==typeof Uint8Array?[]:new Uint8Array(256);for(let t=0;t<64;t++)f[u.charCodeAt(t)]=t;const l="function"==typeof ArrayBuffer,d=(t,e)=>{if("string"!=typeof t)return{type:"message",data:y(t,e)};const n=t.charAt(0);if("b"===n)return{type:"message",data:p(t.substring(1),e)};return s[n]?t.length>1?{type:s[n],data:t.substring(1)}:{type:s[n]}:i},p=(t,s)=>{if(l){const i=(t=>{let s,i,e,n,r,o=.75*t.length,h=t.length,c=0;"="===t[t.length-1]&&(o--,"="===t[t.length-2]&&o--);const a=new ArrayBuffer(o),u=new Uint8Array(a);for(s=0;s>4,u[c++]=(15&e)<<4|n>>2,u[c++]=(3&n)<<6|63&r;return a})(t);return y(i,s)}return{base64:!0,data:t}},y=(t,s)=>"blob"===s?t instanceof Blob?t:new Blob([t]):t instanceof ArrayBuffer?t:t.buffer,b=String.fromCharCode(30);function g(){return new TransformStream({transform(t,s){!function(t,s){e&&t.data instanceof Blob?t.data.arrayBuffer().then(c).then(s):n&&(t.data instanceof ArrayBuffer||r(t.data))?s(c(t.data)):o(t,!1,(t=>{a||(a=new TextEncoder),s(a.encode(t))}))}(t,(i=>{const e=i.length;let n;if(e<126)n=new Uint8Array(1),new DataView(n.buffer).setUint8(0,e);else if(e<65536){n=new Uint8Array(3);const t=new DataView(n.buffer);t.setUint8(0,126),t.setUint16(1,e)}else{n=new Uint8Array(9);const t=new DataView(n.buffer);t.setUint8(0,127),t.setBigUint64(1,BigInt(e))}t.data&&"string"!=typeof t.data&&(n[0]|=128),s.enqueue(n),s.enqueue(i)}))}})}let w;function v(t){return t.reduce(((t,s)=>t+s.length),0)}function m(t,s){if(t[0].length===s)return t.shift();const i=new Uint8Array(s);let e=0;for(let n=0;nPromise.resolve().then(t):(t,s)=>s(t,0),E="undefined"!=typeof self?self:"undefined"!=typeof window?window:Function("return this")();function O(t,...s){return s.reduce(((s,i)=>(t.hasOwnProperty(i)&&(s[i]=t[i]),s)),{})}const _=E.setTimeout,j=E.clearTimeout;function x(t,s){s.useNativeTimers?(t.setTimeoutFn=_.bind(E),t.clearTimeoutFn=j.bind(E)):(t.setTimeoutFn=E.setTimeout.bind(E),t.clearTimeoutFn=E.clearTimeout.bind(E))}function B(){return Date.now().toString(36).substring(3)+Math.random().toString(36).substring(2,5)}class C extends Error{constructor(t,s,i){super(t),this.description=s,this.context=i,this.type="TransportError"}}class T extends k{constructor(t){super(),this.writable=!1,x(this,t),this.opts=t,this.query=t.query,this.socket=t.socket,this.supportsBinary=!t.forceBase64}onError(t,s,i){return super.emitReserved("error",new C(t,s,i)),this}open(){return 
this.readyState="opening",this.doOpen(),this}close(){return"opening"!==this.readyState&&"open"!==this.readyState||(this.doClose(),this.onClose()),this}send(t){"open"===this.readyState&&this.write(t)}onOpen(){this.readyState="open",this.writable=!0,super.emitReserved("open")}onData(t){const s=d(t,this.socket.binaryType);this.onPacket(s)}onPacket(t){super.emitReserved("packet",t)}onClose(t){this.readyState="closed",super.emitReserved("close",t)}pause(t){}createUri(t,s={}){return t+"://"+this.i()+this.o()+this.opts.path+this.h(s)}i(){const t=this.opts.hostname;return-1===t.indexOf(":")?t:"["+t+"]"}o(){return this.opts.port&&(this.opts.secure&&Number(443!==this.opts.port)||!this.opts.secure&&80!==Number(this.opts.port))?":"+this.opts.port:""}h(t){const s=function(t){let s="";for(let i in t)t.hasOwnProperty(i)&&(s.length&&(s+="&"),s+=encodeURIComponent(i)+"="+encodeURIComponent(t[i]));return s}(t);return s.length?"?"+s:""}}class N extends T{constructor(){super(...arguments),this.u=!1}get name(){return"polling"}doOpen(){this.l()}pause(t){this.readyState="pausing";const s=()=>{this.readyState="paused",t()};if(this.u||!this.writable){let t=0;this.u&&(t++,this.once("pollComplete",(function(){--t||s()}))),this.writable||(t++,this.once("drain",(function(){--t||s()})))}else s()}l(){this.u=!0,this.doPoll(),this.emitReserved("poll")}onData(t){((t,s)=>{const i=t.split(b),e=[];for(let t=0;t{if("opening"===this.readyState&&"open"===t.type&&this.onOpen(),"close"===t.type)return this.onClose({description:"transport closed by the server"}),!1;this.onPacket(t)})),"closed"!==this.readyState&&(this.u=!1,this.emitReserved("pollComplete"),"open"===this.readyState&&this.l())}doClose(){const t=()=>{this.write([{type:"close"}])};"open"===this.readyState?t():this.once("open",t)}write(t){this.writable=!1,((t,s)=>{const i=t.length,e=new Array(i);let n=0;t.forEach(((t,r)=>{o(t,!1,(t=>{e[r]=t,++n===i&&s(e.join(b))}))}))})(t,(t=>{this.doWrite(t,(()=>{this.writable=!0,this.emitReserved("drain")}))}))}uri(){const t=this.opts.secure?"https":"http",s=this.query||{};return!1!==this.opts.timestampRequests&&(s[this.opts.timestampParam]=B()),this.supportsBinary||s.sid||(s.b64=1),this.createUri(t,s)}}let U=!1;try{U="undefined"!=typeof XMLHttpRequest&&"withCredentials"in new XMLHttpRequest}catch(t){}const P=U;function D(){}class M extends N{constructor(t){if(super(t),"undefined"!=typeof location){const s="https:"===location.protocol;let i=location.port;i||(i=s?"443":"80"),this.xd="undefined"!=typeof location&&t.hostname!==location.hostname||i!==t.port}}doWrite(t,s){const i=this.request({method:"POST",data:t});i.on("success",s),i.on("error",((t,s)=>{this.onError("xhr post error",t,s)}))}doPoll(){const t=this.request();t.on("data",this.onData.bind(this)),t.on("error",((t,s)=>{this.onError("xhr poll error",t,s)})),this.pollXhr=t}}class S extends k{constructor(t,s,i){super(),this.createRequest=t,x(this,i),this.p=i,this.v=i.method||"GET",this.m=s,this.k=void 0!==i.data?i.data:null,this.A()}A(){var t;const s=O(this.p,"agent","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","autoUnref");s.xdomain=!!this.p.xd;const i=this.O=this.createRequest(s);try{i.open(this.v,this.m,!0);try{if(this.p.extraHeaders){i.setDisableHeaderCheck&&i.setDisableHeaderCheck(!0);for(let t in 
this.p.extraHeaders)this.p.extraHeaders.hasOwnProperty(t)&&i.setRequestHeader(t,this.p.extraHeaders[t])}}catch(t){}if("POST"===this.v)try{i.setRequestHeader("Content-type","text/plain;charset=UTF-8")}catch(t){}try{i.setRequestHeader("Accept","*/*")}catch(t){}null===(t=this.p.cookieJar)||void 0===t||t.addCookies(i),"withCredentials"in i&&(i.withCredentials=this.p.withCredentials),this.p.requestTimeout&&(i.timeout=this.p.requestTimeout),i.onreadystatechange=()=>{var t;3===i.readyState&&(null===(t=this.p.cookieJar)||void 0===t||t.parseCookies(i.getResponseHeader("set-cookie"))),4===i.readyState&&(200===i.status||1223===i.status?this._():this.setTimeoutFn((()=>{this.j("number"==typeof i.status?i.status:0)}),0))},i.send(this.k)}catch(t){return void this.setTimeoutFn((()=>{this.j(t)}),0)}"undefined"!=typeof document&&(this.B=S.requestsCount++,S.requests[this.B]=this)}j(t){this.emitReserved("error",t,this.O),this.C(!0)}C(t){if(void 0!==this.O&&null!==this.O){if(this.O.onreadystatechange=D,t)try{this.O.abort()}catch(t){}"undefined"!=typeof document&&delete S.requests[this.B],this.O=null}}_(){const t=this.O.responseText;null!==t&&(this.emitReserved("data",t),this.emitReserved("success"),this.C())}abort(){this.C()}}if(S.requestsCount=0,S.requests={},"undefined"!=typeof document)if("function"==typeof attachEvent)attachEvent("onunload",L);else if("function"==typeof addEventListener){addEventListener("onpagehide"in E?"pagehide":"unload",L,!1)}function L(){for(let t in S.requests)S.requests.hasOwnProperty(t)&&S.requests[t].abort()}const R=function(){const t=F({xdomain:!1});return t&&null!==t.responseType}();class I extends M{constructor(t){super(t);const s=t&&t.forceBase64;this.supportsBinary=R&&!s}request(t={}){return Object.assign(t,{xd:this.xd},this.opts),new S(F,this.uri(),t)}}function F(t){const s=t.xdomain;try{if("undefined"!=typeof XMLHttpRequest&&(!s||P))return new XMLHttpRequest}catch(t){}if(!s)try{return new(E[["Active"].concat("Object").join("X")])("Microsoft.XMLHTTP")}catch(t){}}const $="undefined"!=typeof navigator&&"string"==typeof navigator.product&&"reactnative"===navigator.product.toLowerCase();class V extends T{get name(){return"websocket"}doOpen(){const t=this.uri(),s=this.opts.protocols,i=$?{}:O(this.opts,"agent","perMessageDeflate","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","localAddress","protocolVersion","origin","maxPayload","family","checkServerIdentity");this.opts.extraHeaders&&(i.headers=this.opts.extraHeaders);try{this.ws=this.createSocket(t,s,i)}catch(t){return this.emitReserved("error",t)}this.ws.binaryType=this.socket.binaryType,this.addEventListeners()}addEventListeners(){this.ws.onopen=()=>{this.opts.autoUnref&&this.ws.T.unref(),this.onOpen()},this.ws.onclose=t=>this.onClose({description:"websocket connection closed",context:t}),this.ws.onmessage=t=>this.onData(t.data),this.ws.onerror=t=>this.onError("websocket error",t)}write(t){this.writable=!1;for(let s=0;s{try{this.doWrite(i,t)}catch(t){}e&&A((()=>{this.writable=!0,this.emitReserved("drain")}),this.setTimeoutFn)}))}}doClose(){void 0!==this.ws&&(this.ws.onerror=()=>{},this.ws.close(),this.ws=null)}uri(){const t=this.opts.secure?"wss":"ws",s=this.query||{};return this.opts.timestampRequests&&(s[this.opts.timestampParam]=B()),this.supportsBinary||(s.b64=1),this.createUri(t,s)}}const H=E.WebSocket||E.MozWebSocket;class W extends V{createSocket(t,s,i){return $?new H(t,s,i):s?new H(t,s):new H(t)}doWrite(t,s){this.ws.send(s)}}class q extends T{get name(){return"webtransport"}doOpen(){try{this.N=new 
WebTransport(this.createUri("https"),this.opts.transportOptions[this.name])}catch(t){return this.emitReserved("error",t)}this.N.closed.then((()=>{this.onClose()})).catch((t=>{this.onError("webtransport error",t)})),this.N.ready.then((()=>{this.N.createBidirectionalStream().then((t=>{const s=function(t,s){w||(w=new TextDecoder);const e=[];let n=0,r=-1,o=!1;return new TransformStream({transform(h,c){for(e.push(h);;){if(0===n){if(v(e)<1)break;const t=m(e,1);o=!(128&~t[0]),r=127&t[0],n=r<126?3:126===r?1:2}else if(1===n){if(v(e)<2)break;const t=m(e,2);r=new DataView(t.buffer,t.byteOffset,t.length).getUint16(0),n=3}else if(2===n){if(v(e)<8)break;const t=m(e,8),s=new DataView(t.buffer,t.byteOffset,t.length),o=s.getUint32(0);if(o>Math.pow(2,21)-1){c.enqueue(i);break}r=o*Math.pow(2,32)+s.getUint32(4),n=3}else{if(v(e)t){c.enqueue(i);break}}}})}(Number.MAX_SAFE_INTEGER,this.socket.binaryType),e=t.readable.pipeThrough(s).getReader(),n=g();n.readable.pipeTo(t.writable),this.U=n.writable.getWriter();const r=()=>{e.read().then((({done:t,value:s})=>{t||(this.onPacket(s),r())})).catch((t=>{}))};r();const o={type:"open"};this.query.sid&&(o.data=`{"sid":"${this.query.sid}"}`),this.U.write(o).then((()=>this.onOpen()))}))}))}write(t){this.writable=!1;for(let s=0;s{e&&A((()=>{this.writable=!0,this.emitReserved("drain")}),this.setTimeoutFn)}))}}doClose(){var t;null===(t=this.N)||void 0===t||t.close()}}const X={websocket:W,webtransport:q,polling:I},z=/^(?:(?![^:@\/?#]+:[^:@\/]*@)(http|https|ws|wss):\/\/)?((?:(([^:@\/?#]*)(?::([^:@\/?#]*))?)?@)?((?:[a-f0-9]{0,4}:){2,7}[a-f0-9]{0,4}|[^:\/?#]*)(?::(\d*))?)(((\/(?:[^?#](?![^?#\/]*\.[^?#\/.]+(?:[?#]|$)))*\/?)?([^?#\/]*))(?:\?([^#]*))?(?:#(.*))?)/,J=["source","protocol","authority","userInfo","user","password","host","port","relative","path","directory","file","query","anchor"];function Q(t){if(t.length>8e3)throw"URI too long";const s=t,i=t.indexOf("["),e=t.indexOf("]");-1!=i&&-1!=e&&(t=t.substring(0,i)+t.substring(i,e).replace(/:/g,";")+t.substring(e,t.length));let n=z.exec(t||""),r={},o=14;for(;o--;)r[J[o]]=n[o]||"";return-1!=i&&-1!=e&&(r.source=s,r.host=r.host.substring(1,r.host.length-1).replace(/;/g,":"),r.authority=r.authority.replace("[","").replace("]","").replace(/;/g,":"),r.ipv6uri=!0),r.pathNames=function(t,s){const i=/\/{2,9}/g,e=s.replace(i,"/").split("/");"/"!=s.slice(0,1)&&0!==s.length||e.splice(0,1);"/"==s.slice(-1)&&e.splice(e.length-1,1);return e}(0,r.path),r.queryKey=function(t,s){const i={};return s.replace(/(?:^|&)([^&=]*)=?([^&]*)/g,(function(t,s,e){s&&(i[s]=e)})),i}(0,r.query),r}const G="function"==typeof addEventListener&&"function"==typeof removeEventListener,K=[];G&&addEventListener("offline",(()=>{K.forEach((t=>t()))}),!1);class Y extends k{constructor(t,s){if(super(),this.binaryType="arraybuffer",this.writeBuffer=[],this.P=0,this.D=-1,this.M=-1,this.S=-1,this.L=1/0,t&&"object"==typeof t&&(s=t,t=null),t){const i=Q(t);s.hostname=i.host,s.secure="https"===i.protocol||"wss"===i.protocol,s.port=i.port,i.query&&(s.query=i.query)}else s.host&&(s.hostname=Q(s.host).host);x(this,s),this.secure=null!=s.secure?s.secure:"undefined"!=typeof location&&"https:"===location.protocol,s.hostname&&!s.port&&(s.port=this.secure?"443":"80"),this.hostname=s.hostname||("undefined"!=typeof location?location.hostname:"localhost"),this.port=s.port||("undefined"!=typeof location&&location.port?location.port:this.secure?"443":"80"),this.transports=[],this.R={},s.transports.forEach((t=>{const 
s=t.prototype.name;this.transports.push(s),this.R[s]=t})),this.opts=Object.assign({path:"/engine.io",agent:!1,withCredentials:!1,upgrade:!0,timestampParam:"t",rememberUpgrade:!1,addTrailingSlash:!0,rejectUnauthorized:!0,perMessageDeflate:{threshold:1024},transportOptions:{},closeOnBeforeunload:!1},s),this.opts.path=this.opts.path.replace(/\/$/,"")+(this.opts.addTrailingSlash?"/":""),"string"==typeof this.opts.query&&(this.opts.query=function(t){let s={},i=t.split("&");for(let t=0,e=i.length;t{this.transport&&(this.transport.removeAllListeners(),this.transport.close())},addEventListener("beforeunload",this.I,!1)),"localhost"!==this.hostname&&(this.F=()=>{this.$("transport close",{description:"network connection lost"})},K.push(this.F))),this.opts.withCredentials&&(this.V=void 0),this.H()}createTransport(t){const s=Object.assign({},this.opts.query);s.EIO=4,s.transport=t,this.id&&(s.sid=this.id);const i=Object.assign({},this.opts,{query:s,socket:this,hostname:this.hostname,secure:this.secure,port:this.port},this.opts.transportOptions[t]);return new this.R[t](i)}H(){if(0===this.transports.length)return void this.setTimeoutFn((()=>{this.emitReserved("error","No transports available")}),0);const t=this.opts.rememberUpgrade&&Y.priorWebsocketSuccess&&-1!==this.transports.indexOf("websocket")?"websocket":this.transports[0];this.readyState="opening";const s=this.createTransport(t);s.open(),this.setTransport(s)}setTransport(t){this.transport&&this.transport.removeAllListeners(),this.transport=t,t.on("drain",this.W.bind(this)).on("packet",this.q.bind(this)).on("error",this.j.bind(this)).on("close",(t=>this.$("transport close",t)))}onOpen(){this.readyState="open",Y.priorWebsocketSuccess="websocket"===this.transport.name,this.emitReserved("open"),this.flush()}q(t){if("opening"===this.readyState||"open"===this.readyState||"closing"===this.readyState)switch(this.emitReserved("packet",t),this.emitReserved("heartbeat"),t.type){case"open":this.onHandshake(JSON.parse(t.data));break;case"ping":this.X("pong"),this.emitReserved("ping"),this.emitReserved("pong"),this.J();break;case"error":const s=new Error("server error");s.code=t.data,this.j(s);break;case"message":this.emitReserved("data",t.data),this.emitReserved("message",t.data)}}onHandshake(t){this.emitReserved("handshake",t),this.id=t.sid,this.transport.query.sid=t.sid,this.D=t.pingInterval,this.M=t.pingTimeout,this.S=t.maxPayload,this.onOpen(),"closed"!==this.readyState&&this.J()}J(){this.clearTimeoutFn(this.G);const t=this.D+this.M;this.L=Date.now()+t,this.G=this.setTimeoutFn((()=>{this.$("ping timeout")}),t),this.opts.autoUnref&&this.G.unref()}W(){this.writeBuffer.splice(0,this.P),this.P=0,0===this.writeBuffer.length?this.emitReserved("drain"):this.flush()}flush(){if("closed"!==this.readyState&&this.transport.writable&&!this.upgrading&&this.writeBuffer.length){const t=this.K();this.transport.send(t),this.P=t.length,this.emitReserved("flush")}}K(){if(!(this.S&&"polling"===this.transport.name&&this.writeBuffer.length>1))return this.writeBuffer;let t=1;for(let i=0;i=57344?i+=3:(e++,i+=4);return i}(s):Math.ceil(1.33*(s.byteLength||s.size))),i>0&&t>this.S)return this.writeBuffer.slice(0,i);t+=2}var s;return this.writeBuffer}Y(){if(!this.L)return!0;const t=Date.now()>this.L;return t&&(this.L=0,A((()=>{this.$("ping timeout")}),this.setTimeoutFn)),t}write(t,s,i){return this.X("message",t,s,i),this}send(t,s,i){return this.X("message",t,s,i),this}X(t,s,i,e){if("function"==typeof s&&(e=s,s=void 0),"function"==typeof 
i&&(e=i,i=null),"closing"===this.readyState||"closed"===this.readyState)return;(i=i||{}).compress=!1!==i.compress;const n={type:t,data:s,options:i};this.emitReserved("packetCreate",n),this.writeBuffer.push(n),e&&this.once("flush",e),this.flush()}close(){const t=()=>{this.$("forced close"),this.transport.close()},s=()=>{this.off("upgrade",s),this.off("upgradeError",s),t()},i=()=>{this.once("upgrade",s),this.once("upgradeError",s)};return"opening"!==this.readyState&&"open"!==this.readyState||(this.readyState="closing",this.writeBuffer.length?this.once("drain",(()=>{this.upgrading?i():t()})):this.upgrading?i():t()),this}j(t){if(Y.priorWebsocketSuccess=!1,this.opts.tryAllTransports&&this.transports.length>1&&"opening"===this.readyState)return this.transports.shift(),this.H();this.emitReserved("error",t),this.$("transport error",t)}$(t,s){if("opening"===this.readyState||"open"===this.readyState||"closing"===this.readyState){if(this.clearTimeoutFn(this.G),this.transport.removeAllListeners("close"),this.transport.close(),this.transport.removeAllListeners(),G&&(this.I&&removeEventListener("beforeunload",this.I,!1),this.F)){const t=K.indexOf(this.F);-1!==t&&K.splice(t,1)}this.readyState="closed",this.id=null,this.emitReserved("close",t,s),this.writeBuffer=[],this.P=0}}}Y.protocol=4;class Z extends Y{constructor(){super(...arguments),this.Z=[]}onOpen(){if(super.onOpen(),"open"===this.readyState&&this.opts.upgrade)for(let t=0;t{i||(s.send([{type:"ping",data:"probe"}]),s.once("packet",(t=>{if(!i)if("pong"===t.type&&"probe"===t.data){if(this.upgrading=!0,this.emitReserved("upgrading",s),!s)return;Y.priorWebsocketSuccess="websocket"===s.name,this.transport.pause((()=>{i||"closed"!==this.readyState&&(a(),this.setTransport(s),s.send([{type:"upgrade"}]),this.emitReserved("upgrade",s),s=null,this.upgrading=!1,this.flush())}))}else{const t=new Error("probe error");t.transport=s.name,this.emitReserved("upgradeError",t)}})))};function n(){i||(i=!0,a(),s.close(),s=null)}const r=t=>{const i=new Error("probe error: "+t);i.transport=s.name,n(),this.emitReserved("upgradeError",i)};function o(){r("transport closed")}function h(){r("socket closed")}function c(t){s&&t.name!==s.name&&n()}const a=()=>{s.removeListener("open",e),s.removeListener("error",r),s.removeListener("close",o),this.off("close",h),this.off("upgrading",c)};s.once("open",e),s.once("error",r),s.once("close",o),this.once("close",h),this.once("upgrading",c),-1!==this.Z.indexOf("webtransport")&&"webtransport"!==t?this.setTimeoutFn((()=>{i||s.open()}),200):s.open()}onHandshake(t){this.Z=this.st(t.upgrades),super.onHandshake(t)}st(t){const s=[];for(let i=0;iX[t])).filter((t=>!!t))),super(t,i)}}class st extends N{doPoll(){this.it().then((t=>{if(!t.ok)return this.onError("fetch read error",t.status,t);t.text().then((t=>this.onData(t)))})).catch((t=>{this.onError("fetch read error",t)}))}doWrite(t,s){this.it(t).then((t=>{if(!t.ok)return this.onError("fetch write error",t.status,t);s()})).catch((t=>{this.onError("fetch write error",t)}))}it(t){var s;const i=void 0!==t,e=new Headers(this.opts.extraHeaders);return i&&e.set("content-type","text/plain;charset=UTF-8"),null===(s=this.socket.V)||void 0===s||s.appendCookies(e),fetch(this.uri(),{method:i?"POST":"GET",body:i?t:null,headers:e,credentials:this.opts.withCredentials?"include":"omit"}).then((t=>{var s;return null===(s=this.socket.V)||void 0===s||s.parseCookies(t.headers.getSetCookie()),t}))}}const it="function"==typeof ArrayBuffer,et=t=>"function"==typeof ArrayBuffer.isView?ArrayBuffer.isView(t):t.buffer 
instanceof ArrayBuffer,nt=Object.prototype.toString,rt="function"==typeof Blob||"undefined"!=typeof Blob&&"[object BlobConstructor]"===nt.call(Blob),ot="function"==typeof File||"undefined"!=typeof File&&"[object FileConstructor]"===nt.call(File);function ht(t){return it&&(t instanceof ArrayBuffer||et(t))||rt&&t instanceof Blob||ot&&t instanceof File}function ct(t,s){if(!t||"object"!=typeof t)return!1;if(Array.isArray(t)){for(let s=0,i=t.length;s=0&&t.num{delete this.acks[t];for(let s=0;s{this.io.clearTimeoutFn(n),s.apply(this,t)};r.withError=!0,this.acks[t]=r}emitWithAck(t,...s){return new Promise(((i,e)=>{const n=(t,s)=>t?e(t):i(s);n.withError=!0,s.push(n),this.emit(t,...s)}))}ut(t){let s;"function"==typeof t[t.length-1]&&(s=t.pop());const i={id:this.rt++,tryCount:0,pending:!1,args:t,flags:Object.assign({fromQueue:!0},this.flags)};t.push(((t,...e)=>{if(i!==this.nt[0])return;return null!==t?i.tryCount>this.p.retries&&(this.nt.shift(),s&&s(t)):(this.nt.shift(),s&&s(null,...e)),i.pending=!1,this.lt()})),this.nt.push(i),this.lt()}lt(t=!1){if(!this.connected||0===this.nt.length)return;const s=this.nt[0];s.pending&&!t||(s.pending=!0,s.tryCount++,this.flags=s.flags,this.emit.apply(this,s.args))}packet(t){t.nsp=this.nsp,this.io.dt(t)}onopen(){"function"==typeof this.auth?this.auth((t=>{this.yt(t)})):this.yt(this.auth)}yt(t){this.packet({type:yt.CONNECT,data:this.bt?Object.assign({pid:this.bt,offset:this.gt},t):t})}onerror(t){this.connected||this.emitReserved("connect_error",t)}onclose(t,s){this.connected=!1,delete this.id,this.emitReserved("disconnect",t,s),this.wt()}wt(){Object.keys(this.acks).forEach((t=>{if(!this.sendBuffer.some((s=>String(s.id)===t))){const s=this.acks[t];delete this.acks[t],s.withError&&s.call(this,new Error("socket has been disconnected"))}}))}onpacket(t){if(t.nsp===this.nsp)switch(t.type){case yt.CONNECT:t.data&&t.data.sid?this.onconnect(t.data.sid,t.data.pid):this.emitReserved("connect_error",new Error("It seems you are trying to reach a Socket.IO server in v2.x with a v3.x client, but they are not compatible (more information here: https://socket.io/docs/v3/migrating-from-2-x-to-3-0/)"));break;case yt.EVENT:case yt.BINARY_EVENT:this.onevent(t);break;case yt.ACK:case yt.BINARY_ACK:this.onack(t);break;case yt.DISCONNECT:this.ondisconnect();break;case yt.CONNECT_ERROR:this.destroy();const s=new Error(t.data.message);s.data=t.data.data,this.emitReserved("connect_error",s)}}onevent(t){const s=t.data||[];null!=t.id&&s.push(this.ack(t.id)),this.connected?this.emitEvent(s):this.receiveBuffer.push(Object.freeze(s))}emitEvent(t){if(this.vt&&this.vt.length){const s=this.vt.slice();for(const i of s)i.apply(this,t)}super.emit.apply(this,t),this.bt&&t.length&&"string"==typeof t[t.length-1]&&(this.gt=t[t.length-1])}ack(t){const s=this;let i=!1;return function(...e){i||(i=!0,s.packet({type:yt.ACK,id:t,data:e}))}}onack(t){const s=this.acks[t.id];"function"==typeof s&&(delete this.acks[t.id],s.withError&&t.data.unshift(null),s.apply(this,t.data))}onconnect(t,s){this.id=t,this.recovered=s&&this.bt===s,this.bt=s,this.connected=!0,this.emitBuffered(),this.emitReserved("connect"),this.lt(!0)}emitBuffered(){this.receiveBuffer.forEach((t=>this.emitEvent(t))),this.receiveBuffer=[],this.sendBuffer.forEach((t=>{this.notifyOutgoingListeners(t),this.packet(t)})),this.sendBuffer=[]}ondisconnect(){this.destroy(),this.onclose("io server disconnect")}destroy(){this.subs&&(this.subs.forEach((t=>t())),this.subs=void 0),this.io.kt(this)}disconnect(){return 
this.connected&&this.packet({type:yt.DISCONNECT}),this.destroy(),this.connected&&this.onclose("io client disconnect"),this}close(){return this.disconnect()}compress(t){return this.flags.compress=t,this}get volatile(){return this.flags.volatile=!0,this}timeout(t){return this.flags.timeout=t,this}onAny(t){return this.vt=this.vt||[],this.vt.push(t),this}prependAny(t){return this.vt=this.vt||[],this.vt.unshift(t),this}offAny(t){if(!this.vt)return this;if(t){const s=this.vt;for(let i=0;i0&&t.jitter<=1?t.jitter:0,this.attempts=0}Ot.prototype.duration=function(){var t=this.ms*Math.pow(this.factor,this.attempts++);if(this.jitter){var s=Math.random(),i=Math.floor(s*this.jitter*t);t=1&Math.floor(10*s)?t+i:t-i}return 0|Math.min(t,this.max)},Ot.prototype.reset=function(){this.attempts=0},Ot.prototype.setMin=function(t){this.ms=t},Ot.prototype.setMax=function(t){this.max=t},Ot.prototype.setJitter=function(t){this.jitter=t};class _t extends k{constructor(t,s){var i;super(),this.nsps={},this.subs=[],t&&"object"==typeof t&&(s=t,t=void 0),(s=s||{}).path=s.path||"/socket.io",this.opts=s,x(this,s),this.reconnection(!1!==s.reconnection),this.reconnectionAttempts(s.reconnectionAttempts||1/0),this.reconnectionDelay(s.reconnectionDelay||1e3),this.reconnectionDelayMax(s.reconnectionDelayMax||5e3),this.randomizationFactor(null!==(i=s.randomizationFactor)&&void 0!==i?i:.5),this.backoff=new Ot({min:this.reconnectionDelay(),max:this.reconnectionDelayMax(),jitter:this.randomizationFactor()}),this.timeout(null==s.timeout?2e4:s.timeout),this.ct="closed",this.uri=t;const e=s.parser||mt;this.encoder=new e.Encoder,this.decoder=new e.Decoder,this.ot=!1!==s.autoConnect,this.ot&&this.open()}reconnection(t){return arguments.length?(this.Et=!!t,t||(this.skipReconnect=!0),this):this.Et}reconnectionAttempts(t){return void 0===t?this.Ot:(this.Ot=t,this)}reconnectionDelay(t){var s;return void 0===t?this._t:(this._t=t,null===(s=this.backoff)||void 0===s||s.setMin(t),this)}randomizationFactor(t){var s;return void 0===t?this.jt:(this.jt=t,null===(s=this.backoff)||void 0===s||s.setJitter(t),this)}reconnectionDelayMax(t){var s;return void 0===t?this.xt:(this.xt=t,null===(s=this.backoff)||void 0===s||s.setMax(t),this)}timeout(t){return arguments.length?(this.Bt=t,this):this.Bt}maybeReconnectOnOpen(){!this.ht&&this.Et&&0===this.backoff.attempts&&this.reconnect()}open(t){if(~this.ct.indexOf("open"))return this;this.engine=new tt(this.uri,this.opts);const s=this.engine,i=this;this.ct="opening",this.skipReconnect=!1;const e=kt(s,"open",(function(){i.onopen(),t&&t()})),n=s=>{this.cleanup(),this.ct="closed",this.emitReserved("error",s),t?t(s):this.maybeReconnectOnOpen()},r=kt(s,"error",n);if(!1!==this.Bt){const t=this.Bt,i=this.setTimeoutFn((()=>{e(),n(new Error("timeout")),s.close()}),t);this.opts.autoUnref&&i.unref(),this.subs.push((()=>{this.clearTimeoutFn(i)}))}return this.subs.push(e),this.subs.push(r),this}connect(t){return this.open(t)}onopen(){this.cleanup(),this.ct="open",this.emitReserved("open");const t=this.engine;this.subs.push(kt(t,"ping",this.onping.bind(this)),kt(t,"data",this.ondata.bind(this)),kt(t,"error",this.onerror.bind(this)),kt(t,"close",this.onclose.bind(this)),kt(this.decoder,"decoded",this.ondecoded.bind(this)))}onping(){this.emitReserved("ping")}ondata(t){try{this.decoder.add(t)}catch(t){this.onclose("parse error",t)}}ondecoded(t){A((()=>{this.emitReserved("packet",t)}),this.setTimeoutFn)}onerror(t){this.emitReserved("error",t)}socket(t,s){let i=this.nsps[t];return i?this.ot&&!i.active&&i.connect():(i=new 
Et(this,t,s),this.nsps[t]=i),i}kt(t){const s=Object.keys(this.nsps);for(const t of s){if(this.nsps[t].active)return}this.Ct()}dt(t){const s=this.encoder.encode(t);for(let i=0;it())),this.subs.length=0,this.decoder.destroy()}Ct(){this.skipReconnect=!0,this.ht=!1,this.onclose("forced close")}disconnect(){return this.Ct()}onclose(t,s){var i;this.cleanup(),null===(i=this.engine)||void 0===i||i.close(),this.backoff.reset(),this.ct="closed",this.emitReserved("close",t,s),this.Et&&!this.skipReconnect&&this.reconnect()}reconnect(){if(this.ht||this.skipReconnect)return this;const t=this;if(this.backoff.attempts>=this.Ot)this.backoff.reset(),this.emitReserved("reconnect_failed"),this.ht=!1;else{const s=this.backoff.duration();this.ht=!0;const i=this.setTimeoutFn((()=>{t.skipReconnect||(this.emitReserved("reconnect_attempt",t.backoff.attempts),t.skipReconnect||t.open((s=>{s?(t.ht=!1,t.reconnect(),this.emitReserved("reconnect_error",s)):t.onreconnect()})))}),s);this.opts.autoUnref&&i.unref(),this.subs.push((()=>{this.clearTimeoutFn(i)}))}}onreconnect(){const t=this.backoff.attempts;this.ht=!1,this.backoff.reset(),this.emitReserved("reconnect",t)}}const jt={};function xt(t,s){"object"==typeof t&&(s=t,t=void 0);const i=function(t,s="",i){let e=t;i=i||"undefined"!=typeof location&&location,null==t&&(t=i.protocol+"//"+i.host),"string"==typeof t&&("/"===t.charAt(0)&&(t="/"===t.charAt(1)?i.protocol+t:i.host+t),/^(https?|wss?):\/\//.test(t)||(t=void 0!==i?i.protocol+"//"+t:"https://"+t),e=Q(t)),e.port||(/^(http|ws)$/.test(e.protocol)?e.port="80":/^(http|ws)s$/.test(e.protocol)&&(e.port="443")),e.path=e.path||"/";const n=-1!==e.host.indexOf(":")?"["+e.host+"]":e.host;return e.id=e.protocol+"://"+n+":"+e.port+s,e.href=e.protocol+"://"+n+(i&&i.port===e.port?"":":"+e.port),e}(t,(s=s||{}).path||"/socket.io"),e=i.source,n=i.id,r=i.path,o=jt[n]&&r in jt[n].nsps;let h;return s.forceNew||s["force new connection"]||!1===s.multiplex||o?h=new _t(e,s):(jt[n]||(jt[n]=new _t(e,s)),h=jt[n]),i.query&&!s.query&&(s.query=i.queryKey),h.socket(i.path,s)}Object.assign(xt,{Manager:_t,Socket:Et,io:xt,connect:xt});export{st as Fetch,_t as Manager,W as NodeWebSocket,I as NodeXHR,Et as Socket,W as WebSocket,q as WebTransport,I as XHR,xt as connect,xt as default,xt as io,pt as protocol}; +//# sourceMappingURL=socket.io.esm.min.js.map diff --git a/webui/vendor/socket.io.min.js b/webui/vendor/socket.io.min.js new file mode 100644 index 0000000000..c72110d7fa --- /dev/null +++ b/webui/vendor/socket.io.min.js @@ -0,0 +1,7 @@ +/*! + * Socket.IO v4.8.1 + * (c) 2014-2024 Guillermo Rauch + * Released under the MIT License. 
+ */ +!function(t,n){"object"==typeof exports&&"undefined"!=typeof module?module.exports=n():"function"==typeof define&&define.amd?define(n):(t="undefined"!=typeof globalThis?globalThis:t||self).io=n()}(this,(function(){"use strict";function t(t,n){(null==n||n>t.length)&&(n=t.length);for(var i=0,r=Array(n);i=n.length?{done:!0}:{done:!1,value:n[e++]}},e:function(t){throw t},f:o}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var s,u=!0,h=!1;return{s:function(){r=r.call(n)},n:function(){var t=r.next();return u=t.done,t},e:function(t){h=!0,s=t},f:function(){try{u||null==r.return||r.return()}finally{if(h)throw s}}}}function e(){return e=Object.assign?Object.assign.bind():function(t){for(var n=1;n1?{type:l[i],data:t.substring(1)}:{type:l[i]}:d},N=function(t,n){if(B){var i=function(t){var n,i,r,e,o,s=.75*t.length,u=t.length,h=0;"="===t[t.length-1]&&(s--,"="===t[t.length-2]&&s--);var f=new ArrayBuffer(s),c=new Uint8Array(f);for(n=0;n>4,c[h++]=(15&r)<<4|e>>2,c[h++]=(3&e)<<6|63&o;return f}(t);return C(i,n)}return{base64:!0,data:t}},C=function(t,n){return"blob"===n?t instanceof Blob?t:new Blob([t]):t instanceof ArrayBuffer?t:t.buffer},T=String.fromCharCode(30);function U(){return new TransformStream({transform:function(t,n){!function(t,n){y&&t.data instanceof Blob?t.data.arrayBuffer().then(k).then(n):b&&(t.data instanceof ArrayBuffer||w(t.data))?n(k(t.data)):g(t,!1,(function(t){p||(p=new TextEncoder),n(p.encode(t))}))}(t,(function(i){var r,e=i.length;if(e<126)r=new Uint8Array(1),new DataView(r.buffer).setUint8(0,e);else if(e<65536){r=new Uint8Array(3);var o=new DataView(r.buffer);o.setUint8(0,126),o.setUint16(1,e)}else{r=new Uint8Array(9);var s=new DataView(r.buffer);s.setUint8(0,127),s.setBigUint64(1,BigInt(e))}t.data&&"string"!=typeof t.data&&(r[0]|=128),n.enqueue(r),n.enqueue(i)}))}})}function M(t){return t.reduce((function(t,n){return t+n.length}),0)}function x(t,n){if(t[0].length===n)return t.shift();for(var i=new Uint8Array(n),r=0,e=0;e1?n-1:0),r=1;r1&&void 0!==arguments[1]?arguments[1]:{};return t+"://"+this.i()+this.o()+this.opts.path+this.u(n)},i.i=function(){var t=this.opts.hostname;return-1===t.indexOf(":")?t:"["+t+"]"},i.o=function(){return this.opts.port&&(this.opts.secure&&Number(443!==this.opts.port)||!this.opts.secure&&80!==Number(this.opts.port))?":"+this.opts.port:""},i.u=function(t){var n=function(t){var n="";for(var i in t)t.hasOwnProperty(i)&&(n.length&&(n+="&"),n+=encodeURIComponent(i)+"="+encodeURIComponent(t[i]));return n}(t);return n.length?"?"+n:""},n}(I),X=function(t){function n(){var n;return(n=t.apply(this,arguments)||this).h=!1,n}s(n,t);var r=n.prototype;return r.doOpen=function(){this.v()},r.pause=function(t){var n=this;this.readyState="pausing";var i=function(){n.readyState="paused",t()};if(this.h||!this.writable){var r=0;this.h&&(r++,this.once("pollComplete",(function(){--r||i()}))),this.writable||(r++,this.once("drain",(function(){--r||i()})))}else i()},r.v=function(){this.h=!0,this.doPoll(),this.emitReserved("poll")},r.onData=function(t){var n=this;(function(t,n){for(var i=t.split(T),r=[],e=0;e0&&void 0!==arguments[0]?arguments[0]:{};return e(t,{xd:this.xd},this.opts),new Y(tt,this.uri(),t)},n}(K);function tt(t){var n=t.xdomain;try{if("undefined"!=typeof XMLHttpRequest&&(!n||z))return new XMLHttpRequest}catch(t){}if(!n)try{return new(L[["Active"].concat("Object").join("X")])("Microsoft.XMLHTTP")}catch(t){}}var nt="undefined"!=typeof 
navigator&&"string"==typeof navigator.product&&"reactnative"===navigator.product.toLowerCase(),it=function(t){function n(){return t.apply(this,arguments)||this}s(n,t);var r=n.prototype;return r.doOpen=function(){var t=this.uri(),n=this.opts.protocols,i=nt?{}:_(this.opts,"agent","perMessageDeflate","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","localAddress","protocolVersion","origin","maxPayload","family","checkServerIdentity");this.opts.extraHeaders&&(i.headers=this.opts.extraHeaders);try{this.ws=this.createSocket(t,n,i)}catch(t){return this.emitReserved("error",t)}this.ws.binaryType=this.socket.binaryType,this.addEventListeners()},r.addEventListeners=function(){var t=this;this.ws.onopen=function(){t.opts.autoUnref&&t.ws.C.unref(),t.onOpen()},this.ws.onclose=function(n){return t.onClose({description:"websocket connection closed",context:n})},this.ws.onmessage=function(n){return t.onData(n.data)},this.ws.onerror=function(n){return t.onError("websocket error",n)}},r.write=function(t){var n=this;this.writable=!1;for(var i=function(){var i=t[r],e=r===t.length-1;g(i,n.supportsBinary,(function(t){try{n.doWrite(i,t)}catch(t){}e&&R((function(){n.writable=!0,n.emitReserved("drain")}),n.setTimeoutFn)}))},r=0;rMath.pow(2,21)-1){u.enqueue(d);break}e=v*Math.pow(2,32)+a.getUint32(4),r=3}else{if(M(i)t){u.enqueue(d);break}}}})}(Number.MAX_SAFE_INTEGER,t.socket.binaryType),r=n.readable.pipeThrough(i).getReader(),e=U();e.readable.pipeTo(n.writable),t.U=e.writable.getWriter();!function n(){r.read().then((function(i){var r=i.done,e=i.value;r||(t.onPacket(e),n())})).catch((function(t){}))}();var o={type:"open"};t.query.sid&&(o.data='{"sid":"'.concat(t.query.sid,'"}')),t.U.write(o).then((function(){return t.onOpen()}))}))}))},r.write=function(t){var n=this;this.writable=!1;for(var i=function(){var i=t[r],e=r===t.length-1;n.U.write(i).then((function(){e&&R((function(){n.writable=!0,n.emitReserved("drain")}),n.setTimeoutFn)}))},r=0;r8e3)throw"URI too long";var n=t,i=t.indexOf("["),r=t.indexOf("]");-1!=i&&-1!=r&&(t=t.substring(0,i)+t.substring(i,r).replace(/:/g,";")+t.substring(r,t.length));for(var e,o,s=ut.exec(t||""),u={},h=14;h--;)u[ht[h]]=s[h]||"";return-1!=i&&-1!=r&&(u.source=n,u.host=u.host.substring(1,u.host.length-1).replace(/;/g,":"),u.authority=u.authority.replace("[","").replace("]","").replace(/;/g,":"),u.ipv6uri=!0),u.pathNames=function(t,n){var i=/\/{2,9}/g,r=n.replace(i,"/").split("/");"/"!=n.slice(0,1)&&0!==n.length||r.splice(0,1);"/"==n.slice(-1)&&r.splice(r.length-1,1);return r}(0,u.path),u.queryKey=(e=u.query,o={},e.replace(/(?:^|&)([^&=]*)=?([^&]*)/g,(function(t,n,i){n&&(o[n]=i)})),o),u}var ct="function"==typeof addEventListener&&"function"==typeof removeEventListener,at=[];ct&&addEventListener("offline",(function(){at.forEach((function(t){return t()}))}),!1);var vt=function(t){function n(n,i){var r;if((r=t.call(this)||this).binaryType="arraybuffer",r.writeBuffer=[],r.M=0,r.I=-1,r.R=-1,r.L=-1,r._=1/0,n&&"object"===c(n)&&(i=n,n=null),n){var o=ft(n);i.hostname=o.host,i.secure="https"===o.protocol||"wss"===o.protocol,i.port=o.port,o.query&&(i.query=o.query)}else i.host&&(i.hostname=ft(i.host).host);return $(r,i),r.secure=null!=i.secure?i.secure:"undefined"!=typeof location&&"https:"===location.protocol,i.hostname&&!i.port&&(i.port=r.secure?"443":"80"),r.hostname=i.hostname||("undefined"!=typeof location?location.hostname:"localhost"),r.port=i.port||("undefined"!=typeof 
location&&location.port?location.port:r.secure?"443":"80"),r.transports=[],r.D={},i.transports.forEach((function(t){var n=t.prototype.name;r.transports.push(n),r.D[n]=t})),r.opts=e({path:"/engine.io",agent:!1,withCredentials:!1,upgrade:!0,timestampParam:"t",rememberUpgrade:!1,addTrailingSlash:!0,rejectUnauthorized:!0,perMessageDeflate:{threshold:1024},transportOptions:{},closeOnBeforeunload:!1},i),r.opts.path=r.opts.path.replace(/\/$/,"")+(r.opts.addTrailingSlash?"/":""),"string"==typeof r.opts.query&&(r.opts.query=function(t){for(var n={},i=t.split("&"),r=0,e=i.length;r1))return this.writeBuffer;for(var t,n=1,i=0;i=57344?i+=3:(r++,i+=4);return i}(t):Math.ceil(1.33*(t.byteLength||t.size))),i>0&&n>this.L)return this.writeBuffer.slice(0,i);n+=2}return this.writeBuffer},i.W=function(){var t=this;if(!this._)return!0;var n=Date.now()>this._;return n&&(this._=0,R((function(){t.F("ping timeout")}),this.setTimeoutFn)),n},i.write=function(t,n,i){return this.J("message",t,n,i),this},i.send=function(t,n,i){return this.J("message",t,n,i),this},i.J=function(t,n,i,r){if("function"==typeof n&&(r=n,n=void 0),"function"==typeof i&&(r=i,i=null),"closing"!==this.readyState&&"closed"!==this.readyState){(i=i||{}).compress=!1!==i.compress;var e={type:t,data:n,options:i};this.emitReserved("packetCreate",e),this.writeBuffer.push(e),r&&this.once("flush",r),this.flush()}},i.close=function(){var t=this,n=function(){t.F("forced close"),t.transport.close()},i=function i(){t.off("upgrade",i),t.off("upgradeError",i),n()},r=function(){t.once("upgrade",i),t.once("upgradeError",i)};return"opening"!==this.readyState&&"open"!==this.readyState||(this.readyState="closing",this.writeBuffer.length?this.once("drain",(function(){t.upgrading?r():n()})):this.upgrading?r():n()),this},i.B=function(t){if(n.priorWebsocketSuccess=!1,this.opts.tryAllTransports&&this.transports.length>1&&"opening"===this.readyState)return this.transports.shift(),this.q();this.emitReserved("error",t),this.F("transport error",t)},i.F=function(t,n){if("opening"===this.readyState||"open"===this.readyState||"closing"===this.readyState){if(this.clearTimeoutFn(this.Y),this.transport.removeAllListeners("close"),this.transport.close(),this.transport.removeAllListeners(),ct&&(this.P&&removeEventListener("beforeunload",this.P,!1),this.$)){var i=at.indexOf(this.$);-1!==i&&at.splice(i,1)}this.readyState="closed",this.id=null,this.emitReserved("close",t,n),this.writeBuffer=[],this.M=0}},n}(I);vt.protocol=4;var lt=function(t){function n(){var n;return(n=t.apply(this,arguments)||this).Z=[],n}s(n,t);var i=n.prototype;return i.onOpen=function(){if(t.prototype.onOpen.call(this),"open"===this.readyState&&this.opts.upgrade)for(var n=0;n1&&void 0!==arguments[1]?arguments[1]:{},r="object"===c(n)?n:i;return(!r.transports||r.transports&&"string"==typeof r.transports[0])&&(r.transports=(r.transports||["polling","websocket","webtransport"]).map((function(t){return st[t]})).filter((function(t){return!!t}))),t.call(this,n,r)||this}return s(n,t),n}(lt);pt.protocol;var dt="function"==typeof ArrayBuffer,yt=function(t){return"function"==typeof ArrayBuffer.isView?ArrayBuffer.isView(t):t.buffer instanceof ArrayBuffer},bt=Object.prototype.toString,wt="function"==typeof Blob||"undefined"!=typeof Blob&&"[object BlobConstructor]"===bt.call(Blob),gt="function"==typeof File||"undefined"!=typeof File&&"[object FileConstructor]"===bt.call(File);function mt(t){return dt&&(t instanceof ArrayBuffer||yt(t))||wt&&t instanceof Blob||gt&&t instanceof File}function 
kt(t,n){if(!t||"object"!==c(t))return!1;if(Array.isArray(t)){for(var i=0,r=t.length;i=0&&t.num1?e-1:0),s=1;s1?i-1:0),e=1;ei.l.retries&&(i.it.shift(),n&&n(t));else if(i.it.shift(),n){for(var e=arguments.length,o=new Array(e>1?e-1:0),s=1;s0&&void 0!==arguments[0]&&arguments[0];if(this.connected&&0!==this.it.length){var n=this.it[0];n.pending&&!t||(n.pending=!0,n.tryCount++,this.flags=n.flags,this.emit.apply(this,n.args))}},o.packet=function(t){t.nsp=this.nsp,this.io.ct(t)},o.onopen=function(){var t=this;"function"==typeof this.auth?this.auth((function(n){t.vt(n)})):this.vt(this.auth)},o.vt=function(t){this.packet({type:Bt.CONNECT,data:this.lt?e({pid:this.lt,offset:this.dt},t):t})},o.onerror=function(t){this.connected||this.emitReserved("connect_error",t)},o.onclose=function(t,n){this.connected=!1,delete this.id,this.emitReserved("disconnect",t,n),this.yt()},o.yt=function(){var t=this;Object.keys(this.acks).forEach((function(n){if(!t.sendBuffer.some((function(t){return String(t.id)===n}))){var i=t.acks[n];delete t.acks[n],i.withError&&i.call(t,new Error("socket has been disconnected"))}}))},o.onpacket=function(t){if(t.nsp===this.nsp)switch(t.type){case Bt.CONNECT:t.data&&t.data.sid?this.onconnect(t.data.sid,t.data.pid):this.emitReserved("connect_error",new Error("It seems you are trying to reach a Socket.IO server in v2.x with a v3.x client, but they are not compatible (more information here: https://socket.io/docs/v3/migrating-from-2-x-to-3-0/)"));break;case Bt.EVENT:case Bt.BINARY_EVENT:this.onevent(t);break;case Bt.ACK:case Bt.BINARY_ACK:this.onack(t);break;case Bt.DISCONNECT:this.ondisconnect();break;case Bt.CONNECT_ERROR:this.destroy();var n=new Error(t.data.message);n.data=t.data.data,this.emitReserved("connect_error",n)}},o.onevent=function(t){var n=t.data||[];null!=t.id&&n.push(this.ack(t.id)),this.connected?this.emitEvent(n):this.receiveBuffer.push(Object.freeze(n))},o.emitEvent=function(n){if(this.bt&&this.bt.length){var i,e=r(this.bt.slice());try{for(e.s();!(i=e.n()).done;){i.value.apply(this,n)}}catch(t){e.e(t)}finally{e.f()}}t.prototype.emit.apply(this,n),this.lt&&n.length&&"string"==typeof n[n.length-1]&&(this.dt=n[n.length-1])},o.ack=function(t){var n=this,i=!1;return function(){if(!i){i=!0;for(var r=arguments.length,e=new Array(r),o=0;o0&&t.jitter<=1?t.jitter:0,this.attempts=0}_t.prototype.duration=function(){var t=this.ms*Math.pow(this.factor,this.attempts++);if(this.jitter){var n=Math.random(),i=Math.floor(n*this.jitter*t);t=1&Math.floor(10*n)?t+i:t-i}return 0|Math.min(t,this.max)},_t.prototype.reset=function(){this.attempts=0},_t.prototype.setMin=function(t){this.ms=t},_t.prototype.setMax=function(t){this.max=t},_t.prototype.setJitter=function(t){this.jitter=t};var Dt=function(t){function n(n,i){var r,e;(r=t.call(this)||this).nsps={},r.subs=[],n&&"object"===c(n)&&(i=n,n=void 0),(i=i||{}).path=i.path||"/socket.io",r.opts=i,$(r,i),r.reconnection(!1!==i.reconnection),r.reconnectionAttempts(i.reconnectionAttempts||1/0),r.reconnectionDelay(i.reconnectionDelay||1e3),r.reconnectionDelayMax(i.reconnectionDelayMax||5e3),r.randomizationFactor(null!==(e=i.randomizationFactor)&&void 0!==e?e:.5),r.backoff=new _t({min:r.reconnectionDelay(),max:r.reconnectionDelayMax(),jitter:r.randomizationFactor()}),r.timeout(null==i.timeout?2e4:i.timeout),r.st="closed",r.uri=n;var o=i.parser||xt;return r.encoder=new o.Encoder,r.decoder=new o.Decoder,r.et=!1!==i.autoConnect,r.et&&r.open(),r}s(n,t);var i=n.prototype;return i.reconnection=function(t){return 
arguments.length?(this.kt=!!t,t||(this.skipReconnect=!0),this):this.kt},i.reconnectionAttempts=function(t){return void 0===t?this.At:(this.At=t,this)},i.reconnectionDelay=function(t){var n;return void 0===t?this.jt:(this.jt=t,null===(n=this.backoff)||void 0===n||n.setMin(t),this)},i.randomizationFactor=function(t){var n;return void 0===t?this.Et:(this.Et=t,null===(n=this.backoff)||void 0===n||n.setJitter(t),this)},i.reconnectionDelayMax=function(t){var n;return void 0===t?this.Ot:(this.Ot=t,null===(n=this.backoff)||void 0===n||n.setMax(t),this)},i.timeout=function(t){return arguments.length?(this.Bt=t,this):this.Bt},i.maybeReconnectOnOpen=function(){!this.ot&&this.kt&&0===this.backoff.attempts&&this.reconnect()},i.open=function(t){var n=this;if(~this.st.indexOf("open"))return this;this.engine=new pt(this.uri,this.opts);var i=this.engine,r=this;this.st="opening",this.skipReconnect=!1;var e=It(i,"open",(function(){r.onopen(),t&&t()})),o=function(i){n.cleanup(),n.st="closed",n.emitReserved("error",i),t?t(i):n.maybeReconnectOnOpen()},s=It(i,"error",o);if(!1!==this.Bt){var u=this.Bt,h=this.setTimeoutFn((function(){e(),o(new Error("timeout")),i.close()}),u);this.opts.autoUnref&&h.unref(),this.subs.push((function(){n.clearTimeoutFn(h)}))}return this.subs.push(e),this.subs.push(s),this},i.connect=function(t){return this.open(t)},i.onopen=function(){this.cleanup(),this.st="open",this.emitReserved("open");var t=this.engine;this.subs.push(It(t,"ping",this.onping.bind(this)),It(t,"data",this.ondata.bind(this)),It(t,"error",this.onerror.bind(this)),It(t,"close",this.onclose.bind(this)),It(this.decoder,"decoded",this.ondecoded.bind(this)))},i.onping=function(){this.emitReserved("ping")},i.ondata=function(t){try{this.decoder.add(t)}catch(t){this.onclose("parse error",t)}},i.ondecoded=function(t){var n=this;R((function(){n.emitReserved("packet",t)}),this.setTimeoutFn)},i.onerror=function(t){this.emitReserved("error",t)},i.socket=function(t,n){var i=this.nsps[t];return i?this.et&&!i.active&&i.connect():(i=new Lt(this,t,n),this.nsps[t]=i),i},i.wt=function(t){for(var n=0,i=Object.keys(this.nsps);n=this.At)this.backoff.reset(),this.emitReserved("reconnect_failed"),this.ot=!1;else{var i=this.backoff.duration();this.ot=!0;var r=this.setTimeoutFn((function(){n.skipReconnect||(t.emitReserved("reconnect_attempt",n.backoff.attempts),n.skipReconnect||n.open((function(i){i?(n.ot=!1,n.reconnect(),t.emitReserved("reconnect_error",i)):n.onreconnect()})))}),i);this.opts.autoUnref&&r.unref(),this.subs.push((function(){t.clearTimeoutFn(r)}))}},i.onreconnect=function(){var t=this.backoff.attempts;this.ot=!1,this.backoff.reset(),this.emitReserved("reconnect",t)},n}(I),Pt={};function $t(t,n){"object"===c(t)&&(n=t,t=void 0);var i,r=function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",i=arguments.length>2?arguments[2]:void 0,r=t;i=i||"undefined"!=typeof location&&location,null==t&&(t=i.protocol+"//"+i.host),"string"==typeof t&&("/"===t.charAt(0)&&(t="/"===t.charAt(1)?i.protocol+t:i.host+t),/^(https?|wss?):\/\//.test(t)||(t=void 0!==i?i.protocol+"//"+t:"https://"+t),r=ft(t)),r.port||(/^(http|ws)$/.test(r.protocol)?r.port="80":/^(http|ws)s$/.test(r.protocol)&&(r.port="443")),r.path=r.path||"/";var e=-1!==r.host.indexOf(":")?"["+r.host+"]":r.host;return r.id=r.protocol+"://"+e+":"+r.port+n,r.href=r.protocol+"://"+e+(i&&i.port===r.port?"":":"+r.port),r}(t,(n=n||{}).path||"/socket.io"),e=r.source,o=r.id,s=r.path,u=Pt[o]&&s in Pt[o].nsps;return n.forceNew||n["force new connection"]||!1===n.multiplex||u?i=new 
Dt(e,n):(Pt[o]||(Pt[o]=new Dt(e,n)),i=Pt[o]),r.query&&!n.query&&(n.query=r.queryKey),i.socket(r.path,n)}return e($t,{Manager:Dt,Socket:Lt,io:$t,connect:$t}),$t})); +//# sourceMappingURL=socket.io.min.js.map
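
The files vendored above are the Socket.IO v4.8.1 browser client in two builds: the ESM bundle (`webui/vendor/socket.io.esm.min.js`), which re-exports `io`, `connect`, `Manager`, and `Socket`, and the UMD bundle (`webui/vendor/socket.io.min.js`), which registers the same factory as a global `io` for pages loading it via a plain `<script>` tag. For orientation, a minimal connection sketch against the vendored ESM build follows; the relative import path, the server options, and the event handlers are illustrative assumptions on my part, not something this patch establishes:

```js
// Minimal sketch of connecting with the vendored ESM client.
// Assumptions (not part of this patch): the import path below, and that
// the backend speaks Socket.IO on the same origin under the default
// "/socket.io" path.
import { io } from "./vendor/socket.io.esm.min.js";

const socket = io({ path: "/socket.io" }); // same-origin, default path

socket.on("connect", () => console.log("connected as", socket.id));
socket.on("connect_error", (err) => console.error("connect failed:", err.message));
socket.on("disconnect", (reason) => console.log("disconnected:", reason));
```

The UMD build behaves the same way once loaded; it simply attaches `io` to `globalThis` instead of using ES module exports, which suits pages that are not served as modules.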