diff --git a/.github/workflows/build-deploy-docs.yml b/.github/workflows/build-deploy-docs.yml new file mode 100644 index 000000000..8024b54dc --- /dev/null +++ b/.github/workflows/build-deploy-docs.yml @@ -0,0 +1,204 @@ +name: 🥘 Build & Deploy Docs HB + +on: + pull_request: + branches: + - main + paths: + # Trigger on changes to docs, mkdocs config, or the workflow itself + - "docs/**" + - "mkdocs.yml" + - ".github/workflows/build-deploy-docs.yml" + push: + branches: + - main + paths: + # Trigger on changes to docs, mkdocs config, or the workflow itself + - "docs/**" + - "mkdocs.yml" + - ".github/workflows/build-deploy-docs.yml" + + # Perform a release using a workflow dispatch + workflow_dispatch: + +defaults: + run: + shell: bash + +jobs: + # Run the build as part of PRs to confirm the site properly builds + check_build: + if: ${{ startsWith(github.ref, 'refs/pull/') }} + runs-on: ubuntu-22.04 + steps: + - name: ⬇️ Checkout repo + uses: actions/checkout@v3 + + # Setup Python environment + - name: 🐍 Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.x' # Use a recent Python 3 version + + # Install Erlang OTP 27 using kerl + - name: Install Erlang OTP 27 + run: | + sudo apt-get update + sudo apt-get install -y build-essential autoconf libncurses5-dev libssl-dev + git clone https://github.com/kerl/kerl.git + ./kerl/kerl build 27.0 otp-27.0 + ./kerl/kerl install otp-27.0 ~/otp-27.0 + echo '. ~/otp-27.0/activate' >> ~/.bashrc + . ~/otp-27.0/activate + echo "Erlang version:" + erl -eval 'io:format("~s~n", [erlang:system_info(otp_release)]), halt().' 
+ # Install system dependencies needed for HyperBEAM + - name: Install system dependencies + run: | + sudo apt-get update && sudo apt-get install -y --no-install-recommends \ + build-essential \ + cmake \ + pkg-config \ + ncurses-dev \ + libssl-dev \ + ca-certificates + # Debug step - display the region with syntax error + - name: Debug syntax error region + run: | + echo "Showing the region with syntax error in hb_message.erl:" + sed -n '1440,1460p' src/hb_message.erl || echo "File not found or cannot be read" + echo "Checking for syntax error fix files:" + find . -name "*.erl.fix" -o -name "hb_message.erl.*" | grep -v ".beam" || echo "No fix files found" + echo "Erlang version:" + . ~/otp-27.0/activate && erl -eval 'io:format("~s~n", [erlang:system_info(otp_release)]), halt().' + # Install rebar3 + - name: Install rebar3 + run: | + . ~/otp-27.0/activate + mkdir -p ~/.config/rebar3 + curl -O https://s3.amazonaws.com/rebar3/rebar3 && chmod +x rebar3 + sudo mv rebar3 /usr/local/bin/rebar3 + . ~/otp-27.0/activate && rebar3 --version + # Install Rust toolchain (needed for WASM components) + - name: Install Rust and Cargo + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + echo "$HOME/.cargo/bin" >> $GITHUB_PATH + source "$HOME/.cargo/env" + # Setup Node.js + - name: ⎔ Setup Node + uses: actions/setup-node@v3 + with: + node-version: 22 # Or your preferred version + + # Install pip dependencies and cache them + - name: 📦 Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin + - name: 🛠 Build Docs + run: | + . ~/otp-27.0/activate + SKIP_COMPILE=1 SKIP_EDOC=1 ./docs/build-all.sh -v + # Build and deploy the artifacts to Arweave via ArDrive + deploy: + if: github.ref == 'refs/heads/main' + runs-on: ubuntu-22.04 + # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 
+ # However, do NOT cancel in-progress runs as we want to allow these deployments to complete. + concurrency: + group: deploy + cancel-in-progress: false + steps: + - name: ⬇️ Checkout repo + uses: actions/checkout@v3 + + # Setup Python environment + - name: 🐍 Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.x' + + # Install Erlang OTP 27 using kerl + - name: Install Erlang OTP 27 + run: | + sudo apt-get update + sudo apt-get install -y build-essential autoconf libncurses5-dev libssl-dev + git clone https://github.com/kerl/kerl.git + ./kerl/kerl build 27.0 otp-27.0 + ./kerl/kerl install otp-27.0 ~/otp-27.0 + echo '. ~/otp-27.0/activate' >> ~/.bashrc + . ~/otp-27.0/activate + echo "Erlang version:" + erl -eval 'io:format("~s~n", [erlang:system_info(otp_release)]), halt().' + # Install system dependencies needed for HyperBEAM + - name: Install system dependencies + run: | + sudo apt-get update && sudo apt-get install -y --no-install-recommends \ + build-essential \ + cmake \ + pkg-config \ + ncurses-dev \ + libssl-dev \ + ca-certificates + # Debug step - display the region with syntax error + - name: Debug syntax error region + run: | + echo "Showing the region with syntax error in hb_message.erl:" + sed -n '1440,1460p' src/hb_message.erl || echo "File not found or cannot be read" + echo "Checking for syntax error fix files:" + find . -name "*.erl.fix" -o -name "hb_message.erl.*" | grep -v ".beam" || echo "No fix files found" + echo "Erlang version:" + . ~/otp-27.0/activate && erl -eval 'io:format("~s~n", [erlang:system_info(otp_release)]), halt().' + # Install rebar3 + - name: Install rebar3 + run: | + . ~/otp-27.0/activate + mkdir -p ~/.config/rebar3 + curl -O https://s3.amazonaws.com/rebar3/rebar3 && chmod +x rebar3 + sudo mv rebar3 /usr/local/bin/rebar3 + . 
~/otp-27.0/activate && rebar3 --version + # Install Rust toolchain (needed for WASM components) + - name: Install Rust and Cargo + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + echo "$HOME/.cargo/bin" >> $GITHUB_PATH + source "$HOME/.cargo/env" + # Install pip dependencies and cache them + - name: 📦 Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin + # Setup Node.js (needed for npx deploy command) + - name: ⎔ Setup Node + uses: actions/setup-node@v3 + with: + node-version: 22 # Or your preferred version + + - name: 👀 Env + run: | + echo "Event name: ${{ github.event_name }}" + echo "Git ref: ${{ github.ref }}" + echo "GH actor: ${{ github.actor }}" + echo "SHA: ${{ github.sha }}" + VER=`node --version`; echo "Node ver: $VER" + VER=`npm --version`; echo "npm ver: $VER" + . ~/otp-27.0/activate && erl -eval 'io:format("Erlang OTP version: ~s~n", [erlang:system_info(otp_release)]), halt().' + - name: 🛠 Build Docs + id: build_artifacts + run: | + . 
~/otp-27.0/activate + SKIP_COMPILE=1 SKIP_EDOC=1 ./docs/build-all.sh -v + touch mkdocs-site/.nojekyll + echo "artifacts_output_dir=mkdocs-site" >> $GITHUB_OUTPUT + - name: 💾 Publish to Arweave + id: publish_artifacts + run: | + npx permaweb-deploy \ + --arns-name=dps-testing-facility \ + --ant-process=${{ secrets.ANT_PROCESS }} \ + --deploy-folder=${ARTIFACTS_OUTPUT_DIR} + env: + DEPLOY_KEY: ${{ secrets.DEPLOY_KEY }} + ARTIFACTS_OUTPUT_DIR: ${{ steps.build_artifacts.outputs.artifacts_output_dir }} + ANT_PROCESS: ${{ secrets.ANT_PROCESS }} diff --git a/.github/workflows/build-deploy-docs.yml.disabled b/.github/workflows/build-deploy-docs.yml.disabled deleted file mode 100644 index d4dbbaeeb..000000000 --- a/.github/workflows/build-deploy-docs.yml.disabled +++ /dev/null @@ -1,113 +0,0 @@ -name: 🥘 Build & Deploy Docs HB - -on: - pull_request: - branches: - - main - paths: - # Trigger on changes to docs, mkdocs config, or the workflow itself - - "docs/**" - - "mkdocs.yml" - - ".github/workflows/build-deploy-docs.yml" - push: - branches: - - main - paths: - # Trigger on changes to docs, mkdocs config, or the workflow itself - - "docs/**" - - "mkdocs.yml" - - ".github/workflows/build-deploy-docs.yml" - - # Perform a release using a workflow dispatch - workflow_dispatch: - -defaults: - run: - shell: bash - -jobs: - # Run the build as part of PRs to confirm the site properly builds - check_build: - if: ${{ startsWith(github.ref, 'refs/pull/') }} - runs-on: ubuntu-latest - steps: - - name: ⬇️ Checkout repo - uses: actions/checkout@v3 - - # Setup Python environment - - name: 🐍 Set up Python - uses: actions/setup-python@v5 - with: - python-version: '3.x' # Use a recent Python 3 version - - # Install pip dependencies and cache them - - name: 📦 Install Python dependencies - run: | - python -m pip install --upgrade pip - pip install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin - - - name: 🛠 Build Docs - run: | - ./docs/build-all.sh - - # Build and deploy the 
artifacts to Arweave via ArDrive - deploy: - if: github.ref == 'refs/heads/main' - runs-on: ubuntu-latest - # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. - # However, do NOT cancel in-progress runs as we want to allow these deployments to complete. - concurrency: - group: deploy - cancel-in-progress: false - steps: - - name: ⬇️ Checkout repo - uses: actions/checkout@v3 - - # Setup Python environment - - name: 🐍 Set up Python - uses: actions/setup-python@v5 - with: - python-version: '3.x' - - # Install pip dependencies and cache them - - name: 📦 Install Python dependencies - run: | - python -m pip install --upgrade pip - pip install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin - - # Setup Node.js (needed for npx deploy command) - - name: ⎔ Setup Node - uses: actions/setup-node@v3 - with: - node-version: 22 # Or your preferred version - # cache: yarn # Caching might not be needed just for npx - - - name: 👀 Env - run: | - echo "Event name: ${{ github.event_name }}" - echo "Git ref: ${{ github.ref }}" - echo "GH actor: ${{ github.actor }}" - echo "SHA: ${{ github.sha }}" - VER=`node --version`; echo "Node ver: $VER" - VER=`npm --version`; echo "npm ver: $VER" - - - name: 🛠 Build Docs - id: build_artifacts - run: | - ./docs/build-all.sh - touch mkdocs-site/.nojekyll - - echo "artifacts_output_dir=mkdocs-site" >> $GITHUB_OUTPUT - - - name: 💾 Publish to Arweave - id: publish_artifacts - run: | - npx permaweb-deploy \ - --ant-process=${ANT_PROCESS} \ - --undername=${UNDERNAME} \ - --deploy-folder=${ARTIFACTS_OUTPUT_DIR} - env: - DEPLOY_KEY: ${{ secrets.CI_WALLET }} - ARTIFACTS_OUTPUT_DIR: ${{ steps.build_artifacts.outputs.artifacts_output_dir }} - ANT_PROCESS: HY021r2MQL9Zi0qSNFAQ9QRshIc2mNPYf65pZBP04cE - UNDERNAME: docs \ No newline at end of file diff --git a/README.md b/README.md index 57dffca02..fc1b805bd 100644 --- a/README.md +++ b/README.md @@ -99,7 +99,7 @@ To start a shell with 
profiles: rebar3 as rocksdb shell # Multiple profiles -rebar3 as rocksdb,genesis_wasm shell +rebar3 as rocksdb, genesis_wasm shell ``` To create a release with profiles: @@ -266,19 +266,11 @@ schedule of another execution. Details on other devices found in the pre-loaded set can be located in their respective documentation. -## Contributing - -HyperBEAM is developed as an open source implementation of the AO-Core protocol -by [Forward Research](https://fwd.arweave.net). Pull Requests are always welcome! - -To get started building on HyperBEAM, check out the [hacking on HyperBEAM](./docs/contribute/setup.md) -guide. - ## Documentation -HyperBEAM uses [MkDocs](https://www.mkdocs.org/) with the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme to build its documentation site. +HyperBEAM uses [MkDocs](https://www.mkdocs.org/) with the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme to build its documentation site. All documentation source files are located in the `docs/` directory. -Building the documentation requires Python 3 and pip. It's recommended to use a virtual environment: +To build and view the documentation locally: ```bash # Create and activate a virtual environment (optional but recommended) @@ -288,103 +280,22 @@ source venv/bin/activate # (macOS/Linux) On Windows use `venv\Scripts\activate` # Install required packages pip3 install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin -# Deactivate the virtual environment when done -# deactivate +# Build the docs +./docs/build-all.sh + +# Serve the docs +cd mkdocs-site +python3 -m http.server 8000 +# Then open http://127.0.0.1:8000/ in your browser ``` -- **Source Files:** All documentation source files (Markdown `.md`, images, CSS) are located in the `docs/` directory. 
-- **Source Code Docs:** Erlang source code documentation is generated using `rebar3 edoc` (with the `edown_doclet` plugin) into the `docs/source-code-docs/` directory as Markdown files. These are then incorporated into the main MkDocs site. -- **Build Script:** The entire process (compiling, generating edoc, processing source docs, building the site) is handled by the `./docs/build-all.sh` script. - -To build the documentation locally: - -1. Ensure you are in the project root directory. -2. If using a virtual environment, make sure it's activated. -3. Run the build script: - ```bash - ./docs/build-all.sh - ``` - -This script performs the following steps: -- Compiles the Erlang project (`rebar3 compile`). -- Generates Markdown documentation from source code comments (`rebar3 edoc`) into `docs/source-code-docs/`. -- Processes the generated source code Markdown files (updates index, cleans up TOCs). -- Builds the MkDocs site into the `mkdocs-site` directory (`mkdocs build`). - -To view the built documentation locally: - -1. Navigate to the site directory: - ```bash - cd mkdocs-site - ``` -2. Start a simple Python HTTP server: - ```bash - python3 -m http.server 8000 - ``` -3. Open your web browser and go to `http://127.0.0.1:8000/`. - -Press `Ctrl+C` in the terminal where the server is running to stop it. - -The final static site is generated in the `mkdocs-site` directory, as configured in `mkdocs.yml` (`site_dir: mkdocs-site`). - -### Contributing to the Documentation - -To contribute documentation to HyperBEAM, follow these steps: - -1. **Fork the Repository** - - Fork the [HyperBEAM repository](https://github.com/permaweb/HyperBEAM) to your GitHub account - -2. 
**Choose the Right Location** - - Review the existing documentation structure in `./docs/` to determine the appropriate location for your content - - Documentation is organized into several main sections: - - `overview/`: High-level concepts and architecture - - `installation-core/`: Setup and configuration guides - - `components/`: Detailed component documentation - - `usage/`: Tutorials and usage guides - - `resources/`: Reference materials and source code documentation - - `community/`: Contribution guidelines and community resources - -3. **Create Your Documentation** - - Create a new Markdown file (`.md`) in the appropriate directory - - Follow the existing documentation style and format - - Use proper Markdown syntax and include: - - Clear headings and subheadings - - Code blocks with appropriate language specification - - Links to related documentation - - Images (if needed) in the `docs/assets/` directory - -4. **Update the Navigation** - - Edit `mkdocs.yml` to add your documentation to the navigation - - Place your entry in the appropriate section under the `nav:` configuration - - Follow the existing indentation and format - -5. **Test Your Changes** - - Set up a local development environment: - ```bash - python3 -m venv venv - source venv/bin/activate - pip3 install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin - ``` - - Run the build script to verify your changes: - ```bash - ./docs/build-all.sh - ``` - - View the documentation locally at `http://127.0.0.1:8000/` - -6. **Submit a Pull Request** - - Create a new branch for your documentation changes - - Commit your changes with a descriptive message - - Submit a PR with: - - A clear title describing the documentation addition - - A detailed description explaining: - - The purpose of the new documentation - - Why it should be added to the official docs - - Any related issues or discussions - - Screenshots of the rendered documentation (if applicable) - -7. 
**Review Process** - - The HyperBEAM team will review your PR - - Be prepared to make adjustments based on feedback - - Once approved, your documentation will be merged into the main repository - -For more detailed contribution guidelines, see the [Community Guidelines](./docs/community/guidelines.md) and [Development Setup](./docs/community/setup.md) documentation. +For more details on the documentation structure, how to contribute, and other information, please see the [full documentation README](./docs/README.md). + +## Contributing + +HyperBEAM is developed as an open source implementation of the AO-Core protocol +by [Forward Research](https://fwd.arweave.net). Pull Requests are always welcome! + +To get started building on HyperBEAM, check out the [hacking on HyperBEAM](./docs/misc/hacking-on-hyperbeam.md) +guide. + diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..4168a5bed --- /dev/null +++ b/docs/README.md @@ -0,0 +1,115 @@ + +## Documentation + +HyperBEAM uses [MkDocs](https://www.mkdocs.org/) with the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme to build its documentation site. + +Building the documentation requires Python 3 and pip. It's recommended to use a virtual environment: + +```bash +# Create and activate a virtual environment (optional but recommended) +python3 -m venv venv +source venv/bin/activate # (macOS/Linux) On Windows use `venv\Scripts\activate` + +# Install required packages +pip3 install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin + +# Deactivate the virtual environment when done +# deactivate +``` + +- **Source Files:** All documentation source files (Markdown `.md`, images, CSS) are located in the `docs/` directory. +- **Source Code Docs:** Erlang source code documentation is generated using `rebar3 edoc` (with the `edown_doclet` plugin) into the `docs/source-code-docs/` directory as Markdown files. These are then incorporated into the main MkDocs site. 
+- **Build Script:** The entire process (compiling, generating edoc, processing source docs, building the site) is handled by the `./docs/build-all.sh` script. + +To build the documentation locally: + +1. Ensure you are in the project root directory. +2. If using a virtual environment, make sure it's activated. +3. Run the build script: + ```bash + ./docs/build-all.sh + ``` + +This script performs the following steps: +- Compiles the Erlang project (`rebar3 compile`). +- Generates Markdown documentation from source code comments (`rebar3 edoc`) into `docs/source-code-docs/`. +- Processes the generated source code Markdown files (updates index, cleans up TOCs). +- Builds the MkDocs site into the `mkdocs-site` directory (`mkdocs build`). + +To view the built documentation locally: + +1. Navigate to the site directory: + ```bash + cd mkdocs-site + ``` +2. Start a simple Python HTTP server: + ```bash + python3 -m http.server 8000 + ``` +3. Open your web browser and go to `http://127.0.0.1:8000/`. + +Press `Ctrl+C` in the terminal where the server is running to stop it. + +The final static site is generated in the `mkdocs-site` directory, as configured in `mkdocs.yml` (`site_dir: mkdocs-site`). + +### Contributing to the Documentation + +To contribute documentation to HyperBEAM, follow these steps: + +1. **Fork the Repository** + - Fork the [HyperBEAM repository](https://github.com/permaweb/HyperBEAM) to your GitHub account + +2. **Choose the Right Location** + - Review the existing documentation structure in `./docs/` to determine the appropriate location for your content + - Documentation is organized into several main sections: + - `overview/`: High-level concepts and architecture + - `installation-core/`: Setup and configuration guides + - `components/`: Detailed component documentation + - `usage/`: Tutorials and usage guides + - `resources/`: Reference materials and source code documentation + - `community/`: Contribution guidelines and community resources + +3. 
**Create Your Documentation** + - Create a new Markdown file (`.md`) in the appropriate directory + - Follow the existing documentation style and format + - Use proper Markdown syntax and include: + - Clear headings and subheadings + - Code blocks with appropriate language specification + - Links to related documentation + - Images (if needed) in the `docs/assets/` directory + +4. **Update the Navigation** + - Edit `mkdocs.yml` to add your documentation to the navigation + - Place your entry in the appropriate section under the `nav:` configuration + - Follow the existing indentation and format + +5. **Test Your Changes** + - Set up a local development environment: + ```bash + python3 -m venv venv + source venv/bin/activate + pip3 install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin + ``` + - Run the build script to verify your changes: + ```bash + ./docs/build-all.sh + ``` + - View the documentation locally at `http://127.0.0.1:8000/` + +6. **Submit a Pull Request** + - Create a new branch for your documentation changes + - Commit your changes with a descriptive message + - Submit a PR with: + - A clear title describing the documentation addition + - A detailed description explaining: + - The purpose of the new documentation + - Why it should be added to the official docs + - Any related issues or discussions + - Screenshots of the rendered documentation (if applicable) + +7. **Review Process** + - The HyperBEAM team will review your PR + - Be prepared to make adjustments based on feedback + - Once approved, your documentation will be merged into the main repository + +For more detailed contribution guidelines, see the [Community Guidelines](./misc/community/guidelines.md) and [Development Setup](./misc/community/setup.md) documentation. 
diff --git a/docs/assets/images/Power-web2-web3-fig.mp4 b/docs/assets/images/Power-web2-web3-fig.mp4 new file mode 100644 index 000000000..ff11d1511 Binary files /dev/null and b/docs/assets/images/Power-web2-web3-fig.mp4 differ diff --git a/docs/assets/images/back-rocks.avif b/docs/assets/images/back-rocks.avif deleted file mode 100644 index 4736a7888..000000000 Binary files a/docs/assets/images/back-rocks.avif and /dev/null differ diff --git a/docs/assets/images/bg-low.avif b/docs/assets/images/bg-low.avif deleted file mode 100644 index feab077ce..000000000 Binary files a/docs/assets/images/bg-low.avif and /dev/null differ diff --git a/docs/assets/images/bg-med.avif b/docs/assets/images/bg-med.avif deleted file mode 100644 index 76fcea8ce..000000000 Binary files a/docs/assets/images/bg-med.avif and /dev/null differ diff --git a/docs/assets/images/bg.avif b/docs/assets/images/bg.avif deleted file mode 100644 index e4b9f49cb..000000000 Binary files a/docs/assets/images/bg.avif and /dev/null differ diff --git a/docs/assets/images/chroma-sky.avif b/docs/assets/images/chroma-sky.avif deleted file mode 100644 index 72e5eef37..000000000 Binary files a/docs/assets/images/chroma-sky.avif and /dev/null differ diff --git a/docs/assets/images/create-new-devices-fig.png b/docs/assets/images/create-new-devices-fig.png new file mode 100644 index 000000000..c65714ea8 Binary files /dev/null and b/docs/assets/images/create-new-devices-fig.png differ diff --git a/docs/assets/images/front-boulder.avif b/docs/assets/images/front-boulder.avif deleted file mode 100644 index 55e67f387..000000000 Binary files a/docs/assets/images/front-boulder.avif and /dev/null differ diff --git a/docs/assets/images/front-rocks.avif b/docs/assets/images/front-rocks.avif deleted file mode 100644 index 450061092..000000000 Binary files a/docs/assets/images/front-rocks.avif and /dev/null differ diff --git a/docs/assets/images/mid-rocks.avif b/docs/assets/images/mid-rocks.avif deleted file mode 100644 index 
cc5c40640..000000000 Binary files a/docs/assets/images/mid-rocks.avif and /dev/null differ diff --git a/docs/assets/images/monetize-fig.mp4 b/docs/assets/images/monetize-fig.mp4 new file mode 100644 index 000000000..2b69faa52 Binary files /dev/null and b/docs/assets/images/monetize-fig.mp4 differ diff --git a/docs/assets/images/monetize-your-hardware-fig.png b/docs/assets/images/monetize-your-hardware-fig.png new file mode 100644 index 000000000..04fb303a2 Binary files /dev/null and b/docs/assets/images/monetize-your-hardware-fig.png differ diff --git a/docs/assets/images/rock-solid-fig.png b/docs/assets/images/rock-solid-fig.png new file mode 100644 index 000000000..5de564c07 Binary files /dev/null and b/docs/assets/images/rock-solid-fig.png differ diff --git a/docs/assets/images/what-is-hyperbeam-fig.mp4 b/docs/assets/images/what-is-hyperbeam-fig.mp4 new file mode 100644 index 000000000..364d598f3 Binary files /dev/null and b/docs/assets/images/what-is-hyperbeam-fig.mp4 differ diff --git a/docs/assets/style.css b/docs/assets/style.css index 63b42a25f..faf217687 100644 --- a/docs/assets/style.css +++ b/docs/assets/style.css @@ -1,21 +1,25 @@ /* General Text Styles */ h1 { - font-size: clamp(1.1rem, 1.5vw, 1.2rem) !important; - color: black !important; + font-size: clamp(1.6rem, 1.5vw, 1.7rem) !important; + color: rgba(60, 60, 67) !important; + font-weight: 600 !important; } h2 { - font-size: clamp(0.9rem, 1.5vw, 1rem) !important; - color: #0000008a; + font-size: clamp(1.2rem, 1.5vw, 1.3rem) !important; + color: rgba(60, 60, 67); + ; /* Semi-transparent black */ } p { - font-size: clamp(0.6rem, 1.5vw, 0.65rem); + font-size: clamp(0.6rem, 1.5vw, 0.7rem); + line-height: 1.75; } li { - font-size: clamp(0.6rem, 1.5vw, 0.65rem); + font-size: clamp(0.6rem, 1.5vw, 0.75rem); + line-height: 1.75; } img { @@ -27,16 +31,76 @@ input { } body { - --docs-max-width: 55rem; + --docs-max-width: 60rem; --homepage-max-width: 90rem; --sections-max-width: 80rem; --parallax-perspective: 
2rem; + --md-accent-fg-color: #555555 !important; + --md-default-fg-color--light: #bebebe !important; +} + +.md-nav__item--section>.md-nav__link { + color: black !important; + margin-bottom: 8px; +} + +/* +h1, h2, h3, h4, h5, h6 { + border-bottom: 1px solid #ccc; + padding-bottom: 10px; +} +*/ + +.md-content h2, +h3, +h4, +h5, +h6 { + font-weight: 400 !important; } +.md-content h2 { + border-top: 1px solid #ccc; + padding-top: 1.5rem; +} + +.md-content h3, +h4, +h5, +h6 { + border-top: 1px solid #e5e5e58a; + padding-top: 1rem; +} /* Body and Header Customization */ .md-header { box-shadow: none !important; + z-index: 100; + transition: transform 0.3s ease-in-out; + position: fixed; + width: 100%; + transform: translateY(0); +} + +.header-hidden { + transform: translateY(-100%); + transition: transform 0.15s ease-in-out; +} + +.md-main { + transition: padding-top 0.3s ease; +} + +.header-hidden + .md-main { + padding-top: 0; +} + +[dir=ltr] .md-sidebar--primary { + left: -15rem !important; +} + +[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary { + transform: translateX(15rem) !important; } .md-grid { @@ -44,15 +108,21 @@ body { max-width: var(--docs-max-width); } +.md-main__inner { + gap: 2.25rem; +} + .custom-homepage-header { position: fixed; filter: invert(1); top: 0; z-index: 20 !important; background: linear-gradient(0deg, rgba(255, 255, 255, 0) 0%, #ffffff 100%); - + border-bottom: 0px solid; } + + .custom-homepage-header .md-grid { max-width: var(--homepage-max-width); } @@ -75,19 +145,69 @@ body { /* Navigation Styles */ .md-nav__title, .md-nav__item { - font-size: clamp(0.45rem, 1.5vw, 0.5rem) !important; + font-size: clamp(0.6rem, 1.5vw, 0.65rem) !important; box-shadow: none !important; } +.md-nav__title { + color: #000000 !important; +} + +.md-nav__link { + padding: 4px 16px; + border-radius: 6px; + margin: 0; + margin-top: 4px; +} + +.md-tabs__item--active .md-tabs__link { + position: relative; + font-weight: 700; +} + 
+.md-tabs__item--active .md-tabs__link::after { + content: ""; + position: absolute; + bottom: -9px; + left: 5%; + width: 90%; + height: 2px; + background-color: black; + border-radius: 1px; + transition: all 0.3s ease; +} + + +.md-nav__link:hover { + background: rgb(243, 243, 243); +} + .md-nav__link--active { - color: #fc8516 !important; + color: #000000 !important; /* Active link color */ - font-weight: 600; + font-weight: 700; + background: rgb(237, 237, 237); +} + +.md-sidebar { + width: 15rem; +} + +[dir=ltr] .md-sidebar__inner { + padding-right: calc(100% - 15rem); +} + +.md-nav--secondary { + border-left: 0.05rem solid lightgray !important; } /* Tab Navigation */ .md-tabs__link { - font-size: clamp(0.4rem, 1.5vw, 0.5rem); + font-size: clamp(0.65rem, 1.5vw, 0.65rem) !important; +} + +.md-tabs__list { + justify-content: space-between; } .md-tabs__item { @@ -150,6 +270,7 @@ body { } /* Source Citation Styles */ + .md-header__source { width: 10rem; padding: 0.6rem; @@ -272,7 +393,7 @@ body { flex: 1.5 0 0; gap: 0.5rem; height: 40%; - min-height: 6rem; + min-height: 7rem; backdrop-filter: blur(50px); } @@ -284,7 +405,7 @@ body { flex: 1 0 0; border-radius: 4px; border: 1px solid #2b2b2b; - background: rgba(0, 0, 0, 0.8); + background: rgba(0, 0, 0, 0.75); justify-content: space-between; align-items: start; padding: 8px; @@ -294,32 +415,43 @@ body { } .hero-button-card:hover { - background: rgba(0, 0, 0, 0.9); + background: rgba(0, 0, 0, 1); + border: 1px solid rgb(149, 149, 149); } .hero-main-heading h1 { - font-size: clamp(1.0rem, 1.5vw, 1rem) !important; + font-size: clamp(1.8rem, 1.5vw, 2.0rem) !important; margin: 0 !important; font-weight: 500 !important; color: white !important; } .hero-button-card h2 { - font-size: clamp(0.7rem, 1.5vw, 0.8rem) !important; + font-size: clamp(0.9rem, 1.5vw, 1rem) !important; margin: 0 !important; font-weight: 500 !important; color: white !important; } +.hero-button-card p { + font-size: clamp(0.65rem, 1.5vw, 0.7rem) 
!important; + color: #c0c0c0 !important; + font-weight: 500; +} + .hero-main-heading h2, .hero-button-card p { font-size: clamp(0.55rem, 1.5vw, 0.6rem) !important; - color: white !important; + line-height: normal; margin: 0; font-weight: 500; text-align: left; } +.hero-main-heading h2 { + color: white !important; +} + /* Hero Rocks */ .rocks { @@ -423,7 +555,7 @@ body { } .section-inner-content h1 { - font-size: clamp(0.95rem, 1.5vw, 1rem) !important; + font-size: clamp(1.1rem, 1.5vw, 1.2rem) !important; margin: 0 !important; font-weight: 500 !important; } @@ -469,6 +601,12 @@ body { .section-monetize { justify-content: space-between; + position: relative; + overflow: hidden; +} + +.section-monetize>*:not(:last-child) { + z-index: 3; } .double-column-content { @@ -490,6 +628,7 @@ body { .column-container { width: 100%; height: 100%; + min-height: 70vh; } .grid-container { @@ -499,10 +638,13 @@ body { width: 100%; background: white; overflow: hidden; + border-radius: 4px; } .full-span { grid-column: span 2; + min-height: 600px; + overflow: hidden; } .column-text { @@ -529,17 +671,27 @@ body { background: #F9F9F9; border: 1px solid #E6E6E6; border-radius: 4px; - min-height: 125px; + min-height: 130px; padding: 8px; overflow: hidden; } - +.card p { + font-size: clamp(0.65rem, 1.5vw, 0.7rem) !important; + color: #6E6E6E !important; + font-weight: 500; + line-height: normal; +} .transparent-card { min-height: 125px; display: flex; width: 100%; + background: rgba(255, 255, 255, 0.559); +} + +.transparent-card p { + line-height: normal; } .card-row { @@ -555,7 +707,7 @@ body { } .card span { - background: white; + /* background: white; */ width: fit-content; } @@ -580,13 +732,18 @@ body { } .main-button { - background: #FFDE0B; - padding: 0px 40px; - height: 30px; + display: flex; + align-items: center; + justify-content: center; + background: #E4EABB; + padding: 12.5px 60px; border-radius: 4px; + font-size: clamp(0.6rem, 1.5vw, 0.7rem); + cursor: pointer; } 
.fig-container { + position: relative; height: 100%; display: flex; width: 100%; @@ -594,19 +751,46 @@ body { overflow: hidden; } -.fig img { - image-rendering: pixelated; - shape-rendering: geometricPrecision; - transform: translateZ(0); - /* force GPU for sharpness */ -} +.fig { + position: absolute; + width: 100%; + max-width: 400px; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); +} +.what-is-hyperbeam-fig { + position: absolute; + width: 100%; + top: 50%; + left: 50%; + max-width: 600px; + transform: translate(-50%, -50%); +} -.fig { +.power-web2-web3-fig { position: absolute; width: 100%; - max-width: 500px; + top: 0%; + right: 0%; + max-width: 700px; + transform: translate(-10%, -10%); + +} + + + +.monetize-fig { + position: absolute; + top: 0%; + left: 0%; + + transform: translate(0%, 0%); + z-index: 1; + opacity: 20%; + } /* Footer Customization */ diff --git a/docs/build-all.sh b/docs/build-all.sh index 29d713386..80f075d5c 100755 --- a/docs/build-all.sh +++ b/docs/build-all.sh @@ -70,7 +70,7 @@ ${NEON_GREEN} ++++* ${BLACK}${BOLD}| |__ _ _ _ __ ___ _ ${NEON_GREEN} :+++*${BRIGHT_YELLOW}## ${BLACK}${BOLD} | '_ \\| | | | '_ \\ / _ \\ '__| ${NC} ${NEON_GREEN} ++**${BRIGHT_YELLOW}#### ${BLACK}${BOLD} | | | | |_| | |_) | __/ | ${NC} ${NEON_GREEN} +++${BRIGHT_YELLOW}####${NEON_GREEN}*** ${BLACK}${BOLD} |_| |_|\\__, | .__/ \\___|_| ${NC} -${NEON_GREEN} +*${BRIGHT_YELLOW}##${NEON_GREEN}****${MAGENTA}+-- ${BLACK}${BOLD} |___/|_| ${NC} +${NEON_GREEN} +*${BRIGHT_YELLOW}##${NEON_GREEN}****${MAGENTA}+-- ${BLACK}${BOLD} |___/|_| ${NC} ${MAGENTA} -**${BRIGHT_YELLOW}##${NEON_GREEN}**${MAGENTA}+------ ${BLACK}${BOLD} BEAM.${NC} ${MAGENTA} -##${NEON_GREEN}*+${BRIGHT_RED}---::::::: ${GRAY} =${GRAY}%%${NEON_GREEN}*+${BRIGHT_RED}=-:::::::::${GRAY} DECENTRALIZED OPERATING SYSTEM${NC} @@ -129,6 +129,7 @@ find "$DOCS_DIR" -maxdepth 1 -type f -name "*.md" -not -name "index.md" -not -na /^\* \[Description\]\(#description\)$/ { next; } /^\* \[Function 
Index\]\(#index\)$/ { next; } /^\* \[Function Details\]\(#functions\)$/ { next; } + /^\* \[Data Types\]\(#types\)$/ { next; } { print; } ' "$file" > "$TEMP_MODULE_FILE" @@ -288,9 +289,9 @@ log_step "Generating LLM context files" LLM_SUMMARY_FILE="$ROOT_DIR/docs/llms.txt" LLM_FULL_FILE="$ROOT_DIR/docs/llms-full.txt" DOC_DIRS=( - "$ROOT_DIR/docs/begin" + "$ROOT_DIR/docs/introduction" "$ROOT_DIR/docs/run" - "$ROOT_DIR/docs/guides" + "$ROOT_DIR/docs/build" "$ROOT_DIR/docs/devices" "$ROOT_DIR/docs/resources" ) diff --git a/docs/build/exposing-process-state.md b/docs/build/exposing-process-state.md index 64b3818de..a15e8985e 100644 --- a/docs/build/exposing-process-state.md +++ b/docs/build/exposing-process-state.md @@ -49,7 +49,7 @@ InitialSync = InitialSync or 'INCOMPLETE' -- Sync state on spawn/load if not already done if InitialSync == 'INCOMPLETE' then -- Send the relevant state variables to the patch device - Send({ Target = ao.id, device = 'patch@1.0', cache = { balances = Balances, totalsupply = TotalSupply } }) + Send({ device = 'patch@1.0', cache = { balances = Balances, totalsupply = TotalSupply } }) -- Update the flag to prevent re-syncing on subsequent executions InitialSync = 'COMPLETE' print("Initial state sync complete. Balances and TotalSupply patched.") @@ -75,7 +75,7 @@ Handlers.add( function (msg) local dataToPublish = "Some important state: " .. 
math.random() -- Expose 'currentstatus' key under the 'cache' path - Send({ Target = ao.id, device = 'patch@1.0', cache = { currentstatus = dataToPublish } }) + Send({ device = 'patch@1.0', cache = { currentstatus = dataToPublish } }) print("Published data to /cache/currentstatus") end ) diff --git a/docs/build/extending-hyperbeam.md b/docs/build/extending-hyperbeam.md index 72d3b6b5d..c033a8d93 100644 --- a/docs/build/extending-hyperbeam.md +++ b/docs/build/extending-hyperbeam.md @@ -55,19 +55,18 @@ Pre/post-processors allow you to intercept incoming requests *before* they reach Processors often involve checking specific conditions (like request path or headers) and then either: -a) Passing the request through unchanged. -b) Modifying the request/response message structure. -c) Returning an error or redirect. - -dThe guide on [Building Pre/Post-Processors](TODO:link-to-pre-post-processor-guide-once-available) provides a detailed example pattern, particularly focusing on exempting certain routes. +a. Passing the request through unchanged. +b. Modifying the request/response message structure. +c. Returning an error or redirect. + **Example Idea:** A preprocessor that automatically adds a timestamp tag to all incoming messages for a specific process. - + -## Approach 4: Custom Routing Strategies +## Approach 3: Custom Routing Strategies While `dev_router` provides basic strategies (round-robin, etc.), you could potentially implement a custom load balancing or routing strategy module that `dev_router` could be configured to use. This would involve understanding the interfaces expected by `dev_router`. @@ -75,7 +74,7 @@ While `dev_router` provides basic strategies (round-robin, etc.), you could pote ## Getting Started -1. **Familiarize Yourself:** Deeply understand Erlang/OTP and the HyperBEAM codebase (`src/` directory), especially `hb_ao.erl`, `hb_message.erl`, and existing `dev_*.erl` modules relevant to your idea. +1. 
**Familiarize Yourself:** Deeply understand Erlang/OTP and the HyperBEAM codebase (`src/` directory), especially [`hb_ao.erl`](../resources/source-code/hb_ao.md), [`hb_message.erl`](../resources/source-code/hb_message.md), and existing `dev_*.erl` modules relevant to your idea. 2. **Study Examples:** Look at simple devices like `dev_patch.erl` or more complex ones like `dev_process.erl` to understand patterns. 3. **Start Small:** Implement a minimal version of your idea first. 4. **Test Rigorously:** Use `rebar3 eunit` extensively. diff --git a/docs/build/get-started-building-on-ao-core.md b/docs/build/get-started-building-on-ao-core.md index 163c393eb..117b923da 100644 --- a/docs/build/get-started-building-on-ao-core.md +++ b/docs/build/get-started-building-on-ao-core.md @@ -19,17 +19,17 @@ The primary tool for interacting with AO and developing processes is `aos`, a co === "npm" ```bash - npm i -g https://get_ao.g8way.io + npm i -g https://get_ao.arweave.net ``` === "bun" ```bash - bun install -g https://get_ao.g8way.io + bun install -g https://get_ao.arweave.net ``` === "pnpm" ```bash - pnpm add -g https://get_ao.g8way.io + pnpm add -g https://get_ao.arweave.net ``` **Starting `aos`:** diff --git a/docs/build/serverless-decentralized-compute.md b/docs/build/serverless-decentralized-compute.md index 1e51df25a..cb1c589c6 100644 --- a/docs/build/serverless-decentralized-compute.md +++ b/docs/build/serverless-decentralized-compute.md @@ -13,7 +13,7 @@ Instead of deploying code to centralized servers, you deploy code *to* the Arwea 2. Fetch the associated WASM/Lua code from Arweave. 3. Execute the code using the relevant device ([`dev_wasm`](../resources/source-code/dev_wasm.md) or [`dev_lua`](../resources/source-code/dev_lua.md)), passing the message data and current state. 4. Update the process state based on the execution results. 
- + ## TEE Attestations (via [`~snp@1.0`](../resources/source-code/dev_snp.md)) diff --git a/docs/devices/json-at-1-0.md b/docs/devices/json-at-1-0.md index 94facd996..eec5e7985 100644 --- a/docs/devices/json-at-1-0.md +++ b/docs/devices/json-at-1-0.md @@ -2,63 +2,41 @@ ## Overview -The `json` device provides a structured way to access and interact with JSON (JavaScript Object Notation) data within the HyperBEAM environment. It allows processes to read, query, and potentially modify JSON objects stored or referenced by the device. It can also be used via HyperPATH chaining to serialize arbitrary data from other devices. +The [`~json@1.0`](../resources/source-code/dev_json_iface.md) device provides a mechanism to interact with JSON (JavaScript Object Notation) data structures using HyperPATHs. It allows treating a JSON document or string as a stateful entity against which HyperPATH queries can be executed. -**Status:** Experimental +This device is useful for: -## Core Functions (Keys) - -These keys are typically accessed via HyperPATHs relative to the device's mount point (e.g., `/data/myJson`) or used in HyperPATH chains. +* Serializing and deserializing JSON data. +* Querying and modifying JSON objects. +* Integrating with other devices and operations via HyperPATH chaining. -* **`GET //` (Read Action)** - * **Action:** Retrieves the data located at the specified `` within the device's *own* stored JSON structure. The device serializes the targeted JSON fragment (object, array, value) into a standard JSON string format. - * **Example:** `GET /data/myJson/user/settings` on the example JSON below would return the string `"{\"theme\":\"dark\",\"notifications\":true}"`. - * **HyperPATH:** Required. Specifies the target within the device's JSON data. +## Core Functions (Keys) -* **`//serialize` (Serialize Action)** - * **Action:** Takes arbitrary input data (piped from the `` segment of a HyperPATH chain) and returns its serialized JSON string representation. 
- * **Example:** `GET /~meta@1.0/info /~json@1.0/serialize` - fetches node info and then pipes it to this device to serialize the result as JSON. - * **HyperPATH:** This segment (`/serialize`) is appended to a previous HyperPATH segment. +### Serialization -* **`GET //query?` (Query Action)** - * **Action:** Performs a more complex query against the device's *own* stored JSON data using a specific query syntax (details TBD). - * **Example:** - * **HyperPATH:** Required. The base path to the device's data. - * **Query Parameter:** `?query=` (Syntax TBD). +* **`GET /~json@1.0/serialize` (Direct Serialize Action)** + * **Action:** Serializes the input message or data into a JSON string. + * **Example:** `GET /~json@1.0/serialize` - serializes the current message as JSON. + * **HyperPATH:** The path segment `/serialize` directly follows the device identifier. - +* **`GET //~json@1.0/serialize` (Chained Serialize Action)** + * **Action:** Takes arbitrary data output from `` (another device or operation) and returns its serialized JSON string representation. + * **Example:** `GET /~meta@1.0/info/~json@1.0/serialize` - fetches node info from the meta device and then pipes it to the JSON device to serialize the result as JSON. + * **HyperPATH:** This segment (`/~json@1.0/serialize`) is appended to a previous HyperPATH segment. 
-## Example JSON Data +## HyperPATH Chaining Example -Assuming `json` is mounted at `/data/myJson` and holds the following JSON: +The JSON device is particularly useful in HyperPATH chains to convert output from other devices into JSON format: -```json -{ - "user": { - "name": "Alice", - "id": 123, - "settings": { - "theme": "dark", - "notifications": true - } - }, - "items": [ - {"sku": "abc", "price": 10}, - {"sku": "def", "price": 20} - ] -} +``` +GET /~meta@1.0/info/~json@1.0/serialize ``` -**Access Examples:** - -- Get user name: `GET /data/myJson/user/name` -- Get theme setting: `GET /data/myJson/user/settings/theme` -- Get first item price: `GET /data/myJson/items[0]/price` - -## Events +This retrieves the node configuration from the meta device and serializes it to JSON. - +## See Also - +- [Message Device](../resources/source-code/dev_message.md) - Works well with JSON serialization +- [Meta Device](../resources/source-code/dev_meta.md) - Can provide configuration data to serialize [json module](../resources/source-code/dev_codec_json.md) \ No newline at end of file diff --git a/docs/devices/lua-at-5-3a.md b/docs/devices/lua-at-5-3a.md index 1c19eb556..4c961bca2 100644 --- a/docs/devices/lua-at-5-3a.md +++ b/docs/devices/lua-at-5-3a.md @@ -2,20 +2,18 @@ ## Overview -The `~lua@5.3a` device enables the execution of Lua scripts within an AO process. It utilizes the `luerl` Erlang library to provide a Lua 5.3 compatible environment. - -**Status:** Stable +The [`~lua@5.3a`](../resources/source-code/dev_lua.md) device enables the execution of Lua scripts within the HyperBEAM environment. It provides an isolated sandbox where Lua code can process incoming messages, interact with other devices, and manage state. ## Core Concept: Lua Script Execution -This device allows processes to perform computations defined in Lua scripts. Similar to the `~wasm64@1.0` device, it manages the lifecycle of a Lua execution state associated with the process. 
+This device allows processes to perform computations defined in Lua scripts. Similar to the [`~wasm64@1.0`](../resources/source-code/dev_wasm.md) device, it manages the lifecycle of a Lua execution state associated with the process. ## Key Functions (Keys) -These keys are typically used within an execution stack (managed by `dev_stack`) for an AO process. +These keys are typically used within an execution stack (managed by [`dev_stack`](../resources/source-code/dev_stack.md)) for an AO process. * **`init`** - * **Action:** Initializes the Lua environment for the process. It finds and loads the Lua script(s) associated with the process, creates a `luerl` state, applies sandboxing rules if specified, installs the `dev_lua_lib` (providing AO-specific functions like `ao.send`), and stores the initialized state in the process's private area (`priv/state`). + * **Action:** Initializes the Lua environment for the process. It finds and loads the Lua script(s) associated with the process, creates a `luerl` state, applies sandboxing rules if specified, installs the [`dev_lua_lib`](../resources/source-code/dev_lua_lib.md) (providing AO-specific functions like `ao.send`), and stores the initialized state in the process's private area (`priv/state`). * **Inputs (Expected in Process Definition or `init` Message):** * `script`: Can be: * An Arweave Transaction ID of the Lua script file. @@ -50,19 +48,19 @@ The `sandbox` option in the process definition restricts potentially harmful Lua ## AO Library (`dev_lua_lib`) -The `init` function automatically installs a helper library (`dev_lua_lib`) into the Lua state. This library typically provides functions for interacting with the AO environment from within the Lua script, such as: +The `init` function automatically installs a helper library ([`dev_lua_lib`](../resources/source-code/dev_lua_lib.md)) into the Lua state. 
This library typically provides functions for interacting with the AO environment from within the Lua script, such as: * `ao.send({ Target = ..., ... })`: To send messages from the process. * Access to message tags and data. ## Usage within `dev_stack` -Like `~wasm64@1.0`, the `~lua@5.3a` device is typically used within an execution stack. +Like [`~wasm64@1.0`](../resources/source-code/dev_wasm.md), the `~lua@5.3a` device is typically used within an execution stack. ```text # Example Process Definition Snippet Execution-Device: stack@1.0 -Execution-Stack: "scheduler@1.0", "lua@5.3a" +Execution-Stack: scheduler@1.0, lua@5.3a Script: Sandbox: true ``` diff --git a/docs/devices/message-at-1-0.md b/docs/devices/message-at-1-0.md index 828f6891f..000bdb860 100644 --- a/docs/devices/message-at-1-0.md +++ b/docs/devices/message-at-1-0.md @@ -4,14 +4,12 @@ The [`~message@1.0`](../resources/source-code/dev_message.md) device is a fundamental built-in device in HyperBEAM. It serves as the identity device for standard AO-Core messages, which are represented as Erlang maps internally. Its primary function is to allow manipulation and inspection of these message maps directly via HyperPATH requests, without needing a persistent process state. -**Status:** Stable - This device is particularly useful for: * Creating and modifying transient messages on the fly using query parameters. * Retrieving specific values from a message map. * Inspecting the keys of a message. -* Handling message commitments and verification (though often delegated to specialized commitment devices like `httpsig@1.0`). +* Handling message commitments and verification (though often delegated to specialized commitment devices like [`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)). 
 ## Core Functionality
@@ -45,8 +43,8 @@ The `message@1.0` device reserves several keys for specific operations:
 * **`set_path`**: A special case for setting the `path` key itself, which cannot be done via the standard `set` operation.
 * **`remove`**: Removes one or more specified keys from the message. Requires an `item` or `items` parameter.
 * **`keys`**: Returns a list of all public (non-private) keys present in the message map.
-* **`id`**: Calculates and returns the ID (hash) of the message. Considers active commitments based on specified `committers`. May delegate ID calculation to a device specified by the message's `id-device` key or the default (`httpsig@1.0`).
-* **`commit`**: Creates a commitment (e.g., a signature) for the message. Requires parameters like `commitment-device` and potentially committer information. Delegates the actual commitment generation to the specified device (default `httpsig@1.0`).
+* **`id`**: Calculates and returns the ID (hash) of the message. Considers active commitments based on specified `committers`. May delegate ID calculation to a device specified by the message's `id-device` key or the default ([`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)).
+* **`commit`**: Creates a commitment (e.g., a signature) for the message. Requires parameters like `commitment-device` and potentially committer information. Delegates the actual commitment generation to the specified device (default [`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)).
 * **`committers`**: Returns a list of committers associated with the commitments in the message. Can be filtered by request parameters.
 * **`commitments`**: Used internally and in requests to filter or specify which commitments to operate on (e.g., for `id` or `verify`).
 * **`verify`**: Verifies the commitments attached to the message. Can be filtered by `committers` or specific `commitment` IDs in the request.
Delegates verification to the device specified in each commitment (`commitment-device`).
diff --git a/docs/devices/meta-at-1-0.md b/docs/devices/meta-at-1-0.md
index 55b249b5a..b448a135b 100644
--- a/docs/devices/meta-at-1-0.md
+++ b/docs/devices/meta-at-1-0.md
@@ -2,9 +2,9 @@
 ## Overview
 
-The `~meta@1.0` device serves as the primary configuration and information endpoint for a HyperBEAM node. It's the default entry point for processing requests and allows querying or modifying the node's settings.
+The [`~meta@1.0`](../resources/source-code/dev_meta.md) device provides access to metadata and configuration information about the local HyperBEAM node and the broader AO network.
 
-**Status:** Stable
+It serves as the default entry point for processing requests and allows querying or modifying the node's settings.
 
 ## Core Functions (Keys)
 
@@ -30,7 +30,7 @@ While the `info` key is the primary interaction point, the `NodeMsg` managed by
 * `operator`: The address designated as the node operator (defaults to the address derived from `priv_wallet`).
 * `initialized`: Status indicating if the node setup is temporary or permanent.
 * `preprocessor` / `postprocessor`: Optional messages defining pre/post-processing logic for requests.
-* `routes`: Routing table used by `dev_router`.
+* `routes`: Routing table used by [`dev_router`](../resources/source-code/dev_router.md).
 * `store`: Configuration for data storage.
 * `trace`: Debug tracing options.
 * `p4_*`: Payment configuration.
@@ -40,7 +40,7 @@ While the `info` key is the primary interaction point, the `NodeMsg` managed by
 ## Utility Functions (Internal/Module Level)
 
-The `dev_meta.erl` module also contains helper functions used internally or callable from other Erlang modules:
+The [`dev_meta.erl`](../resources/source-code/dev_meta.md) module also contains helper functions used internally or callable from other Erlang modules:
 
 * `is_operator(RequestMsg, NodeMsg) -> boolean()`: Checks if the signer of `RequestMsg` matches the configured `operator` in `NodeMsg`.
@@ -52,4 +52,4 @@ The `~meta` device applies the node's configured `preprocessor` message before r Before a node can process general requests, it usually needs to be initialized. Attempts to access devices other than `~meta@1.0/info` before initialization typically result in an error. Initialization often involves setting essential parameters like the operator key via a `POST` to `info`. -[meta module](../resources/source-code/dev_meta.md) +[meta module](../resources/source-code/dev_meta.md) \ No newline at end of file diff --git a/docs/devices/index.md b/docs/devices/overview.md similarity index 85% rename from docs/devices/index.md rename to docs/devices/overview.md index 981a6b7f4..5a9ce0d1b 100644 --- a/docs/devices/index.md +++ b/docs/devices/overview.md @@ -6,7 +6,7 @@ Each device listed here represents a specific capability available to AO process ## Available Devices -Below is a list of documented built-in devices. Each page details the device's purpose, status, available functions (keys), and usage examples where applicable. +Below is a list of documented built-in devices. Each page details the device's purpose, available functions (keys), and usage examples where applicable. * **[`~message@1.0`](./message-at-1-0.md):** Base message handling and manipulation. * **[`~meta@1.0`](./meta-at-1-0.md):** Node configuration and metadata. @@ -24,7 +24,3 @@ Below is a list of documented built-in devices. Each page details the device's p Devices are typically referenced using a name and version, like `~@` (e.g., `~process@1.0`). The tilde (`~`) often indicates a primary, user-facing device, while internal or utility devices might use a `dev_` prefix in the source code (e.g., `dev_router`). Versioning indicates the specific interface and behavior of the device. Changes to a device that break backward compatibility usually result in a version increment. 
- -## Status Indicators - -Each device page will include a status indicator (e.g., `Stable`, `Beta`, `Experimental`) to help you gauge its production readiness and likelihood of future changes. diff --git a/docs/devices/process-at-1-0.md b/docs/devices/process-at-1-0.md index 82d8103cc..090d5d6d0 100644 --- a/docs/devices/process-at-1-0.md +++ b/docs/devices/process-at-1-0.md @@ -2,17 +2,15 @@ ## Overview -The `~process@1.0` device provides the core abstraction for persistent, shared computations within AO, analogous to smart contracts in other systems but with greater flexibility. It orchestrates the interaction between scheduling, state management, and computation execution for a specific process instance. - -**Status:** Stable +The [`~process@1.0`](../resources/source-code/dev_process.md) device represents a persistent, shared execution environment within HyperBEAM, analogous to a process or actor in other systems. It allows for stateful computation and interaction over time. ## Core Concept: Orchestration A message tagged with `Device: process@1.0` (the "Process Definition Message") doesn't typically perform computation itself. Instead, it defines *which other devices* should be used for key aspects of its lifecycle: -* **Scheduler Device:** Determines the order of incoming messages (assignments) to be processed. (Defaults to `~scheduler@1.0`). -* **Execution Device:** Executes the actual computation based on the current state and the scheduled message. Often configured as `dev_stack` to allow multiple computational steps (e.g., running WASM, applying cron jobs, handling proofs). -* **Push Device:** Handles the injection of new messages into the process's schedule. (Defaults to `~push@1.0`). +* **Scheduler Device:** Determines the order of incoming messages (assignments) to be processed. (Defaults to [`~scheduler@1.0`](../resources/source-code/dev_scheduler.md)). 
+* **Execution Device:** Executes the actual computation based on the current state and the scheduled message. Often configured as [`dev_stack`](../resources/source-code/dev_stack.md) to allow multiple computational steps (e.g., running WASM, applying cron jobs, handling proofs). +* **Push Device:** Handles the injection of new messages into the process\'s schedule. (Defaults to [`~push@1.0`](../resources/source-code/dev_push.md)). The `~process@1.0` device acts as a router, intercepting requests and delegating them to the appropriate configured device (scheduler, executor, etc.) by temporarily swapping the device tag on the message before resolving. @@ -30,7 +28,7 @@ These keys are accessed via HyperPATHs relative to the Process Definition Messag * **`GET /~process@1.0/compute/`** * **Action:** Computes the process state up to a specific point identified by `` (either a slot number or a message ID within the schedule). It retrieves assignments from the Scheduler Device and applies them sequentially using the configured Execution Device. * **Response:** The process state message after executing up to the target slot/message. - * **Caching:** Results are cached aggressively (see `dev_process_cache`) to avoid recomputation. + * **Caching:** Results are cached aggressively (see [`dev_process_cache`](../resources/source-code/dev_process_cache.md)) to avoid recomputation. * **`GET /~process@1.0/now`** * **Action:** Computes and returns the `Results` key from the *latest* known state of the process. This typically involves computing all pending assignments. * **Response:** The value of the `Results` key from the final state. 
@@ -47,13 +45,13 @@ A typical process definition message might look like this (represented conceptua ```text Device: process@1.0 -Scheduler-Device: scheduler@1.0 -Execution-Device: stack@1.0 -Execution-Stack: "scheduler@1.0", "cron@1.0", "wasm64@1.0", "PoDA@1.0" +Scheduler-Device: [`scheduler@1.0`](../resources/source-code/dev_scheduler.md) +Execution-Device: [`stack@1.0`](../resources/source-code/dev_stack.md) +Execution-Stack: "[`scheduler@1.0`](../resources/source-code/dev_scheduler.md)", "[`cron@1.0`](../resources/source-code/dev_cron.md)", "[`wasm64@1.0`](../resources/source-code/dev_wasm.md)", "[`PoDA@1.0`](../resources/source-code/dev_poda.md)" Cron-Frequency: 10-Minutes WASM-Image: PoDA: - Device: PoDA/1.0 + Device: [`PoDA/1.0`](../resources/source-code/dev_poda.md) Authority: Authority: Quorum: 2 @@ -65,7 +63,7 @@ This defines a process that uses: ## State Management & Caching -`~process@1.0` relies heavily on caching (`dev_process_cache`) to optimize performance. Full state snapshots and intermediate results are cached periodically (configurable via `Cache-Frequency` and `Cache-Keys` options) to avoid recomputing the entire history for every request. +`~process@1.0` relies heavily on caching ([`dev_process_cache`](../resources/source-code/dev_process_cache.md)) to optimize performance. Full state snapshots and intermediate results are cached periodically (configurable via `Cache-Frequency` and `Cache-Keys` options) to avoid recomputing the entire history for every request. ## Initialization (`init`) diff --git a/docs/devices/relay-at-1-0.md b/docs/devices/relay-at-1-0.md index c5b213a58..9d432568c 100644 --- a/docs/devices/relay-at-1-0.md +++ b/docs/devices/relay-at-1-0.md @@ -2,9 +2,7 @@ ## Overview -The `~relay@1.0` device is responsible for forwarding messages (HTTP requests) from one HyperBEAM node to another node or to any external HTTP(S) endpoint. 
- -**Status:** Stable +The [`~relay@1.0`](../resources/source-code/dev_relay.md) device enables HyperBEAM nodes to send messages to external HTTP endpoints or other AO nodes. ## Core Concept: Message Forwarding @@ -31,7 +29,7 @@ This device acts as an HTTP client within the AO ecosystem. It allows a node or * **Inputs:** Same as `call`. * **Response:** `{ok, <<"OK">>}`. * **`preprocess`** - * **Action:** This function is designed to be used as a node's global `preprocessor` (configured via `~meta@1.0`). When configured, it intercepts *all* incoming requests to the node and automatically rewrites them to be relayed via the `call` key. This effectively turns the node into a pure forwarding proxy, using its routing table (`dev_router`) to determine the destination. + * **Action:** This function is designed to be used as a node's global `preprocessor` (configured via [`~meta@1.0`](../resources/source-code/dev_meta.md)). When configured, it intercepts *all* incoming requests to the node and automatically rewrites them to be relayed via the `call` key. This effectively turns the node into a pure forwarding proxy, using its routing table ([`dev_router`](../resources/source-code/dev_router.md)) to determine the destination. * **Response:** A message structure that invokes `/~relay@1.0/call` with the original request as the target body. ## Use Cases @@ -43,6 +41,6 @@ This device acts as an HTTP client within the AO ecosystem. It allows a node or ## Interaction with Routing -When `call` or `cast` is invoked, the actual HTTP request dispatch is handled by `hb_http:request/2`. This function often utilizes the node's routing configuration (`dev_router`) to determine the specific peer/URL to send the request to, especially if the target path is an AO process ID or another internal identifier rather than a full external URL. +When `call` or `cast` is invoked, the actual HTTP request dispatch is handled by `hb_http:request/2`. 
This function often utilizes the node's routing configuration ([`dev_router`](../resources/source-code/dev_router.md)) to determine the specific peer/URL to send the request to, especially if the target path is an AO process ID or another internal identifier rather than a full external URL. [relay module](../resources/source-code/dev_relay.md) diff --git a/docs/devices/scheduler-at-1-0.md b/docs/devices/scheduler-at-1-0.md index d3e6ebf7e..2922d699c 100644 --- a/docs/devices/scheduler-at-1-0.md +++ b/docs/devices/scheduler-at-1-0.md @@ -2,19 +2,33 @@ ## Overview -The `~scheduler@1.0` device is responsible for managing the order of message execution for an AO process. It maintains the list of pending messages (assignments) and provides them sequentially to the process's Execution Device. - -**Status:** Stable +The [`~scheduler@1.0`](../resources/source-code/dev_scheduler.md) device manages the queueing and ordering of messages targeted at a specific process ([`~process@1.0`](../resources/source-code/dev_process.md)). It ensures that messages are processed according to defined scheduling rules. ## Core Concept: Message Ordering -When messages are sent to an AO process (typically via the `~push@1.0` device or a `POST` to the process's `/schedule` endpoint), they are added to a queue managed by the Scheduler Device associated with that process. The scheduler ensures that messages are processed one after another in a deterministic order, typically based on arrival time and potentially other factors like message nonces or timestamps (depending on the specific scheduler implementation details). +When messages are sent to an AO process (typically via the [`~push@1.0`](../resources/source-code/dev_push.md) device or a `POST` to the process's `/schedule` endpoint), they are added to a queue managed by the Scheduler Device associated with that process. 
The scheduler ensures that messages are processed one after another in a deterministic order, typically based on arrival time and potentially other factors like message nonces or timestamps (depending on the specific scheduler implementation details). + +The [`~process@1.0`](../resources/source-code/dev_process.md) device interacts with its configured Scheduler Device (which defaults to `~scheduler@1.0`) primarily through the `next` key to retrieve the next message to be executed. + +## Slot System + +Slots are a fundamental concept in the `~scheduler@1.0` device, providing a structured mechanism for organizing and sequencing computation. + +* **Sequential Ordering:** Slots act as numbered containers (starting at 0) that hold specific messages or tasks to be processed in a deterministic order. +* **State Tracking:** The `at-slot` key in a process's state (or a similar internal field like `current-slot` within the scheduler itself) tracks execution progress, indicating which messages have been processed and which are pending. The `slot` function can be used to query this. +* **Assignment Storage:** Each slot contains an "assignment" - the cryptographically verified message waiting to be executed. These assignments are retrieved using the `schedule` function or internally via `next`. +* **Schedule Organization:** The collection of all slots for a process forms its "schedule". +* **Application Scenarios:** + * **Scheduling Messages:** When a message is posted to a process (e.g., via `register`), it's assigned to the next available slot. + * **Status Monitoring:** Clients can query a process's current slot (via the `slot` function) to check progress. + * **Task Retrieval:** Processes find their next task by requesting the next assignment via the `next` function, which implicitly uses the next slot number based on the current state. + * **Distributed Consistency:** Slots ensure deterministic execution order across nodes, crucial for maintaining consistency in AO. 
-The `~process@1.0` device interacts with its configured Scheduler Device (which defaults to `~scheduler@1.0`) primarily through the `next` key to retrieve the next message to be executed. +This slotting mechanism is central to AO processes built on HyperBEAM, allowing for deterministic, verifiable computation. ## Key Functions (Keys) -These keys are typically accessed via the `~process@1.0` device, which delegates the calls to its configured scheduler. +These keys are typically accessed via the [`~process@1.0`](../resources/source-code/dev_process.md) device, which delegates the calls to its configured scheduler. * **`schedule` (Handler for `GET /~process@1.0/schedule`)** * **Action:** Retrieves the list of pending assignments (messages) for the process. May support cursor-based traversal for long schedules. @@ -29,7 +43,7 @@ These keys are typically accessed via the `~process@1.0` device, which delegates * **`status` (Handler for `GET /~process@1.0/status`)** * **Action:** Retrieves status information about the scheduler for the process. * **Response:** A status message. -* **`next` (Internal Key used by `~process@1.0`)** +* **`next` (Internal Key used by [`~process@1.0`](../resources/source-code/dev_process.md))** * **Action:** Retrieves the next assignment message from the schedule based on the process's current `at-slot` state. * **State Management:** Requires the current process state (`Msg1`) containing the `at-slot` key. * **Response:** `{ok, #{ "body" => , "state" => }}` or `{error, Reason}` if no next assignment is found. @@ -41,8 +55,8 @@ These keys are typically accessed via the `~process@1.0` device, which delegates ## Interaction with Other Components -* **`~process@1.0`:** The primary user of the scheduler, calling `next` to drive process execution. -* **`~push@1.0`:** Often used to add messages to the schedule via `POST /schedule`. 
+* **[`~process@1.0`](../resources/source-code/dev_process.md):** The primary user of the scheduler, calling `next` to drive process execution. +* **[`~push@1.0`](../resources/source-code/dev_push.md):** Often used to add messages to the schedule via `POST /schedule`. * **`dev_scheduler_cache`:** Internal module used for caching assignments locally on the node to reduce latency. * **Scheduling Unit (SU):** Schedulers may interact with external entities (like Arweave gateways or dedicated SU nodes) to fetch or commit schedules, although `~scheduler@1.0` aims for a simpler, often node-local or SU-client model. diff --git a/docs/devices/wasm64-at-1-0.md b/docs/devices/wasm64-at-1-0.md index 2673dd2eb..492d86e40 100644 --- a/docs/devices/wasm64-at-1-0.md +++ b/docs/devices/wasm64-at-1-0.md @@ -2,9 +2,7 @@ ## Overview -The `~wasm64@1.0` device enables the execution of WebAssembly (WASM) code within an AO process, specifically targeting the WASM Memory64 specification. It uses `hb_beamr`, an Erlang wrapper for the WebAssembly Micro Runtime (WAMR), as its backend. - -**Status:** Stable +The [`~wasm64@1.0`](../resources/source-code/dev_wasm.md) device enables the execution of 64-bit WebAssembly (WASM) code within the HyperBEAM environment. It provides a sandboxed environment for running compiled code from various languages (like Rust, C++, Go) that target WASM. ## Core Concept: WASM Execution @@ -14,7 +12,7 @@ The device manages the lifecycle of a WASM instance associated with the process ## Key Functions (Keys) -These keys are typically used within an execution stack (managed by `dev_stack`) for an AO process. +These keys are typically used within an execution stack (managed by [`dev_stack`](../resources/source-code/dev_stack.md)) for an AO process. * **`init`** * **Action:** Initializes the WASM environment for the process. 
It locates the WASM image (binary), starts a WAMR instance, and stores the instance handle and helper functions (for reading/writing WASM memory) in the process's private state (`priv/...`). @@ -51,12 +49,12 @@ These keys are typically used within an execution stack (managed by `dev_stack`) ## Usage within `dev_stack` -The `~wasm64@1.0` device is almost always used as part of an execution stack configured in the Process Definition Message and managed by `dev_stack`. `dev_stack` ensures that `init` is called on the first pass, `compute` on subsequent passes, and potentially `snapshot` or `terminate` as needed. +The `~wasm64@1.0` device is almost always used as part of an execution stack configured in the Process Definition Message and managed by [`dev_stack`](../resources/source-code/dev_stack.md). [`dev_stack`](../resources/source-code/dev_stack.md) ensures that `init` is called on the first pass, `compute` on subsequent passes, and potentially `snapshot` or `terminate` as needed. ```text # Example Process Definition Snippet -Execution-Device: stack@1.0 -Execution-Stack: "scheduler@1.0", "wasm64@1.0" +Execution-Device: [`stack@1.0`](../resources/source-code/dev_stack.md) +Execution-Stack: "[`scheduler@1.0`](../resources/source-code/dev_scheduler.md)", "wasm64@1.0" WASM-Image: ``` diff --git a/docs/introduction/ao-devices.md b/docs/introduction/ao-devices.md index 71e87a209..818e0d42d 100644 --- a/docs/introduction/ao-devices.md +++ b/docs/introduction/ao-devices.md @@ -1,6 +1,6 @@ # AO Devices -In AO-Core and its implementation HyperBEAM, **Devices** are modular components responsible for processing and interpreting [Messages](./what-is-ao-core.md#messages). They define the specific logic for how computations are performed, data is handled, or interactions occur within the AO ecosystem. +In AO-Core and its implementation HyperBEAM, **Devices** are modular components responsible for processing and interpreting [Messages](./what-is-ao-core.md#core-concepts). 
They define the specific logic for how computations are performed, data is handled, or interactions occur within the AO ecosystem. Think of Devices as specialized engines or services that can be plugged into the AO framework. This modularity is key to AO's flexibility and extensibility. @@ -39,10 +39,22 @@ Devices are typically invoked via [HyperPATHs](./pathing-in-ao-core.md). The pat ``` # Example: Execute the 'now' key on the process device for a specific process -/PROCESS_ID~process@1.0/now +/~process@1.0/now # Example: Relay a GET request via the relay device /~relay@1.0/call?method=GET&path=https://example.com ``` -The specific functions or 'keys' available for each Device are documented individually. See the [Devices section](../devices/index.md) for details on specific built-in devices. \ No newline at end of file +The specific functions or 'keys' available for each Device are documented individually. See the [Devices section](../devices/index.md) for details on specific built-in devices. + +## The Potential of Devices + +The modular nature of AO Devices opens up vast possibilities for future expansion and innovation. The current set of preloaded and community devices is just the beginning. As the AO ecosystem evolves, we can anticipate the development of new devices catering to increasingly specialized needs: + +* **Specialized Hardware Integration:** Devices could be created to interface directly with specialized hardware accelerators like GPUs (for AI/ML tasks such as running large language models), TPUs, or FPGAs, allowing AO processes to leverage high-performance computing resources securely and verifiably. +* **Advanced Cryptography:** New devices could implement cutting-edge cryptographic techniques, such as zero-knowledge proofs (ZKPs) or fully homomorphic encryption (FHE), enabling enhanced privacy and complex computations on encrypted data. 
+* **Cross-Chain & Off-Chain Bridges:** Devices could act as secure bridges to other blockchain networks or traditional Web2 APIs, facilitating seamless interoperability and data exchange between AO and the wider digital world. +* **AI/ML Specific Devices:** Beyond raw GPU access, specialized devices could offer higher-level AI/ML functionalities, like optimized model inference engines or distributed training frameworks. +* **Domain-Specific Logic:** Communities or organizations could develop devices tailored to specific industries or use cases, such as decentralized finance (DeFi) primitives, scientific computing libraries, or decentralized identity management systems. + +The Device framework ensures that AO can adapt and grow, incorporating new technologies and computational paradigms without requiring fundamental changes to the core protocol. This extensibility is key to AO's long-term vision of becoming a truly global, decentralized computer. diff --git a/docs/introduction/pathing-in-ao-core.md b/docs/introduction/pathing-in-ao-core.md index 334d53834..2c1a82be4 100644 --- a/docs/introduction/pathing-in-ao-core.md +++ b/docs/introduction/pathing-in-ao-core.md @@ -38,11 +38,11 @@ Under the surface, these keys represent AO-Core messages. As we progress through ### State Navigation -You can browse through sub-messages and data fields by accessing them as keys. For example, if a process stores its interaction count in a field named `at-slot`, you can access it like this: +You can browse through sub-messages and data fields by accessing them as keys. For example, if a process stores its interaction count in a field named `cache`, you can access it like this: ``` -/~process@1.0/compute/at-slot +/~process@1.0/compute/cache ``` -This shows the latest 'slot' (number of interactions) of your process. Each response is: +This shows the 'cache' of your process. 
Each response is: - A message with a signature attesting to its correctness - A hashpath describing its generation @@ -58,67 +58,42 @@ Beyond path segments, HyperBEAM URLs can include query parameters that utilize a This powerful feature enables the expression of complex data structures directly in URLs. -**Example:** - -Consider the following URL: - -``` -GET /~message@1.0&name="Alice"&age+integer=30&items+list="apple",1,"banana"&config+map=key1="val1";key2=true/[PATH] -``` - -HyperBEAM processes this as follows: - -1. Root Device: `~message@1.0` -2. Query Parameters: - * `name="Alice"`: Key `name`, implicit type `binary`, value `<<"Alice">>`. - * `age+integer=30`: Key `age`, type `integer`, value `30`. - * `items+list="apple",1,"banana"`: Key `items`, type `list`, value `[<<"apple">>, 1, <<"banana">>]` (parsed using [Structured Fields](https://www.rfc-editor.org/rfc/rfc8941.html)). - * `config+map=key1="val1";key2=true`: Key `config`, type `map`, value `#{<<"key1">> => <<"val1">>, <<"key2">> => true}` (parsed using [Structured Fields](https://www.rfc-editor.org/rfc/rfc8941.html)). -3. Initial Message Map: `#{ <<"name">> => <<"Alice">>, <<"age">> => 30, <<"items">> => [<<"apple">>, 1, <<"banana">>], <<"config">> => #{<<"key1">> => <<"val1">>, <<"key2">> => true} }` - -Depending on the `[PATH]` provided: - -* **If `[PATH]` is `/items/1`:** - * The path instructs the `~message@1.0` device to access the key `items` and then the element at index `1` (0-indexed internally, so the *second* element). - * Response: The integer `1`. - -* **If `[PATH]` is `/config/key1`:** - * The path instructs the `~message@1.0` device to access the key `config` (which resolves to the map `#{<<"key1">> => <<"val1">>, <<"key2">> => true}`), and then access the key `key1` within that map. - * Response: The binary `<<"val1">>`. - ## Examples +The following examples illustrate using HyperPATH with various AO-Core processes and devices. 
While these cover a few specific use cases, HyperBEAM's extensible nature allows interaction with any device or process via HyperPATH. For a deeper understanding, we encourage exploring the [source code](https://github.com/permaweb/hyperbeam) and experimenting with different paths. + ### Example 1: Accessing Full Process State -To get the complete, real-time state of a process identified by ``, use the `/now` path component with the `~process@1.0` device: +To get the complete, real-time state of a process identified by ``, use the `/now` path component with the [`~process@1.0`](../devices/process-at-1-0.md) device: ``` GET /~process@1.0/now ``` -This instructs the AO-Core node to load the process and execute the `now` function on the `~process@1.0` device. +This instructs the AO-Core node to load the process and execute the `now` function on the [`~process@1.0`](../devices/process-at-1-0.md) device. ### Example 2: Navigating to Specific Process Data If a process maintains its state in a map and you want to access a specific field, like `at-slot`, using the faster `/compute` endpoint: ``` -GET /~process@1.0/compute/at-slot +GET /~process@1.0/compute/cache ``` -This accesses the `compute` key on the `~process@1.0` device and then navigates to the `at-slot` key within the resulting state map. +This accesses the `compute` key on the [`~process@1.0`](../devices/process-at-1-0.md) device and then navigates to the `cache` key within the resulting state map. Using this path, you will see the latest 'cache' of your process (the number of interactions it has received). Every piece of relevant information about your process can be accessed similarly, effectively providing a native API. + (Note: This represents direct navigation within the process state structure. For accessing data specifically published via the `~patch@1.0` device, see the documentation on [Exposing Process State](../build/exposing-process-state.md), which typically uses the `/cache/` path.) 
### Example 3: Basic `~message@1.0` Usage -Here's a simpler use of `~message@1.0` to create a message and retrieve a value: +Here's a simple example of using [`~message@1.0`](../devices/message-at-1-0.md) to create a message and retrieve a value: ``` GET /~message@1.0&greeting="Hello"&count+integer=42/count ``` 1. **Base:** `/` - The base URL of the HyperBEAM node. -2. **Root Device:** `~message@1.0` +2. **Root Device:** [`~message@1.0`](../devices/message-at-1-0.md) 3. **Query Params:** `greeting="Hello"` (binary) and `count+integer=42` (integer), forming the message `#{ <<"greeting">> => <<"Hello">>, <<"count">> => 42 }`. 4. **Path:** `/count` tells `~message@1.0` to retrieve the value associated with the key `count`. @@ -126,7 +101,7 @@ GET /~message@1.0&greeting="Hello"&count+integer=42/count ### Example 4: Using the `~message@1.0` Device with Type Casting -The `~message@1.0` device can be used to construct and query transient messages, utilizing type casting in query parameters. +The [`~message@1.0`](../devices/message-at-1-0.md) device can be used to construct and query transient messages, utilizing type casting in query parameters. Consider the following URL: @@ -136,14 +111,15 @@ GET /~message@1.0&name="Alice"&age+integer=30&items+list="apple",1,"banana"&conf HyperBEAM processes this as follows: -1. Root Device: `~message@1.0` -2. Query Parameters (with type casting): +1. **Base:** `/` - The base URL of the HyperBEAM node. +2. **Root Device:** [`~message@1.0`](../devices/message-at-1-0.md) +3. **Query Parameters (with type casting):** * `name="Alice"` -> `#{ <<"name">> => <<"Alice">> }` (binary) * `age+integer=30` -> `#{ <<"age">> => 30 }` (integer) * `items+list="apple",1,"banana"` -> `#{ <<"items">> => [<<"apple">>, 1, <<"banana">>] }` (list) * `config+map=key1="val1";key2=true` -> `#{ <<"config">> => #{<<"key1">> => <<"val1">>, <<"key2">> => true} }` (map) -3. Initial Message Map: A combination of the above key-value pairs. -4. Path Evaluation: +4. 
**Initial Message Map:** A combination of the above key-value pairs. +5. **Path Evaluation:** * If `[PATH]` is `/items/1`, the response is the integer `1`. * If `[PATH]` is `/config/key1`, the response is the binary `<<"val1">>`. diff --git a/docs/introduction/what-is-ao-core.md b/docs/introduction/what-is-ao-core.md index 89133f9c4..11e59a6bc 100644 --- a/docs/introduction/what-is-ao-core.md +++ b/docs/introduction/what-is-ao-core.md @@ -19,4 +19,4 @@ AO-Core revolves around three fundamental components: AO-Core transforms the permanent data storage of Arweave into a global, shared computation space, enabling the creation of complex, autonomous, and scalable decentralized applications. -*See also: [The AO-Core Protocol Specification (Draft)](https://github.com/permaweb/ao-core/blob/main/spec.md)* \ No newline at end of file + \ No newline at end of file diff --git a/docs/introduction/what-is-hyperbeam.md b/docs/introduction/what-is-hyperbeam.md index 481b8d8eb..3d9f669c4 100644 --- a/docs/introduction/what-is-hyperbeam.md +++ b/docs/introduction/what-is-hyperbeam.md @@ -1,21 +1,39 @@ # What is HyperBEAM? -HyperBEAM is the primary, production-ready implementation of the [AO-Core protocol](./what-is-ao-core.md), written in Erlang/OTP. It functions as a decentralized operating system, providing the runtime environment and necessary services to execute AO-Core computations across a network of distributed nodes. +HyperBEAM is the primary, production-ready implementation of the [AO-Core protocol](./what-is-ao-core.md), built on the robust Erlang/OTP framework. It serves as a decentralized operating system, powering the AO Computer—a scalable, trust-minimized, distributed supercomputer built on permanent storage. HyperBEAM provides the runtime environment and essential services to execute AO-Core computations across a network of distributed nodes. 
-## Key Roles +## Why HyperBEAM Matters -* **AO-Core Implementation:** HyperBEAM translates the abstract concepts of AO-Core (Messages, Devices, Paths) into a concrete, runnable system. -* **Decentralized OS:** It provides the necessary infrastructure for nodes to participate in the AO network, manage resources, execute computations, and communicate with each other. -* **Erlang/OTP Foundation:** Built on the highly concurrent, fault-tolerant BEAM virtual machine, HyperBEAM inherits robustness, scalability, and features ideal for distributed systems, such as lightweight processes and message passing. -* **Hardware Abstraction:** HyperBEAM allows AO computations to run independently of the underlying hardware, enabling a diverse network of nodes to contribute resources. -* **Node Coordination:** It manages how individual nodes join the network, offer services (by running specific Devices), and interact with each other. +HyperBEAM transforms the abstract concepts of AO-Core—such as [Messages](./what-is-ao-core.md#core-concepts), [Devices](./what-is-ao-core.md#core-concepts), and [Paths](./what-is-ao-core.md#core-concepts)—into a concrete, operational system. Here's why it's pivotal to the AO ecosystem: + +- **Modularity via Devices:** HyperBEAM introduces a uniquely modular architecture centered around [Devices](./ao-devices.md). These pluggable components define specific computational logic (like running WASM, managing state, or relaying data), allowing for unprecedented flexibility, specialization, and extensibility in a decentralized system. +- **Decentralized OS:** It equips nodes with the infrastructure to join the AO network, manage resources, execute computations, and communicate seamlessly. +- **Erlang/OTP Powerhouse:** Leveraging the BEAM virtual machine, HyperBEAM inherits Erlang's concurrency, fault tolerance, and scalability—perfect for distributed systems with lightweight processes and message passing. 
+- **Hardware Independence:** It abstracts underlying hardware, allowing diverse nodes to contribute resources without compatibility issues. +- **Node Coordination:** It governs how nodes join the network, offer services through specific Devices, and interact with one another. +- **Verifiable Computation:** Through hashpaths and the Converge Protocol, HyperBEAM ensures computation results are cryptographically verified and trustworthy. + +In essence, HyperBEAM is the engine that drives the AO Computer, enabling a vision of decentralized, verifiable computing at scale. ## Core Components & Features -* **Preloaded Devices:** HyperBEAM comes with a suite of built-in devices (e.g., `~meta`, `~relay`, `~process`, `~scheduler`, `~wasm64`) providing essential functionalities for node operation, computation, and communication. -* **HTTP Interface:** Nodes expose an HTTP server, allowing interaction via standard web requests and HyperPATHs. -* **Modularity:** Its design allows for easy extension and addition of new devices and functionalities. -* **Developer Tooling:** Includes tools for testing (`rebar3 eunit`), debugging (`?event` logging), and profiling (`eflame`). +- **Pluggable Devices:** The heart of HyperBEAM's extensibility. It includes essential built-in devices like [`~meta`](../devices/meta-at-1-0.md), [`~relay`](../devices/relay-at-1-0.md), [`~process`](../devices/process-at-1-0.md), [`~scheduler`](../devices/scheduler-at-1-0.md), and [`~wasm64`](../devices/wasm64-at-1-0.md) for core functionality, but the system is designed for easy addition of new custom devices. +- **Message System:** Everything in HyperBEAM is a "Message"—a map of named functions or binary data that can be processed, transformed, and cryptographically verified. +- **HTTP Interface:** Nodes expose an HTTP server for interaction via standard web requests and HyperPATHs, structured URLs that represent computation paths. 
+- **Modularity:** Its design supports easy extension, allowing new devices and functionalities to be added effortlessly. + +## Architecture + +* **Initialization Flow:** When a HyperBEAM node starts, it initializes the name service, scheduler registry, timestamp server, and HTTP server, establishing core services for process management, timing, communication, and storage. +* **Compute Model:** Computation follows the pattern `Message1(Message2) => Message3`, where messages are resolved through their devices and [paths](./pathing-in-ao-core.md). The integrity and history of these computations are ensured by **hashpaths**, which serve as a cryptographic audit trail. +* **Scheduler System:** The scheduler component manages execution order using ["slots"](../devices/scheduler-at-1-0.md#slot-system) — sequential positions that guarantee deterministic computation. +* **Process Slots:** Each process has numbered slots starting from 0 that track message execution order, ensuring consistent computation even across distributed nodes. + +## HTTP API and Pathing + +HyperBEAM exposes a powerful HTTP API that allows for interacting with processes and accessing data through structured URL patterns. We call URLs that represent computation paths "HyperPATHs". The URL bar effectively functions as a command-line interface for AO's trustless and verifiable compute. + +For a comprehensive guide on constructing and interpreting paths in AO-Core, including detailed examples and best practices, see [Pathing in AO-Core](./pathing-in-ao-core.md). In essence, HyperBEAM is the engine that powers the AO Computer, enabling the vision of a scalable, trust-minimized, decentralized supercomputer built on permanent storage. 
diff --git a/docs/js/custom-nav.js b/docs/js/custom-nav.js deleted file mode 100644 index ff3c8f621..000000000 --- a/docs/js/custom-nav.js +++ /dev/null @@ -1,23 +0,0 @@ -document.addEventListener('DOMContentLoaded', () => { - // Select all links within the main navigation tabs - const tabLinks = document.querySelectorAll('nav.md-tabs .md-tabs__list .md-tabs__item a'); - - tabLinks.forEach(link => { - link.addEventListener('click', function(event) { - // Basic check if it's an internal link (avoids issues if external links are ever added) - // You might refine this check based on your site structure if needed. - if (link.hostname === window.location.hostname || !link.hostname.length) { - - console.log('Tab clicked, forcing full reload for:', link.href); // Optional: for debugging - - // Prevent the default navigation behavior AND stop the event from bubbling up - // to the theme's instant navigation handler. - event.preventDefault(); - event.stopPropagation(); - - // Force a full page load - window.location.href = link.href; - } - }, true); // Use capture phase to try and catch the event before the theme's handler - }); -}); \ No newline at end of file diff --git a/docs/js/header-scroll.js b/docs/js/header-scroll.js new file mode 100644 index 000000000..1a27e8e35 --- /dev/null +++ b/docs/js/header-scroll.js @@ -0,0 +1,91 @@ +document.addEventListener('DOMContentLoaded', function() { + const header = document.querySelector('.md-header'); + const paddingTargetElement = document.querySelector('.md-content'); // Element for padding adjustments + const contentVisibilityTargetElement = document.querySelector('.md-main__inner.md-grid'); // Element to hide/show with transition + + if (!header || !paddingTargetElement || !contentVisibilityTargetElement) { + console.error('Header scroll: Required elements (.md-header, .md-content, or .md-main__inner.md-grid) not found.'); + return; + } + + const HIDING_CLASS = 'content--initializing'; + + // Function to inject CSS for 
transition and initial hiding + function injectTransitionStyles() { + const styleId = 'md-content-transition-style'; + if (document.getElementById(styleId)) { + return; // Style already added + } + const styleElement = document.createElement('style'); + styleElement.id = styleId; + styleElement.textContent = ` + .md-main__inner.md-grid { /* Style for the element to be shown with transition */ + opacity: 0; + transition: opacity 200ms ease-in-out; /* Tiny transition */ + } + .${HIDING_CLASS} { /* Class to initially hide the content */ + display: none !important; + opacity: 0 !important; /* Ensure opacity is 0 when hidden */ + } + `; + document.head.appendChild(styleElement); + } + + // Initially hide the content and set up for transition + injectTransitionStyles(); + contentVisibilityTargetElement.classList.add(HIDING_CLASS); + + let headerHeight = 0; + + // Function to update paddings based on header state + function updatePaddings() { + const currentHeaderHeight = header.offsetHeight; + if (currentHeaderHeight > 0) { + headerHeight = currentHeaderHeight; + } + + if (header.classList.contains('header-hidden')) { + if (paddingTargetElement) paddingTargetElement.style.paddingTop = '75px'; + document.documentElement.style.scrollPaddingTop = '0'; + } else { + if (paddingTargetElement) paddingTargetElement.style.paddingTop = headerHeight + 'px'; + document.documentElement.style.scrollPaddingTop = headerHeight + 'px'; + } + } + + // Function to initialize header state and reveal content + function initializeHeaderState() { + headerHeight = header.offsetHeight; + updatePaddings(); // Apply padding to paddingTargetElement + + // Make content displayable (it's still opacity 0 due to injected styles) + contentVisibilityTargetElement.classList.remove(HIDING_CLASS); + + // Trigger the opacity transition to fade in the content + requestAnimationFrame(() => { + contentVisibilityTargetElement.style.opacity = 1; + }); + } + + window.addEventListener('load', initializeHeaderState); 
+ + window.addEventListener('scroll', function() { + const scrollTop = window.scrollY || document.documentElement.scrollTop; + + if (scrollTop > 0) { // When scrolling down / header should be hidden + if (!header.classList.contains('header-hidden')) { + header.classList.add('header-hidden'); + updatePaddings(); + } + } else { // When at the top / header should be visible + if (header.classList.contains('header-hidden')) { + header.classList.remove('header-hidden'); + updatePaddings(); + } + } + }); + + window.addEventListener('resize', function() { + updatePaddings(); + }); +}); \ No newline at end of file diff --git a/docs/js/toc-highlight.js b/docs/js/toc-highlight.js index 2d95f3532..1d4e1416d 100644 --- a/docs/js/toc-highlight.js +++ b/docs/js/toc-highlight.js @@ -1,49 +1,124 @@ -document.addEventListener('DOMContentLoaded', function() { - function updateTocHighlight() { - // Remove existing active classes from ToC links - const tocLinks = document.querySelectorAll('.md-nav--secondary .md-nav__link'); - tocLinks.forEach(link => { - link.classList.remove('md-nav__link--active'); +document.addEventListener("DOMContentLoaded", function () { + /** + * Fixes navigation highlighting in MkDocs Material Theme: + * 1. If a list item has both an active label and an active link, remove active from label + * 2. If a parent item has active children, remove active from the parent's links + * 3. 
When scroll position is at the top, reactivate the parent navigation item + */ + function fixNavigationHighlighting() { + // First fix case where both label and anchor in same item are active + document.querySelectorAll(".md-nav__item").forEach(function (item) { + const label = item.querySelector("label.md-nav__link--active"); + const link = item.querySelector("a.md-nav__link--active"); + + // If both exist in the same item, keep only the link active + if (label && link) { + label.classList.remove("md-nav__link--active"); + } }); - // Get the current hash, decoded - const currentHash = window.location.hash; - if (currentHash) { - try { - const decodedHash = decodeURIComponent(currentHash); - // Find the ToC link that matches the current hash - // We specifically target links within the ToC nav (.md-nav--secondary) - const targetLink = document.querySelector(`.md-nav--secondary .md-nav__link[href$="${decodedHash}"]`); - - if (targetLink) { - targetLink.classList.add('md-nav__link--active'); - // console.log('TOC highlight added to:', targetLink.href); // For debugging - } else { - // console.log('TOC highlight: No link found for hash:', decodedHash); // For debugging + // Check if scroll position is at the top + const atTop = window.scrollY === 0; + + // Now fix nested navigation (parent sections shouldn't be active when children are, unless at top) + document + .querySelectorAll(".md-nav__item--active") + .forEach(function (activeItem) { + // Check if this active item contains other active items + const hasActiveChildren = activeItem.querySelector( + ".md-nav__link--active", + ); + + // console.log("Has Active Children:", hasActiveChildren); + + if (hasActiveChildren && !atTop) { + // Remove active class from parent's links + const parentLinks = activeItem.querySelectorAll( + ":scope > a.md-nav__link--active, :scope > label.md-nav__link--active", + ); + parentLinks.forEach(function (link) { + link.classList.remove("md-nav__link--active"); + }); + } else if 
(!hasActiveChildren && atTop) { + // Reactivate parent link if at top and no active children + const parentLinks = activeItem.querySelectorAll( + ":scope > a.md-nav__link, :scope > label.md-nav__link", + ); + parentLinks.forEach(function (link) { + link.classList.add("md-nav__link--active"); + }); } - } catch (e) { - console.error("Error decoding hash for TOC highlight:", e); - } - } + }); } - // Run the function on initial page load - updateTocHighlight(); + // Initial run + fixNavigationHighlighting(); + + // Set up a mutation observer to detect changes + const observer = new MutationObserver(function (mutations) { + let shouldUpdate = false; + + for (const mutation of mutations) { + if ( + mutation.type === "attributes" && + mutation.attributeName === "class" && + (mutation.target.classList.contains("md-nav__link--active") || + mutation.target.classList.contains("md-nav__item--active")) + ) { + shouldUpdate = true; + break; + } + } - // Run the function whenever the hash changes (for same-page navigation) - window.addEventListener('hashchange', updateTocHighlight); + if (shouldUpdate) { + fixNavigationHighlighting(); + } + }); - // Compatibility with MkDocs Material Instant Loading: - // Subscribe to the document$ observable which emits after instant loading completes. - if (typeof document$ !== 'undefined') { - document$.subscribe(function() { - // Use a small timeout to ensure the DOM is fully updated after navigation - setTimeout(updateTocHighlight, 50); + // Observe all navigation elements + document + .querySelectorAll(".md-nav__item, .md-nav__link") + .forEach(function (el) { + observer.observe(el, { attributes: true }); }); - } else { - console.warn("MkDocs Material 'document$' observable not found. Instant loading TOC highlighting might not work."); - // Fallback or alternative logic could be placed here if needed, - // but relying on document$ is the primary method for instant loading. 
- } -}); \ No newline at end of file + // Update on navigation events + window.addEventListener("popstate", function () { + setTimeout(fixNavigationHighlighting, 100); + }); + + window.addEventListener("load", fixNavigationHighlighting); + + // Update on scroll with throttling + let scrollTimeout; + window.addEventListener("scroll", function () { + if (!scrollTimeout) { + scrollTimeout = setTimeout(function () { + fixNavigationHighlighting(); + scrollTimeout = null; + }, 50); + } + }); + + document.addEventListener("click", function (e) { + if (e.target.closest(".md-nav__link")) { + setTimeout(fixNavigationHighlighting, 50); + } + }); + + // Add click event handling for navigation tabs + const tabLinks = document.querySelectorAll('nav.md-tabs .md-tabs__list .md-tabs__item a'); + tabLinks.forEach(link => { + link.addEventListener('click', function(event) { + // Basic check if it's an internal link + if (link.hostname === window.location.hostname || !link.hostname.length) { + console.log('Tab clicked, forcing full reload for:', link.href); + console.log('Updating navigation highlighting before navigation'); + fixNavigationHighlighting(); + event.preventDefault(); + event.stopPropagation(); + window.location.href = link.href; + } + }, true); // Use capture phase to catch the event before the theme's handler + }); +}); diff --git a/docs/llms-full.txt b/docs/llms-full.txt index 8e51876a0..29c722720 100644 --- a/docs/llms-full.txt +++ b/docs/llms-full.txt @@ -1,102 +1,467 @@ -Generated: 2025-05-02T14:36:17Z +Generated: 2025-05-15T13:32:25Z ---- START OF FILE: docs/devices/index.md --- -# Devices +--- START OF FILE: docs/build/exposing-process-state.md --- +# Exposing Process State with the Patch Device -Devices are the core functional units within HyperBEAM and AO-Core. They define how messages are processed and what actions can be performed. 
+The [`~patch@1.0`](../resources/source-code/dev_patch.md) device provides a mechanism for AO processes to expose parts of their internal state, making it readable via direct HTTP GET requests along the process's HyperPATH. -Each device listed here represents a specific capability available to AO processes and nodes. Understanding these devices is key to building complex applications and configuring your HyperBEAM node effectively. +## Why Use the Patch Device? -## Available Devices +Standard AO process execution typically involves sending a message to a process, letting it compute, and then potentially reading results from its outbox or state after the computation is scheduled and finished. This is asynchronous. -Below is a list of documented built-in devices. Each page details the device's purpose, status, available functions (keys), and usage examples where applicable. +The `patch` device allows for a more direct, synchronous-like read pattern. A process can use it to "patch" specific data elements from its internal state into a location that becomes directly accessible via a HyperPATH GET request *before* the full asynchronous scheduling might complete. -* **[`~message@1.0`](./message-at-1-0.md):** Base message handling and manipulation. -* **[`~meta@1.0`](./meta-at-1-0.md):** Node configuration and metadata. -* **[`~process@1.0`](./process-at-1-0.md):** Persistent, shared process execution environment. -* **[`~scheduler@1.0`](./scheduler-at-1-0.md):** Message scheduling and execution ordering for processes. -* **[`~wasm64@1.0`](./wasm64-at-1-0.md):** WebAssembly (WASM) execution engine. -* **[`~lua@5.3a`](./lua-at-5-3a.md):** Lua script execution engine. -* **[`~relay@1.0`](./relay-at-1-0.md):** Relaying messages to other nodes or HTTP endpoints. -* **[`~json@1.0`](./json-at-1-0.md):** Provides access to JSON data structures using HyperPATHs. 
+This is particularly useful for: -*(More devices will be documented here as specifications are finalized and reviewed.)* +* **Web Interfaces:** Building frontends that need to quickly read specific data points from an AO process without waiting for a full message round-trip. +* **Data Feeds:** Exposing specific metrics or state variables for monitoring or integration with other systems. +* **Caching:** Allowing frequently accessed data to be retrieved efficiently via simple HTTP GETs. -## Device Naming and Versioning +## How it Works -Devices are typically referenced using a name and version, like `~@` (e.g., `~process@1.0`). The tilde (`~`) often indicates a primary, user-facing device, while internal or utility devices might use a `dev_` prefix in the source code (e.g., `dev_router`). +1. **Process Logic:** Inside your AO process code (e.g., in Lua or WASM), when you want to expose data, you construct an **Outbound Message** targeted at the [`~patch@1.0`](../resources/source-code/dev_patch.md) device. +2. **Patch Message Format:** This outbound message typically includes tags that specify: + * `device = 'patch@1.0'` + * A `cache` tag containing a table. The **keys** within this table become the final segments in the HyperPATH used to access the data, and the **values** are the data itself. + * Example Lua using `aos`: `Send({ Target = ao.id, device = 'patch@1.0', cache = { mydatakey = MyValue } })` +3. **HyperBEAM Execution:** When HyperBEAM executes the process schedule and encounters this outbound message: + * It invokes the `dev_patch` module. + * `dev_patch` inspects the message. + * It takes the keys from the `cache` table (`mydatakey` in the example) and their associated values (`MyValue`) and makes these values available under the `/cache/` path segment. +4. 
**HTTP Access:** You (or any HTTP client) can now access this data directly using a GET request: + ``` + GET /~process@1.0/compute/cache/ + # Or potentially using /now/ + GET /~process@1.0/now/cache/ + ``` + The HyperBEAM node serving the request will resolve the path up to `/compute/cache` (or `/now/cache`), then use the logic associated with the patched data (`mydatakey`) to return the `MyValue` directly. -Versioning indicates the specific interface and behavior of the device. Changes to a device that break backward compatibility usually result in a version increment. +## Initial State Sync (Optional) -## Status Indicators +It can be beneficial to expose the initial state of your process via the `patch` device as soon as the process is loaded or spawned. This makes key data points immediately accessible via HTTP GET requests without requiring an initial interaction message to trigger a `Send` to the patch device. -Each device page will include a status indicator (e.g., `Stable`, `Beta`, `Experimental`) to help you gauge its production readiness and likelihood of future changes. +This pattern typically involves checking a flag within your process state to ensure the initial sync only happens once. Here's an example from the Token Blueprint, demonstrating how to sync `Balances` and `TotalSupply` right after the process starts: ---- END OF FILE: docs/devices/index.md --- +```lua +-- Place this logic at the top level of your process script, +-- outside of specific handlers, so it runs on load. 
---- START OF FILE: docs/devices/json-at-1-0.md --- -# Device: ~json@1.0 +-- Initialize the sync flag if it doesn't exist +InitialSync = InitialSync or 'INCOMPLETE' -## Overview +-- Sync state on spawn/load if not already done +if InitialSync == 'INCOMPLETE' then + -- Send the relevant state variables to the patch device + Send({ device = 'patch@1.0', cache = { balances = Balances, totalsupply = TotalSupply } }) + -- Update the flag to prevent re-syncing on subsequent executions + InitialSync = 'COMPLETE' + print("Initial state sync complete. Balances and TotalSupply patched.") +end +``` -The `json` device provides a structured way to access and interact with JSON (JavaScript Object Notation) data within the HyperBEAM environment. It allows processes to read, query, and potentially modify JSON objects stored or referenced by the device. It can also be used via HyperPATH chaining to serialize arbitrary data from other devices. +**Explanation:** -**Status:** Experimental +1. `InitialSync = InitialSync or 'INCOMPLETE'`: This line ensures the `InitialSync` variable exists in the process state, initializing it to `'INCOMPLETE'` if it's the first time the code runs. +2. `if InitialSync == 'INCOMPLETE' then`: The code proceeds only if the initial sync hasn't been marked as complete. +3. `Send(...)`: The relevant state (`Balances`, `TotalSupply`) is sent to the `patch` device, making it available under `/cache/balances` and `/cache/totalsupply`. +4. `InitialSync = 'COMPLETE'`: The flag is updated, so this block won't execute again in future message handlers within the same process lifecycle. -## Core Functions (Keys) +This ensures that clients or frontends can immediately query essential data like token balances as soon as the process ID is known, improving the responsiveness of applications built on AO. -These keys are typically accessed via HyperPATHs relative to the device's mount point (e.g., `/data/myJson`) or used in HyperPATH chains. 
+## Example (Lua in `aos`) -* **`GET //` (Read Action)** - * **Action:** Retrieves the data located at the specified `` within the device's *own* stored JSON structure. The device serializes the targeted JSON fragment (object, array, value) into a standard JSON string format. - * **Example:** `GET /data/myJson/user/settings` on the example JSON below would return the string `"{\"theme\":\"dark\",\"notifications\":true}"`. - * **HyperPATH:** Required. Specifies the target within the device's JSON data. +```lua +-- In your process code (e.g., loaded via .load) +Handlers.add( + "PublishData", + Handlers.utils.hasMatchingTag("Action", "PublishData"), + function (msg) + local dataToPublish = "Some important state: " .. math.random() + -- Expose 'currentstatus' key under the 'cache' path + Send({ device = 'patch@1.0', cache = { currentstatus = dataToPublish } }) + print("Published data to /cache/currentstatus") + end +) -* **`//serialize` (Serialize Action)** - * **Action:** Takes arbitrary input data (piped from the `` segment of a HyperPATH chain) and returns its serialized JSON string representation. - * **Example:** `GET /~meta@1.0/info /~json@1.0/serialize` - fetches node info and then pipes it to this device to serialize the result as JSON. - * **HyperPATH:** This segment (`/serialize`) is appended to a previous HyperPATH segment. +-- Spawning and interacting +[aos]> MyProcess = spawn(MyModule) -* **`GET //query?` (Query Action)** - * **Action:** Performs a more complex query against the device's *own* stored JSON data using a specific query syntax (details TBD). - * **Example:** - * **HyperPATH:** Required. The base path to the device's data. - * **Query Parameter:** `?query=` (Syntax TBD). 
+[aos]> Send({ Target = MyProcess, Action = "PublishData" }) +-- Wait a moment for scheduling - +``` -## Example JSON Data +## Avoiding Key Conflicts -Assuming `json` is mounted at `/data/myJson` and holds the following JSON: +When defining keys within the `cache` table (e.g., `cache = { mydatakey = MyValue }`), these keys become path segments under `/cache/` (e.g., `/compute/cache/mydatakey` or `/now/cache/mydatakey`). It's important to choose keys that do not conflict with existing, reserved path segments used by HyperBEAM or the `~process` device itself for state access. -```json -{ - "user": { - "name": "Alice", - "id": 123, - "settings": { - "theme": "dark", - "notifications": true - } - }, - "items": [ - {"sku": "abc", "price": 10}, - {"sku": "def", "price": 20} - ] -} +Using reserved keywords as your cache keys can lead to routing conflicts or prevent you from accessing your patched data as expected. While the exact list can depend on device implementations, it's wise to avoid keys commonly associated with state access, such as: `now`, `compute`, `state`, `info`, `test`. + +It's recommended to use descriptive and specific keys for your cached data to prevent clashes with the underlying HyperPATH routing mechanisms. For example, instead of `cache = { state = ... }`, prefer `cache = { myappstate = ... }` or `cache = { usercount = ... }`. + +!!! warning + Be aware that HTTP path resolution is case-insensitive and automatically normalizes paths to lowercase. While the `patch` device itself stores keys with case sensitivity (e.g., distinguishing `MyKey` from `mykey`), accessing them via an HTTP GET request will treat `/cache/MyKey` and `/cache/mykey` as the same path. This means that using keys that only differ in case (like `MyKey` and `mykey` in your `cache` table) will result in unpredictable behavior or data overwrites when accessed via HyperPATH. 
To prevent these issues, it is **strongly recommended** to use **consistently lowercase keys** within the `cache` table (e.g., `mykey`, `usercount`, `appstate`). + +## Key Points + +* **Path Structure:** The data is exposed under the `/cache/` path segment. The tag name you use *inside* the `cache` table in the `Send` call (e.g., `currentstatus`) becomes the final segment in the accessible HyperPATH (e.g., `/compute/cache/currentstatus`). +* **Data Types:** The `patch` device typically handles basic data types (strings, numbers) within the `cache` table effectively. Complex nested tables might require specific encoding or handling. +* **`compute` vs `now`:** Accessing patched data via `/compute/cache/...` typically serves the last known patched value quickly. Accessing via `/now/cache/...` might involve more computation to ensure the absolute latest state before checking for the patched key under `/cache/`. +* **Not a Replacement for State:** Patching is primarily for *exposing* reads. It doesn't replace the core state management within your process handler logic. + +By using the `patch` device, you can make parts of your AO process state easily and efficiently readable over standard HTTP, bridging the gap between decentralized computation and web-based applications. +--- END OF FILE: docs/build/exposing-process-state.md --- + +--- START OF FILE: docs/build/extending-hyperbeam.md --- +# Extending HyperBEAM + +HyperBEAM's modular design, built on AO-Core principles and Erlang/OTP, makes it highly extensible. You can add new functionalities or modify existing behaviors primarily by creating new **Devices** or implementing **Pre/Post-Processors**. + +!!! warning "Advanced Topic" + Extending HyperBEAM requires a good understanding of Erlang/OTP, the AO-Core protocol, and HyperBEAM's internal architecture. This guide provides a high-level overview; detailed implementation requires deeper exploration of the source code. 
+ +## Approach 1: Creating New Devices + +This is the most common way to add significant new capabilities. +A Device is essentially an Erlang module (typically named `dev_*.erl`) that processes AO-Core messages. + +**Steps:** + +1. **Define Purpose:** Clearly define what your device will do. What kind of messages will it process? What state will it manage (if any)? What functions (keys) will it expose? +2. **Create Module:** Create a new Erlang module (e.g., `src/dev_my_new_device.erl`). +3. **Implement `info/0..2` (Optional but Recommended):** Define an `info` function to signal capabilities and requirements to HyperBEAM (e.g., exported keys, variant/version ID). + ```erlang + info() -> + #{ + variant => <<"MyNewDevice/1.0">>, + exports => [<<"do_something">>, <<"get_status">>] + }. + ``` +4. **Implement Key Functions:** Create Erlang functions corresponding to the keys your device exposes. These functions typically take `StateMessage`, `InputMessage`, and `Environment` as arguments and return `{ok, NewMessage}` or `{error, Reason}`. + ```erlang + do_something(StateMsg, InputMsg, Env) -> + % ... perform action based on InputMsg ... + NewState = ..., % Calculate new state + {ok, NewState}. + + get_status(StateMsg, _InputMsg, _Env) -> + % ... read status from StateMsg ... + StatusData = ..., + {ok, StatusData}. + ``` +5. **Handle State (If Applicable):** Devices can be stateless or stateful. Stateful devices manage their state within the `StateMessage` passed between function calls. +6. **Register Device:** Ensure HyperBEAM knows about your device. This might involve adding it to build configurations or potentially a dynamic registration mechanism if available. +7. **Testing:** Write EUnit tests for your device's functions. + +**Example Idea:** A device that bridges to another blockchain network, allowing AO processes to read data or trigger transactions on that chain. 
+ +## Approach 2: Building Pre/Post-Processors + +Pre/post-processors allow you to intercept incoming requests *before* they reach the target device/process (`preprocess`) or modify the response *after* execution (`postprocess`). These are often implemented using the `dev_stack` device or specific hooks within the request handling pipeline. + +**Use Cases:** + +* **Authentication/Authorization:** Checking signatures or permissions before allowing execution. +* **Request Modification:** Rewriting requests, adding metadata, or routing based on specific criteria. +* **Response Formatting:** Changing the structure or content type of the response. +* **Metering/Logging:** Recording request details or charging for usage before or after execution. + +**Implementation:** + +Processors often involve checking specific conditions (like request path or headers) and then either: + +a. Passing the request through unchanged. +b. Modifying the request/response message structure. +c. Returning an error or redirect. + + +**Example Idea:** A preprocessor that automatically adds a timestamp tag to all incoming messages for a specific process. + + +## Approach 3: Custom Routing Strategies + +While `dev_router` provides basic strategies (round-robin, etc.), you could potentially implement a custom load balancing or routing strategy module that `dev_router` could be configured to use. This would involve understanding the interfaces expected by `dev_router`. + +**Example Idea:** A routing strategy that queries worker nodes for their specific capabilities before forwarding a request. + +## Getting Started + +1. **Familiarize Yourself:** Deeply understand Erlang/OTP and the HyperBEAM codebase (`src/` directory), especially [`hb_ao.erl`](../resources/source-code/hb_ao.md), [`hb_message.erl`](../resources/source-code/hb_message.md), and existing `dev_*.erl` modules relevant to your idea. +2. 
**Study Examples:** Look at simple devices like `dev_patch.erl` or more complex ones like `dev_process.erl` to understand patterns. +3. **Start Small:** Implement a minimal version of your idea first. +4. **Test Rigorously:** Use `rebar3 eunit` extensively. +5. **Engage Community:** Ask questions in developer channels if you get stuck. + +Extending HyperBEAM allows you to tailor the AO network's capabilities to specific needs, contributing to its rich and evolving ecosystem. + +--- END OF FILE: docs/build/extending-hyperbeam.md --- + +--- START OF FILE: docs/build/get-started-building-on-ao-core.md --- +# Getting Started Building on AO-Core + +Welcome to building on AO, the decentralized supercomputer! + +AO combines the permanent storage of Arweave with the flexible, scalable computation enabled by the AO-Core protocol and its HyperBEAM implementation. This allows you to create truly autonomous applications, agents, and services that run trustlessly and permissionlessly. + +## Core Idea: Processes & Messages + +At its heart, building on AO involves: + +1. **Creating Processes:** Think of these as independent programs or stateful contracts. Each process has a unique ID and maintains its own state. +2. **Sending Messages:** You interact with processes by sending them messages. These messages trigger computations, update state, or cause the process to interact with other processes or the outside world. + +Messages are processed by [Devices](../begin/ao-devices.md), which define *how* the computation happens (e.g., running WASM code, executing Lua scripts, managing state transitions). + +## Starting `aos`: Your Development Environment + +The primary tool for interacting with AO and developing processes is `aos`, a command-line interface and development environment. 
+ +=== "npm" + ```bash + npm i -g https://get_ao.arweave.net + ``` + +=== "bun" + ```bash + bun install -g https://get_ao.arweave.net + ``` + +=== "pnpm" + ```bash + pnpm add -g https://get_ao.arweave.net + ``` + +**Starting `aos`:** + +Simply run the command in your terminal: + +```bash +aos +``` + +This connects you to an interactive Lua environment running within a **process** on the AO network. This process acts as your command-line interface (CLI) to the AO network, allowing you to interact with other processes, manage your wallet, and develop new AO processes. By default, it connects to a process running on the mainnet Compute Unit (CU). + +**What `aos` is doing:** + +* **Connecting:** Establishes a connection from your terminal to a remote process running the `aos` environment. +* **Loading Wallet:** Looks for a default Arweave key file (usually `~/.aos.json` or specified via arguments) to load into the remote process context for signing outgoing messages. +* **Providing Interface:** Gives you a Lua prompt (`[aos]>`) within the remote process where you can: + * Load code for new persistent processes on the network. + * Send messages to existing network processes. + * Inspect process state. + * Manage your local environment. + +## Your First Interaction: Assigning a Variable + +From the `aos` prompt, you can assign a variable. Let's assign a basic Lua process that just holds some data: + +```lua +[aos]> myVariable = "Hello from aos!" +-- This assigns the string "Hello from aos!" to the variable 'myVariable' +-- within the current process's Lua environment. + +[aos]> myVariable +-- Displays the content of 'myVariable' +Hello from aos! ``` -**Access Examples:** -- Get user name: `GET /data/myJson/user/name` -- Get theme setting: `GET /data/myJson/user/settings/theme` -- Get first item price: `GET /data/myJson/items[0]/price` +## Your First Handler + +Follow these steps to create and interact with your first message handler in AO: + +1. 
**Create a Lua File to Handle Messages:** + Create a new file named `main.lua` in your local directory and add the following Lua code: + + ```lua + Handlers.add( + "HelloWorld", + function(msg) + -- This function gets called when a message with Action = "HelloWorld" arrives. + print("Handler triggered by message from: " .. msg.From) + -- It replies to the sender with a new message containing the specified data. + msg.reply({ Data = "Hello back from your process!" }) + end + ) + + print("HelloWorld handler loaded.") -- Confirmation message + ``` + + * `Handlers.add`: Registers a function to handle incoming messages. + * `"HelloWorld"`: The name of this handler. It will be triggered by messages with `Action = "HelloWorld"`. + * `function(msg)`: The function that executes when the handler is triggered. `msg` contains details about the incoming message (like `msg.From`, the sender's process ID). + * `msg.reply({...})`: Sends a response message back to the original sender. The response must be a Lua table, typically containing a `Data` field. + +2. **Load the Handler into `aos`:** + From your `aos` prompt, load the handler code into your running process: + + ```lua + [aos]> .load main.lua + ``` + +3. **Send a Message to Trigger the Handler:** + Now, send a message to your own process (`ao.id` refers to the current process ID) with the action that matches your handler's name: + + ```lua + [aos]> Send({ Target = ao.id, Action = "HelloWorld" }) + ``` + +4. **Observe the Output:** + You should see two things happen in your `aos` terminal: + * The `print` statement from your handler: `Handler triggered by message from: ` + * A notification about the reply message: `New Message From : Data = Hello back from your process!` + +5. **Inspect the Reply Message:** + The reply message sent by your handler is now in your process's inbox. 
You can inspect its data like this: + + ```lua + [aos]> Inbox[#Inbox].Data + ``` + This should output: `"Hello back from your process!"` + +You've successfully created a handler, loaded it into your AO process, triggered it with a message, and received a reply! + +## Next Steps + +This is just the beginning. To dive deeper: + +* **AO Cookbook:** Explore practical examples and recipes for common tasks: [AO Cookbook](https://cookbook_ao.arweave.net/) +* **Expose Process State:** Learn how to make your process data accessible via HTTP using the `patch` device: [Exposing Process State](./exposing-process-state.md) +* **Serverless Compute:** Discover how to run WASM or Lua computations within your processes: [Serverless Decentralized Compute](./serverless-decentralized-compute.md) +* **aos Documentation:** Refer to the official `aos` documentation for detailed commands and usage. + +--- END OF FILE: docs/build/get-started-building-on-ao-core.md --- + +--- START OF FILE: docs/build/serverless-decentralized-compute.md --- +# Serverless Decentralized Compute on AO + +AO enables powerful "serverless" computation patterns by allowing you to run code (WASM, Lua) directly within decentralized processes, triggered by messages. Furthermore, if computations are performed on nodes running in Trusted Execution Environments (TEEs), you can obtain cryptographic attestations verifying the execution integrity. + +## Core Concept: Compute Inside Processes + +Instead of deploying code to centralized servers, you deploy code *to* the Arweave permaweb and instantiate it as an AO process. Interactions happen by sending messages to this process ID. + +* **Code Deployment:** Your WASM binary or Lua script is uploaded to Arweave, getting a permanent transaction ID. 
+* **Process Spawning:** You create an AO process, associating it with your code's transaction ID and specifying the appropriate compute device ([`~wasm64@1.0`](../devices/wasm64-at-1-0.md) or [`~lua@5.3a`](../devices/lua-at-5-3a.md)). +* **Execution via Messages:** Sending a message to the process ID triggers the HyperBEAM node (that picks up the message) to: + 1. Load the process state. + 2. Fetch the associated WASM/Lua code from Arweave. + 3. Execute the code using the relevant device ([`dev_wasm`](../resources/source-code/dev_wasm.md) or [`dev_lua`](../resources/source-code/dev_lua.md)), passing the message data and current state. + 4. Update the process state based on the execution results. + + +## TEE Attestations (via [`~snp@1.0`](../resources/source-code/dev_snp.md)) + +If a HyperBEAM node performing these computations runs within a supported Trusted Execution Environment (like AMD SEV-SNP), it can provide cryptographic proof of execution. + +* **How it works:** The [`~snp@1.0`](../resources/source-code/dev_snp.md) device interacts with the TEE hardware. +* **Signed Responses:** When a TEE-enabled node processes your message (e.g., executes your WASM function), the HTTP response containing the result can be cryptographically signed by a key that *provably* only exists inside the TEE. +* **Verification:** Clients receiving this response can verify the signature against the TEE platform's attestation mechanism (e.g., AMD's KDS) to gain high confidence that the computation was performed correctly and confidentially within the secure environment, untampered by the node operator. + +**Obtaining Attested Responses:** + +This usually involves interacting with nodes specifically advertised as TEE-enabled. The exact mechanism for requesting and verifying attestations depends on the specific TEE technology and node configuration. 
+ +* The HTTP response headers might contain specific signature or attestation data (e.g., using HTTP Message Signatures RFC-9421 via [`dev_codec_httpsig`](../resources/source-code/dev_codec_httpsig.md)). +* You might query the [`~snp@1.0`](../resources/source-code/dev_snp.md) device directly on the node to get its attestation report. -## Events +Refer to documentation on [TEE Nodes](./run/tee-nodes.md) and the [`~snp@1.0`](../resources/source-code/dev_snp.md) device for details. - +By leveraging WASM, Lua, and optional TEE attestations, AO provides a powerful platform for building complex, verifiable, and truly decentralized serverless applications. - +--- END OF FILE: docs/build/serverless-decentralized-compute.md --- + +--- START OF FILE: docs/devices/json-at-1-0.md --- +# Device: ~json@1.0 + +## Overview + +The [`~json@1.0`](../resources/source-code/dev_json_iface.md) device provides a mechanism to interact with JSON (JavaScript Object Notation) data structures using HyperPATHs. It allows treating a JSON document or string as a stateful entity against which HyperPATH queries can be executed. + +This device is useful for: + +* Serializing and deserializing JSON data. +* Querying and modifying JSON objects. +* Integrating with other devices and operations via HyperPATH chaining. + +## Core Functions (Keys) + +### Serialization + +* **`GET /~json@1.0/serialize` (Direct Serialize Action)** + * **Action:** Serializes the input message or data into a JSON string. + * **Example:** `GET /~json@1.0/serialize` - serializes the current message as JSON. + * **HyperPATH:** The path segment `/serialize` directly follows the device identifier. + +* **`GET //~json@1.0/serialize` (Chained Serialize Action)** + * **Action:** Takes arbitrary data output from `` (another device or operation) and returns its serialized JSON string representation. 
+ * **Example:** `GET /~meta@1.0/info/~json@1.0/serialize` - fetches node info from the meta device and then pipes it to the JSON device to serialize the result as JSON. + * **HyperPATH:** This segment (`/~json@1.0/serialize`) is appended to a previous HyperPATH segment. + +## HyperPATH Chaining Example + +The JSON device is particularly useful in HyperPATH chains to convert output from other devices into JSON format: + +``` +GET /~meta@1.0/info/~json@1.0/serialize +``` + +This retrieves the node configuration from the meta device and serializes it to JSON. + +## See Also + +- [Message Device](../resources/source-code/dev_message.md) - Works well with JSON serialization +- [Meta Device](../resources/source-code/dev_meta.md) - Can provide configuration data to serialize [json module](../resources/source-code/dev_codec_json.md) --- END OF FILE: docs/devices/json-at-1-0.md --- @@ -106,20 +471,18 @@ Assuming `json` is mounted at `/data/myJson` and holds the following JSON: ## Overview -The `~lua@5.3a` device enables the execution of Lua scripts within an AO process. It utilizes the `luerl` Erlang library to provide a Lua 5.3 compatible environment. - -**Status:** Stable +The [`~lua@5.3a`](../resources/source-code/dev_lua.md) device enables the execution of Lua scripts within the HyperBEAM environment. It provides an isolated sandbox where Lua code can process incoming messages, interact with other devices, and manage state. ## Core Concept: Lua Script Execution -This device allows processes to perform computations defined in Lua scripts. Similar to the `~wasm64@1.0` device, it manages the lifecycle of a Lua execution state associated with the process. +This device allows processes to perform computations defined in Lua scripts. Similar to the [`~wasm64@1.0`](../resources/source-code/dev_wasm.md) device, it manages the lifecycle of a Lua execution state associated with the process. 
## Key Functions (Keys) -These keys are typically used within an execution stack (managed by `dev_stack`) for an AO process. +These keys are typically used within an execution stack (managed by [`dev_stack`](../resources/source-code/dev_stack.md)) for an AO process. * **`init`** - * **Action:** Initializes the Lua environment for the process. It finds and loads the Lua script(s) associated with the process, creates a `luerl` state, applies sandboxing rules if specified, installs the `dev_lua_lib` (providing AO-specific functions like `ao.send`), and stores the initialized state in the process's private area (`priv/state`). + * **Action:** Initializes the Lua environment for the process. It finds and loads the Lua script(s) associated with the process, creates a `luerl` state, applies sandboxing rules if specified, installs the [`dev_lua_lib`](../resources/source-code/dev_lua_lib.md) (providing AO-specific functions like `ao.send`), and stores the initialized state in the process's private area (`priv/state`). * **Inputs (Expected in Process Definition or `init` Message):** * `script`: Can be: * An Arweave Transaction ID of the Lua script file. @@ -154,19 +517,19 @@ The `sandbox` option in the process definition restricts potentially harmful Lua ## AO Library (`dev_lua_lib`) -The `init` function automatically installs a helper library (`dev_lua_lib`) into the Lua state. This library typically provides functions for interacting with the AO environment from within the Lua script, such as: +The `init` function automatically installs a helper library ([`dev_lua_lib`](../resources/source-code/dev_lua_lib.md)) into the Lua state. This library typically provides functions for interacting with the AO environment from within the Lua script, such as: * `ao.send({ Target = ..., ... })`: To send messages from the process. * Access to message tags and data. ## Usage within `dev_stack` -Like `~wasm64@1.0`, the `~lua@5.3a` device is typically used within an execution stack. 
+Like [`~wasm64@1.0`](../resources/source-code/dev_wasm.md), the `~lua@5.3a` device is typically used within an execution stack. ```text # Example Process Definition Snippet Execution-Device: stack@1.0 -Execution-Stack: "scheduler@1.0", "lua@5.3a" +Execution-Stack: scheduler@1.0, lua@5.3a Script: Sandbox: true ``` @@ -184,14 +547,12 @@ This device offers a lightweight, integrated scripting capability for AO process The [`~message@1.0`](../resources/source-code/dev_message.md) device is a fundamental built-in device in HyperBEAM. It serves as the identity device for standard AO-Core messages, which are represented as Erlang maps internally. Its primary function is to allow manipulation and inspection of these message maps directly via HyperPATH requests, without needing a persistent process state. -**Status:** Stable - This device is particularly useful for: * Creating and modifying transient messages on the fly using query parameters. * Retrieving specific values from a message map. * Inspecting the keys of a message. -* Handling message commitments and verification (though often delegated to specialized commitment devices like `httpsig@1.0`). +* Handling message commitments and verification (though often delegated to specialized commitment devices like [`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)). ## Core Functionality @@ -204,7 +565,7 @@ To retrieve the value associated with a specific key in the message map, simply **Example:** ``` -http://localhost:10000/~message@1.0&hello=world&Key=Value/key +GET /~message@1.0&hello=world&Key=Value/key ``` **Response:** @@ -225,8 +586,8 @@ The `message@1.0` device reserves several keys for specific operations: * **`set_path`**: A special case for setting the `path` key itself, which cannot be done via the standard `set` operation. * **`remove`**: Removes one or more specified keys from the message. Requires an `item` or `items` parameter. 
* **`keys`**: Returns a list of all public (non-private) keys present in the message map. -* **`id`**: Calculates and returns the ID (hash) of the message. Considers active commitments based on specified `committers`. May delegate ID calculation to a device specified by the message's `id-device` key or the default (`httpsig@1.0`). -* **`commit`**: Creates a commitment (e.g., a signature) for the message. Requires parameters like `commitment-device` and potentially committer information. Delegates the actual commitment generation to the specified device (default `httpsig@1.0`). +* **`id`**: Calculates and returns the ID (hash) of the message. Considers active commitments based on specified `committers`. May delegate ID calculation to a device specified by the message's `id-device` key or the default ([`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)). +* **`commit`**: Creates a commitment (e.g., a signature) for the message. Requires parameters like `commitment-device` and potentially committer information. Delegates the actual commitment generation to the specified device (default [`httpsig@1.0`](../resources/source-code/dev_codec_httpsig.md)). +* **`committers`**: Returns a list of committers associated with the commitments in the message. Can be filtered by request parameters. +* **`commitments`**: Used internally and in requests to filter or specify which commitments to operate on (e.g., for `id` or `verify`). +* **`verify`**: Verifies the commitments attached to the message. Can be filtered by `committers` or specific `commitment` IDs in the request. Delegates verification to the device specified in each commitment (`commitment-device`). 
@@ -240,7 +601,7 @@ Keys prefixed with `priv` (e.g., `priv_key`, `private.data`) are considered priv This example demonstrates creating a transient message and retrieving a value: ``` -http://localhost:10000/~message@1.0&hello=world&k=v/k +GET /~message@1.0&hello=world&k=v/k ``` **Breakdown:** @@ -261,9 +622,9 @@ http://localhost:10000/~message@1.0&hello=world&k=v/k ## Overview -The `~meta@1.0` device serves as the primary configuration and information endpoint for a HyperBEAM node. It's the default entry point for processing requests and allows querying or modifying the node's settings. +The [`~meta@1.0`](../resources/source-code/dev_meta.md) device provides access to metadata and configuration information about the local HyperBEAM node and the broader AO network. -**Status:** Stable +This device is essential for querying and configuring a node's settings. ## Core Functions (Keys) @@ -289,7 +650,7 @@ While the `info` key is the primary interaction point, the `NodeMsg` managed by * `operator`: The address designated as the node operator (defaults to the address derived from `priv_wallet`). * `initialized`: Status indicating if the node setup is temporary or permanent. * `preprocessor` / `postprocessor`: Optional messages defining pre/post-processing logic for requests. -* `routes`: Routing table used by `dev_router`. +* `routes`: Routing table used by [`dev_router`](../resources/source-code/dev_router.md). * `store`: Configuration for data storage. * `trace`: Debug tracing options. * `p4_*`: Payment configuration. 
@@ -299,7 +660,7 @@ While the `info` key is the primary interaction point, the `NodeMsg` managed by ## Utility Functions (Internal/Module Level) -The `dev_meta.erl` module also contains helper functions used internally or callable from other Erlang modules: +The [`dev_meta.erl`](../resources/source-code/dev_meta.md) module also contains helper functions used internally or callable from other Erlang modules: * `is_operator(, ) -> boolean()`: Checks if the signer of `RequestMsg` matches the configured `operator` in `NodeMsg`. @@ -312,25 +673,52 @@ The `~meta` device applies the node's configured `preprocessor` message before r Before a node can process general requests, it usually needs to be initialized. Attempts to access devices other than `~meta@1.0/info` before initialization typically result in an error. Initialization often involves setting essential parameters like the operator key via a `POST` to `info`. [meta module](../resources/source-code/dev_meta.md) - --- END OF FILE: docs/devices/meta-at-1-0.md --- +--- START OF FILE: docs/devices/overview.md --- +# Devices + +Devices are the core functional units within HyperBEAM and AO-Core. They define how messages are processed and what actions can be performed. + +Each device listed here represents a specific capability available to AO processes and nodes. Understanding these devices is key to building complex applications and configuring your HyperBEAM node effectively. + +## Available Devices + +Below is a list of documented built-in devices. Each page details the device's purpose, available functions (keys), and usage examples where applicable. + +* **[`~message@1.0`](./message-at-1-0.md):** Base message handling and manipulation. +* **[`~meta@1.0`](./meta-at-1-0.md):** Node configuration and metadata. +* **[`~process@1.0`](./process-at-1-0.md):** Persistent, shared process execution environment. +* **[`~scheduler@1.0`](./scheduler-at-1-0.md):** Message scheduling and execution ordering for processes. 
+* **[`~wasm64@1.0`](./wasm64-at-1-0.md):** WebAssembly (WASM) execution engine. +* **[`~lua@5.3a`](./lua-at-5-3a.md):** Lua script execution engine. +* **[`~relay@1.0`](./relay-at-1-0.md):** Relaying messages to other nodes or HTTP endpoints. +* **[`~json@1.0`](./json-at-1-0.md):** Provides access to JSON data structures using HyperPATHs. + +*(More devices will be documented here as specifications are finalized and reviewed.)* + +## Device Naming and Versioning + +Devices are typically referenced using a name and version, like `~@` (e.g., `~process@1.0`). The tilde (`~`) often indicates a primary, user-facing device, while internal or utility devices might use a `dev_` prefix in the source code (e.g., `dev_router`). + +Versioning indicates the specific interface and behavior of the device. Changes to a device that break backward compatibility usually result in a version increment. + +--- END OF FILE: docs/devices/overview.md --- + --- START OF FILE: docs/devices/process-at-1-0.md --- # Device: ~process@1.0 ## Overview -The `~process@1.0` device provides the core abstraction for persistent, shared computations within AO, analogous to smart contracts in other systems but with greater flexibility. It orchestrates the interaction between scheduling, state management, and computation execution for a specific process instance. - -**Status:** Stable +The [`~process@1.0`](../resources/source-code/dev_process.md) device represents a persistent, shared execution environment within HyperBEAM, analogous to a process or actor in other systems. It allows for stateful computation and interaction over time. ## Core Concept: Orchestration A message tagged with `Device: process@1.0` (the "Process Definition Message") doesn't typically perform computation itself. Instead, it defines *which other devices* should be used for key aspects of its lifecycle: -* **Scheduler Device:** Determines the order of incoming messages (assignments) to be processed. (Defaults to `~scheduler@1.0`). 
-* **Execution Device:** Executes the actual computation based on the current state and the scheduled message. Often configured as `dev_stack` to allow multiple computational steps (e.g., running WASM, applying cron jobs, handling proofs). -* **Push Device:** Handles the injection of new messages into the process's schedule. (Defaults to `~push@1.0`). +* **Scheduler Device:** Determines the order of incoming messages (assignments) to be processed. (Defaults to [`~scheduler@1.0`](../resources/source-code/dev_scheduler.md)). +* **Execution Device:** Executes the actual computation based on the current state and the scheduled message. Often configured as [`dev_stack`](../resources/source-code/dev_stack.md) to allow multiple computational steps (e.g., running WASM, applying cron jobs, handling proofs). +* **Push Device:** Handles the injection of new messages into the process's schedule. (Defaults to [`~push@1.0`](../resources/source-code/dev_push.md)). The `~process@1.0` device acts as a router, intercepting requests and delegating them to the appropriate configured device (scheduler, executor, etc.) by temporarily swapping the device tag on the message before resolving. @@ -348,7 +736,7 @@ These keys are accessed via HyperPATHs relative to the Process Definition Messag * **`GET /~process@1.0/compute/`** * **Action:** Computes the process state up to a specific point identified by `` (either a slot number or a message ID within the schedule). It retrieves assignments from the Scheduler Device and applies them sequentially using the configured Execution Device. * **Response:** The process state message after executing up to the target slot/message. - * **Caching:** Results are cached aggressively (see `dev_process_cache`) to avoid recomputation. + * **Caching:** Results are cached aggressively (see [`dev_process_cache`](../resources/source-code/dev_process_cache.md)) to avoid recomputation. 
* **`GET /~process@1.0/now`** * **Action:** Computes and returns the `Results` key from the *latest* known state of the process. This typically involves computing all pending assignments. * **Response:** The value of the `Results` key from the final state. @@ -365,13 +753,13 @@ A typical process definition message might look like this (represented conceptua ```text Device: process@1.0 -Scheduler-Device: scheduler@1.0 -Execution-Device: stack@1.0 -Execution-Stack: "scheduler@1.0", "cron@1.0", "wasm64@1.0", "PoDA@1.0" +Scheduler-Device: [`scheduler@1.0`](../resources/source-code/dev_scheduler.md) +Execution-Device: [`stack@1.0`](../resources/source-code/dev_stack.md) +Execution-Stack: "[`scheduler@1.0`](../resources/source-code/dev_scheduler.md)", "[`cron@1.0`](../resources/source-code/dev_cron.md)", "[`wasm64@1.0`](../resources/source-code/dev_wasm.md)", "[`PoDA@1.0`](../resources/source-code/dev_poda.md)" Cron-Frequency: 10-Minutes WASM-Image: PoDA: - Device: PoDA/1.0 + Device: [`PoDA/1.0`](../resources/source-code/dev_poda.md) Authority: Authority: Quorum: 2 @@ -383,7 +771,7 @@ This defines a process that uses: ## State Management & Caching -`~process@1.0` relies heavily on caching (`dev_process_cache`) to optimize performance. Full state snapshots and intermediate results are cached periodically (configurable via `Cache-Frequency` and `Cache-Keys` options) to avoid recomputing the entire history for every request. +`~process@1.0` relies heavily on caching ([`dev_process_cache`](../resources/source-code/dev_process_cache.md)) to optimize performance. Full state snapshots and intermediate results are cached periodically (configurable via `Cache-Frequency` and `Cache-Keys` options) to avoid recomputing the entire history for every request. ## Initialization (`init`) @@ -398,9 +786,7 @@ Processes often require an initialization step before they can process messages. 
## Overview -The `~relay@1.0` device is responsible for forwarding messages (HTTP requests) from one HyperBEAM node to another node or to any external HTTP(S) endpoint. - -**Status:** Stable +The [`~relay@1.0`](../resources/source-code/dev_relay.md) device enables HyperBEAM nodes to send messages to external HTTP endpoints or other AO nodes. ## Core Concept: Message Forwarding @@ -427,7 +813,7 @@ This device acts as an HTTP client within the AO ecosystem. It allows a node or * **Inputs:** Same as `call`. * **Response:** `{ok, <<"OK">>}`. * **`preprocess`** - * **Action:** This function is designed to be used as a node's global `preprocessor` (configured via `~meta@1.0`). When configured, it intercepts *all* incoming requests to the node and automatically rewrites them to be relayed via the `call` key. This effectively turns the node into a pure forwarding proxy, using its routing table (`dev_router`) to determine the destination. + * **Action:** This function is designed to be used as a node's global `preprocessor` (configured via [`~meta@1.0`](../resources/source-code/dev_meta.md)). When configured, it intercepts *all* incoming requests to the node and automatically rewrites them to be relayed via the `call` key. This effectively turns the node into a pure forwarding proxy, using its routing table ([`dev_router`](../resources/source-code/dev_router.md)) to determine the destination. * **Response:** A message structure that invokes `/~relay@1.0/call` with the original request as the target body. ## Use Cases @@ -439,7 +825,7 @@ This device acts as an HTTP client within the AO ecosystem. It allows a node or ## Interaction with Routing -When `call` or `cast` is invoked, the actual HTTP request dispatch is handled by `hb_http:request/2`. 
This function often utilizes the node's routing configuration (`dev_router`) to determine the specific peer/URL to send the request to, especially if the target path is an AO process ID or another internal identifier rather than a full external URL. +When `call` or `cast` is invoked, the actual HTTP request dispatch is handled by `hb_http:request/2`. This function often utilizes the node's routing configuration ([`dev_router`](../resources/source-code/dev_router.md)) to determine the specific peer/URL to send the request to, especially if the target path is an AO process ID or another internal identifier rather than a full external URL. [relay module](../resources/source-code/dev_relay.md) @@ -450,19 +836,33 @@ When `call` or `cast` is invoked, the actual HTTP request dispatch is handled by ## Overview -The `~scheduler@1.0` device is responsible for managing the order of message execution for an AO process. It maintains the list of pending messages (assignments) and provides them sequentially to the process's Execution Device. - -**Status:** Stable +The [`~scheduler@1.0`](../resources/source-code/dev_scheduler.md) device manages the queueing and ordering of messages targeted at a specific process ([`~process@1.0`](../resources/source-code/dev_process.md)). It ensures that messages are processed according to defined scheduling rules. ## Core Concept: Message Ordering -When messages are sent to an AO process (typically via the `~push@1.0` device or a `POST` to the process's `/schedule` endpoint), they are added to a queue managed by the Scheduler Device associated with that process. The scheduler ensures that messages are processed one after another in a deterministic order, typically based on arrival time and potentially other factors like message nonces or timestamps (depending on the specific scheduler implementation details). 
+When messages are sent to an AO process (typically via the [`~push@1.0`](../resources/source-code/dev_push.md) device or a `POST` to the process's `/schedule` endpoint), they are added to a queue managed by the Scheduler Device associated with that process. The scheduler ensures that messages are processed one after another in a deterministic order, typically based on arrival time and potentially other factors like message nonces or timestamps (depending on the specific scheduler implementation details). + +The [`~process@1.0`](../resources/source-code/dev_process.md) device interacts with its configured Scheduler Device (which defaults to `~scheduler@1.0`) primarily through the `next` key to retrieve the next message to be executed. + +## Slot System -The `~process@1.0` device interacts with its configured Scheduler Device (which defaults to `~scheduler@1.0`) primarily through the `next` key to retrieve the next message to be executed. +Slots are a fundamental concept in the `~scheduler@1.0` device, providing a structured mechanism for organizing and sequencing computation. + +* **Sequential Ordering:** Slots act as numbered containers (starting at 0) that hold specific messages or tasks to be processed in a deterministic order. +* **State Tracking:** The `at-slot` key in a process's state (or a similar internal field like `current-slot` within the scheduler itself) tracks execution progress, indicating which messages have been processed and which are pending. The `slot` function can be used to query this. +* **Assignment Storage:** Each slot contains an "assignment" - the cryptographically verified message waiting to be executed. These assignments are retrieved using the `schedule` function or internally via `next`. +* **Schedule Organization:** The collection of all slots for a process forms its "schedule". 
+* **Application Scenarios:** + * **Scheduling Messages:** When a message is posted to a process (e.g., via `register`), it's assigned to the next available slot. + * **Status Monitoring:** Clients can query a process's current slot (via the `slot` function) to check progress. + * **Task Retrieval:** Processes find their next task by requesting the next assignment via the `next` function, which implicitly uses the next slot number based on the current state. + * **Distributed Consistency:** Slots ensure deterministic execution order across nodes, crucial for maintaining consistency in AO. + +This slotting mechanism is central to AO processes built on HyperBEAM, allowing for deterministic, verifiable computation. ## Key Functions (Keys) -These keys are typically accessed via the `~process@1.0` device, which delegates the calls to its configured scheduler. +These keys are typically accessed via the [`~process@1.0`](../resources/source-code/dev_process.md) device, which delegates the calls to its configured scheduler. * **`schedule` (Handler for `GET /~process@1.0/schedule`)** * **Action:** Retrieves the list of pending assignments (messages) for the process. May support cursor-based traversal for long schedules. @@ -477,7 +877,7 @@ These keys are typically accessed via the `~process@1.0` device, which delegates * **`status` (Handler for `GET /~process@1.0/status`)** * **Action:** Retrieves status information about the scheduler for the process. * **Response:** A status message. -* **`next` (Internal Key used by `~process@1.0`)** +* **`next` (Internal Key used by [`~process@1.0`](../resources/source-code/dev_process.md))** * **Action:** Retrieves the next assignment message from the schedule based on the process's current `at-slot` state. * **State Management:** Requires the current process state (`Msg1`) containing the `at-slot` key. * **Response:** `{ok, #{ "body" => , "state" => }}` or `{error, Reason}` if no next assignment is found. 
@@ -489,8 +889,8 @@ These keys are typically accessed via the `~process@1.0` device, which delegates ## Interaction with Other Components -* **`~process@1.0`:** The primary user of the scheduler, calling `next` to drive process execution. -* **`~push@1.0`:** Often used to add messages to the schedule via `POST /schedule`. +* **[`~process@1.0`](../resources/source-code/dev_process.md):** The primary user of the scheduler, calling `next` to drive process execution. +* **[`~push@1.0`](../resources/source-code/dev_push.md):** Often used to add messages to the schedule via `POST /schedule`. * **`dev_scheduler_cache`:** Internal module used for caching assignments locally on the node to reduce latency. * **Scheduling Unit (SU):** Schedulers may interact with external entities (like Arweave gateways or dedicated SU nodes) to fetch or commit schedules, although `~scheduler@1.0` aims for a simpler, often node-local or SU-client model. @@ -505,9 +905,7 @@ These keys are typically accessed via the `~process@1.0` device, which delegates ## Overview -The `~wasm64@1.0` device enables the execution of WebAssembly (WASM) code within an AO process, specifically targeting the WASM Memory64 specification. It uses `hb_beamr`, an Erlang wrapper for the WebAssembly Micro Runtime (WAMR), as its backend. - -**Status:** Stable +The [`~wasm64@1.0`](../resources/source-code/dev_wasm.md) device enables the execution of 64-bit WebAssembly (WASM) code within the HyperBEAM environment. It provides a sandboxed environment for running compiled code from various languages (like Rust, C++, Go) that target WASM. ## Core Concept: WASM Execution @@ -517,7 +915,7 @@ The device manages the lifecycle of a WASM instance associated with the process ## Key Functions (Keys) -These keys are typically used within an execution stack (managed by `dev_stack`) for an AO process. 
+These keys are typically used within an execution stack (managed by [`dev_stack`](../resources/source-code/dev_stack.md)) for an AO process. * **`init`** * **Action:** Initializes the WASM environment for the process. It locates the WASM image (binary), starts a WAMR instance, and stores the instance handle and helper functions (for reading/writing WASM memory) in the process's private state (`priv/...`). @@ -554,12 +952,12 @@ These keys are typically used within an execution stack (managed by `dev_stack`) ## Usage within `dev_stack` -The `~wasm64@1.0` device is almost always used as part of an execution stack configured in the Process Definition Message and managed by `dev_stack`. `dev_stack` ensures that `init` is called on the first pass, `compute` on subsequent passes, and potentially `snapshot` or `terminate` as needed. +The `~wasm64@1.0` device is almost always used as part of an execution stack configured in the Process Definition Message and managed by [`dev_stack`](../resources/source-code/dev_stack.md). [`dev_stack`](../resources/source-code/dev_stack.md) ensures that `init` is called on the first pass, `compute` on subsequent passes, and potentially `snapshot` or `terminate` as needed. ```text # Example Process Definition Snippet -Execution-Device: stack@1.0 -Execution-Stack: "scheduler@1.0", "wasm64@1.0" +Execution-Device: [`stack@1.0`](../resources/source-code/dev_stack.md) +Execution-Stack: "[`scheduler@1.0`](../resources/source-code/dev_scheduler.md)", "wasm64@1.0" WASM-Image: ``` @@ -569,6 +967,274 @@ This setup allows AO processes to leverage the computational power and language --- END OF FILE: docs/devices/wasm64-at-1-0.md --- +--- START OF FILE: docs/introduction/ao-devices.md --- +# AO Devices + +In AO-Core and its implementation HyperBEAM, **Devices** are modular components responsible for processing and interpreting [Messages](./what-is-ao-core.md#core-concepts). 
They define the specific logic for how computations are performed, data is handled, or interactions occur within the AO ecosystem. + +Think of Devices as specialized engines or services that can be plugged into the AO framework. This modularity is key to AO's flexibility and extensibility. + +## Purpose of Devices + +* **Define Computation:** Devices dictate *how* a message's instructions are executed. One device might run WASM code, another might manage process state, and yet another might simply relay data. +* **Enable Specialization:** Nodes running HyperBEAM can choose which Devices to support, allowing them to specialize in certain tasks (e.g., high-compute tasks, storage-focused tasks, secure TEE operations). +* **Promote Modularity:** New functionalities can be added to AO by creating new Devices, without altering the core protocol. +* **Distribute Workload:** Different Devices can handle different parts of a complex task, enabling parallel processing and efficient resource utilization across the network. + +## Familiar Examples + +HyperBEAM includes many preloaded devices that provide core functionality. Some key examples include: + +* **[`~meta@1.0`](../devices/meta-at-1-0.md):** Configures the node itself (hardware specs, supported devices, payment info). +* **[`~process@1.0`](../devices/process-at-1-0.md):** Manages persistent, shared computational states (like traditional smart contracts, but more flexible). +* **[`~scheduler@1.0`](../devices/scheduler-at-1-0.md):** Handles the ordering and execution of messages within a process. +* **[`~wasm64@1.0`](../devices/wasm64-at-1-0.md):** Executes WebAssembly (WASM) code, allowing for complex computations written in languages like Rust, C++, etc. +* **[`~lua@5.3a`](../devices/lua-at-5-3a.md):** Executes Lua scripts. +* **[`~relay@1.0`](../devices/relay-at-1-0.md):** Forwards messages between AO nodes or to external HTTP endpoints. 
+* **[`~json@1.0`](../devices/json-at-1-0.md):** Provides access to JSON data structures using HyperPATHs. +* **[`~message@1.0`](../devices/message-at-1-0.md):** Manages message state and processing. +* **[`~patch@1.0`](../guides/exposing-process-state.md):** Applies state updates directly to a process, often used for migrating or managing process data. + +## Beyond the Basics + +Devices aren't limited to just computation or state management. They can represent more abstract concepts: + +* **Security Devices ([`~snp@1.0`](../resources/source-code/dev_snp.md), [`dev_codec_httpsig`](../resources/source-code/dev_codec_httpsig.md)):** Handle tasks related to Trusted Execution Environments (TEEs) or message signing, adding layers of security and verification. +* **Payment/Access Control Devices ([`~p4@1.0`](../resources/source-code/dev_p4.md), [`~faff@1.0`](../resources/source-code/dev_faff.md)):** Manage metering, billing, or access control for node services. +* **Workflow/Utility Devices ([`dev_cron`](../resources/source-code/dev_cron.md), [`dev_stack`](../resources/source-code/dev_stack.md), [`dev_monitor`](../resources/source-code/dev_monitor.md)):** Coordinate complex execution flows, schedule tasks, or monitor process activity. + +## Using Devices + +Devices are typically invoked via [HyperPATHs](./pathing-in-ao-core.md). The path specifies which Device should interpret the subsequent parts of the path or the request body. + +``` +# Example: Execute the 'now' key on the process device for a specific process +/~process@1.0/now + +# Example: Relay a GET request via the relay device +/~relay@1.0/call?method=GET&path=https://example.com +``` + +The specific functions or 'keys' available for each Device are documented individually. See the [Devices section](../devices/index.md) for details on specific built-in devices. + +## The Potential of Devices + +The modular nature of AO Devices opens up vast possibilities for future expansion and innovation. 
The current set of preloaded and community devices is just the beginning. As the AO ecosystem evolves, we can anticipate the development of new devices catering to increasingly specialized needs: + +* **Specialized Hardware Integration:** Devices could be created to interface directly with specialized hardware accelerators like GPUs (for AI/ML tasks such as running large language models), TPUs, or FPGAs, allowing AO processes to leverage high-performance computing resources securely and verifiably. +* **Advanced Cryptography:** New devices could implement cutting-edge cryptographic techniques, such as zero-knowledge proofs (ZKPs) or fully homomorphic encryption (FHE), enabling enhanced privacy and complex computations on encrypted data. +* **Cross-Chain & Off-Chain Bridges:** Devices could act as secure bridges to other blockchain networks or traditional Web2 APIs, facilitating seamless interoperability and data exchange between AO and the wider digital world. +* **AI/ML Specific Devices:** Beyond raw GPU access, specialized devices could offer higher-level AI/ML functionalities, like optimized model inference engines or distributed training frameworks. +* **Domain-Specific Logic:** Communities or organizations could develop devices tailored to specific industries or use cases, such as decentralized finance (DeFi) primitives, scientific computing libraries, or decentralized identity management systems. + +The Device framework ensures that AO can adapt and grow, incorporating new technologies and computational paradigms without requiring fundamental changes to the core protocol. This extensibility is key to AO's long-term vision of becoming a truly global, decentralized computer. + +--- END OF FILE: docs/introduction/ao-devices.md --- + +--- START OF FILE: docs/introduction/pathing-in-ao-core.md --- +# Pathing in AO-Core + +## Overview + +Understanding how to construct and interpret paths in AO-Core is fundamental to working with HyperBEAM. 
This guide explains the structure and components of AO-Core paths, enabling you to effectively interact with processes and access their data. + +## HyperPATH Structure + +Let's examine a typical HyperBEAM endpoint piece-by-piece: + +``` +https://router-1.forward.computer/~process@1.0/now +``` + +### Node URL (`router-1.forward.computer`) + +The HTTP response from this node includes a signature from the host's key. By accessing the [`~snp@1.0`](../resources/source-code/dev_snp.md) device, you can verify that the node is running in a genuine Trusted Execution Environment (TEE), ensuring computation integrity. You can replace `router-1.forward.computer` with any HyperBEAM TEE node operated by any party while maintaining trustless guarantees. + +### Process Path (`/~process@1.0`) + +Every path in AO-Core represents a program. Think of the URL bar as a Unix-style command-line interface, providing access to AO's trustless and verifiable compute. Each path component (between `/` characters) represents a step in the computation. In this example, we instruct the AO-Core node to: + +1. Load a specific message from its caches (local, another node, or Arweave) +2. Interpret it with the [`~process@1.0`](../devices/process-at-1-0.md) device +3. The process device implements a shared computing environment with consistent state between users + +### State Access (`/now` or `/compute`) + +Devices in AO-Core expose keys accessible via path components. Each key executes a function on the device: + +- `now`: Calculates real-time process state +- `compute`: Serves the latest known state (faster than checking for new messages) + +Under the surface, these keys represent AO-Core messages. As we progress through the path, AO-Core applies each message to the existing state. You can access the full process state by visiting: +``` +/~process@1.0/now +``` + +### State Navigation + +You can browse through sub-messages and data fields by accessing them as keys. 
For example, if a process stores its interaction count in a field named `cache`, you can access it like this: +``` +/~process@1.0/compute/cache +``` +This shows the 'cache' of your process. Each response is: + +- A message with a signature attesting to its correctness +- A hashpath describing its generation +- Transferable to other AO-Core nodes for uninterrupted execution + +### Query Parameters and Type Casting + +Beyond path segments, HyperBEAM URLs can include query parameters that utilize a special type casting syntax. This allows specifying the desired data type for a parameter directly within the URL using the format `key+type=value`. + +- **Syntax**: A `+` symbol separates the parameter key from its intended type (e.g., `count+integer=42`, `items+list="apple",7`). +- **Mechanism**: The HyperBEAM node identifies the `+type` suffix (e.g., `+integer`, `+list`, `+map`, `+float`, `+atom`, `+resolve`). It then uses internal functions ([`hb_singleton:maybe_typed`](../resources/source-code/hb_singleton.md) and [`dev_codec_structured:decode_value`](../resources/source-code/dev_codec_structured.md)) to decode and cast the provided value string into the corresponding Erlang data type before incorporating it into the message. +- **Supported Types**: Common types include `integer`, `float`, `list`, `map`, `atom`, `binary` (often implicit), and `resolve` (for path resolution). List values often follow the [HTTP Structured Fields format (RFC 8941)](https://www.rfc-editor.org/rfc/rfc8941.html). + +This powerful feature enables the expression of complex data structures directly in URLs. + +## Examples + +The following examples illustrate using HyperPATH with various AO-Core processes and devices. While these cover a few specific use cases, HyperBEAM's extensible nature allows interaction with any device or process via HyperPATH. For a deeper understanding, we encourage exploring the [source code](https://github.com/permaweb/hyperbeam) and experimenting with different paths. 
+ +### Example 1: Accessing Full Process State + +To get the complete, real-time state of a process identified by `<process-id>`, use the `/now` path component with the [`~process@1.0`](../devices/process-at-1-0.md) device: + +``` +GET /<process-id>~process@1.0/now +``` + +This instructs the AO-Core node to load the process and execute the `now` function on the [`~process@1.0`](../devices/process-at-1-0.md) device. + +### Example 2: Navigating to Specific Process Data + +If a process maintains its state in a map and you want to access a specific field, like `cache`, using the faster `/compute` endpoint: + +``` +GET /<process-id>~process@1.0/compute/cache +``` + +This accesses the `compute` key on the [`~process@1.0`](../devices/process-at-1-0.md) device and then navigates to the `cache` key within the resulting state map. Using this path, you will see the latest 'cache' of your process (the number of interactions it has received). Every piece of relevant information about your process can be accessed similarly, effectively providing a native API. + +(Note: This represents direct navigation within the process state structure. For accessing data specifically published via the `~patch@1.0` device, see the documentation on [Exposing Process State](../build/exposing-process-state.md), which typically uses the `/cache/` path.) + +### Example 3: Basic `~message@1.0` Usage + +Here's a simple example of using [`~message@1.0`](../devices/message-at-1-0.md) to create a message and retrieve a value: + +``` +GET /~message@1.0&greeting="Hello"&count+integer=42/count +``` + +1. **Base:** `/` - The base URL of the HyperBEAM node. +2. **Root Device:** [`~message@1.0`](../devices/message-at-1-0.md) +3. **Query Params:** `greeting="Hello"` (binary) and `count+integer=42` (integer), forming the message `#{ <<"greeting">> => <<"Hello">>, <<"count">> => 42 }`. +4. **Path:** `/count` tells `~message@1.0` to retrieve the value associated with the key `count`. + +**Response:** The integer `42`.
+ +### Example 4: Using the `~message@1.0` Device with Type Casting + +The [`~message@1.0`](../devices/message-at-1-0.md) device can be used to construct and query transient messages, utilizing type casting in query parameters. + +Consider the following URL: + +``` +GET /~message@1.0&name="Alice"&age+integer=30&items+list="apple",1,"banana"&config+map=key1="val1";key2=true/[PATH] +``` + +HyperBEAM processes this as follows: + +1. **Base:** `/` - The base URL of the HyperBEAM node. +2. **Root Device:** [`~message@1.0`](../devices/message-at-1-0.md) +3. **Query Parameters (with type casting):** + * `name="Alice"` -> `#{ <<"name">> => <<"Alice">> }` (binary) + * `age+integer=30` -> `#{ <<"age">> => 30 }` (integer) + * `items+list="apple",1,"banana"` -> `#{ <<"items">> => [<<"apple">>, 1, <<"banana">>] }` (list) + * `config+map=key1="val1";key2=true` -> `#{ <<"config">> => #{<<"key1">> => <<"val1">>, <<"key2">> => true} }` (map) +4. **Initial Message Map:** A combination of the above key-value pairs. +5. **Path Evaluation:** + * If `[PATH]` is `/items/1`, the response is the integer `1`. + * If `[PATH]` is `/config/key1`, the response is the binary `<<"val1">>`. + +## Best Practices + +1. Always verify cryptographic signatures on responses +2. Use appropriate caching strategies for frequently accessed data +3. Implement proper error handling for network requests +4. Consider rate limits and performance implications +5. Keep sensitive data secure and use appropriate authentication methods +--- END OF FILE: docs/introduction/pathing-in-ao-core.md --- + +--- START OF FILE: docs/introduction/what-is-ao-core.md --- +# What is AO-Core? + +AO-Core is the foundational protocol underpinning the [AO Computer](https://ao.arweave.net). It defines a minimal, generalized model for decentralized computation built around standard web technologies like HTTP. 
Think of it as a way to interpret the Arweave permaweb not just as static storage, but as a dynamic, programmable, and infinitely scalable computing environment. + +## Core Concepts + +AO-Core revolves around three fundamental components: + +1. **Messages:** The smallest units of data and computation. Messages can be simple data blobs or maps of named functions. They are the primary means of communication and triggering execution within the system. Messages are cryptographically linked, forming a verifiable computation graph. +2. **Devices:** Modules responsible for interpreting and processing messages. Each device defines specific logic for how messages are handled (e.g., executing WASM, storing data, relaying information). This modular design allows nodes to specialize and the system to be highly extensible. +3. **Paths:** Structures that link messages over time, creating a verifiable history of computations. Paths allow users to navigate the computation graph and access specific states or results. They leverage `HashPaths`, cryptographic fingerprints representing the sequence of operations leading to a specific message state, ensuring traceability and integrity. + +## Key Principles + +* **Minimalism:** AO-Core provides the simplest possible representation of data and computation, avoiding prescriptive consensus mechanisms or specific VM requirements. +* **HTTP Native:** Designed for compatibility with HTTP protocols, making it accessible via standard web tools and infrastructure. +* **Scalability:** By allowing parallel message processing and modular device execution, AO-Core enables hyper-parallel computing, overcoming the limitations of traditional sequential blockchains. +* **Permissionlessness & Trustlessness:** While AO-Core itself is minimal, it provides the framework upon which higher-level protocols like AO can build systems that allow anyone to participate (`permissionlessness`) without needing to trust intermediaries (`trustlessness`). 
Users can choose their desired security and performance trade-offs. + +AO-Core transforms the permanent data storage of Arweave into a global, shared computation space, enabling the creation of complex, autonomous, and scalable decentralized applications. + + +--- END OF FILE: docs/introduction/what-is-ao-core.md --- + +--- START OF FILE: docs/introduction/what-is-hyperbeam.md --- +# What is HyperBEAM? + +HyperBEAM is the primary, production-ready implementation of the [AO-Core protocol](./what-is-ao-core.md), built on the robust Erlang/OTP framework. It serves as a decentralized operating system, powering the AO Computer—a scalable, trust-minimized, distributed supercomputer built on permanent storage. HyperBEAM provides the runtime environment and essential services to execute AO-Core computations across a network of distributed nodes. + +## Why HyperBEAM Matters + +HyperBEAM transforms the abstract concepts of AO-Core—such as [Messages](./what-is-ao-core.md#core-concepts), [Devices](./what-is-ao-core.md#core-concepts), and [Paths](./what-is-ao-core.md#core-concepts)—into a concrete, operational system. Here's why it's pivotal to the AO ecosystem: + +- **Modularity via Devices:** HyperBEAM introduces a uniquely modular architecture centered around [Devices](./ao-devices.md). These pluggable components define specific computational logic (like running WASM, managing state, or relaying data), allowing for unprecedented flexibility, specialization, and extensibility in a decentralized system. +- **Decentralized OS:** It equips nodes with the infrastructure to join the AO network, manage resources, execute computations, and communicate seamlessly. +- **Erlang/OTP Powerhouse:** Leveraging the BEAM virtual machine, HyperBEAM inherits Erlang's concurrency, fault tolerance, and scalability—perfect for distributed systems with lightweight processes and message passing. 
+- **Hardware Independence:** It abstracts underlying hardware, allowing diverse nodes to contribute resources without compatibility issues. +- **Node Coordination:** It governs how nodes join the network, offer services through specific Devices, and interact with one another. +- **Verifiable Computation:** Through hashpaths and the Converge Protocol, HyperBEAM ensures computation results are cryptographically verified and trustworthy. + +In essence, HyperBEAM is the engine that drives the AO Computer, enabling a vision of decentralized, verifiable computing at scale. + +## Core Components & Features + +- **Pluggable Devices:** The heart of HyperBEAM's extensibility. It includes essential built-in devices like [`~meta`](../devices/meta-at-1-0.md), [`~relay`](../devices/relay-at-1-0.md), [`~process`](../devices/process-at-1-0.md), [`~scheduler`](../devices/scheduler-at-1-0.md), and [`~wasm64`](../devices/wasm64-at-1-0.md) for core functionality, but the system is designed for easy addition of new custom devices. +- **Message System:** Everything in HyperBEAM is a "Message"—a map of named functions or binary data that can be processed, transformed, and cryptographically verified. +- **HTTP Interface:** Nodes expose an HTTP server for interaction via standard web requests and HyperPATHs, structured URLs that represent computation paths. +- **Modularity:** Its design supports easy extension, allowing new devices and functionalities to be added effortlessly. + +## Architecture + +* **Initialization Flow:** When a HyperBEAM node starts, it initializes the name service, scheduler registry, timestamp server, and HTTP server, establishing core services for process management, timing, communication, and storage. +* **Compute Model:** Computation follows the pattern \`Message1(Message2) => Message3\`, where messages are resolved through their devices and [paths](./pathing-in-ao-core.md). 
The integrity and history of these computations are ensured by **hashpaths**, which serve as a cryptographic audit trail. +* **Scheduler System:** The scheduler component manages execution order using ["slots"](../devices/scheduler-at-1-0.md#slot-system) — sequential positions that guarantee deterministic computation. +* **Process Slots:** Each process has numbered slots starting from 0 that track message execution order, ensuring consistent computation even across distributed nodes. + +## HTTP API and Pathing + +HyperBEAM exposes a powerful HTTP API that allows for interacting with processes and accessing data through structured URL patterns. We call URLs that represent computation paths "HyperPATHs". The URL bar effectively functions as a command-line interface for AO's trustless and verifiable compute. + +For a comprehensive guide on constructing and interpreting paths in AO-Core, including detailed examples and best practices, see [Pathing in AO-Core](./pathing-in-ao-core.md). + +In essence, HyperBEAM is the engine that powers the AO Computer, enabling the vision of a scalable, trust-minimized, decentralized supercomputer built on permanent storage. + +*See also: [HyperBEAM GitHub Repository](https://github.com/permaweb/HyperBEAM)* + +--- END OF FILE: docs/introduction/what-is-hyperbeam.md --- + --- START OF FILE: docs/resources/llms.md --- # LLM Context Files @@ -621,7 +1287,11 @@ You can build a wide range of applications, including: ### Is HyperBEAM open source? -Yes, HyperBEAM is open-source software licensed under the MIT License. +Yes, HyperBEAM is open-source software licensed under the Business Source License. + +### What is the current focus or phase of HyperBEAM development? + +The initial development phase focuses on integrating AO processes more deeply with HyperBEAM. A key part of this is phasing out the reliance on traditional "dryrun" simulations for reading process state.
Instead, processes are encouraged to use the [~patch@1.0 device](../../resources/source-code/dev_patch.md) to expose specific parts of their state directly via HyperPATH GET requests. This allows for more efficient and direct state access, particularly for web interfaces and external integrations. You can learn more about this mechanism in the [Exposing Process State with the Patch Device](../../build/exposing-process-state.md) guide. ## Installation and Setup @@ -818,7 +1488,7 @@ For a more comprehensive glossary of terms used in the permaweb, try the [Permaw
@@ -1930,7 +2600,7 @@ Verify the given transaction actually has the given identifier. ## Function Index ## -
compress_ecdsa_pubkey/1*
hash_address/1*
hmac/1
hmac/2
load_key/1Read the keyfile for the key with the given address from disk.
load_keyfile/1Extract the public and private key from a keyfile.
new/0
new/1
new_keyfile/2Generate a new wallet public and private key, with a corresponding keyfile.
sign/2Sign some data with a private key.
sign/3Sign some data, hashed using the provided DigestType.
to_address/1Generate an address from a public key.
to_address/2
to_rsa_address/1*
verify/3Verify that a signature is correct.
verify/4
wallet_filepath/1*
wallet_filepath/3*
wallet_filepath2/1*
wallet_name/3*
+
compress_ecdsa_pubkey/1*
hash_address/1*
hmac/1
hmac/2
load_key/1Read the keyfile for the key with the given address from disk.
load_keyfile/1Extract the public and private key from a keyfile.
new/0
new/1
new_keyfile/2Generate a new wallet public and private key, with a corresponding keyfile.
sign/2Sign some data with a private key.
sign/3Sign some data, hashed using the provided DigestType.
to_address/1Generate an address from a public key.
to_address/2
to_ecdsa_address/1*
to_rsa_address/1*
verify/3Verify that a signature is correct.
verify/4
wallet_filepath/1*
wallet_filepath/3*
wallet_filepath2/1*
wallet_name/3*
@@ -2030,6 +2700,12 @@ Generate an address from a public key. `to_address(PubKey, X2) -> any()` + + +### to_ecdsa_address/1 * ### + +`to_ecdsa_address(PubKey) -> any()` + ### to_rsa_address/1 * ### @@ -2270,7 +2946,7 @@ records to and from TABMs.
commit/3Sign a message using the priv_wallet key in the options.
committed/3Return a list of committed keys from an ANS-104 message.
committed_from_trusted_keys/3*
content_type/1Return the content type for the codec.
deduplicating_from_list/1*Deduplicate a list of key-value pairs by key, generating a list of -values for each normalized key if there are duplicates.
deserialize/1Deserialize a binary ans104 message to a TABM.
do_from/1*
duplicated_tag_name_test/0*
encoded_tags_to_map/1*Convert an ANS-104 encoded tag list into a HyperBEAM-compatible map.
from/1Convert a #tx record into a message map recursively.
from_maintains_tag_name_case_test/0*
id/1Return the ID of a message.
normal_tags/1*Check whether a list of key-value pairs contains only normalized keys.
normal_tags_test/0*
only_committed_maintains_target_test/0*
restore_tag_name_case_from_cache_test/0*
serialize/1Serialize a message or TX to a binary.
signed_duplicated_tag_name_test/0*
simple_to_conversion_test/0*
tag_map_to_encoded_tags/1*Convert a HyperBEAM-compatible map into an ANS-104 encoded tag list, +values for each normalized key if there are duplicates.
deserialize/1Deserialize a binary ans104 message to a TABM.
do_from/1*
duplicated_tag_name_test/0*
encoded_tags_to_map/1*Convert an ANS-104 encoded tag list into a HyperBEAM-compatible map.
from/1Convert a #tx record into a message map recursively.
from_maintains_tag_name_case_test/0*
id/1Return the ID of a message.
normal_tags/1*Check whether a list of key-value pairs contains only normalized keys.
normal_tags_test/0*
only_committed_maintains_target_test/0*
quantity_field_is_ignored_in_from_test/0*
quantity_key_encoded_as_tag_test/0*
restore_tag_name_case_from_cache_test/0*
serialize/1Serialize a message or TX to a binary.
signed_duplicated_tag_name_test/0*
simple_to_conversion_test/0*
tag_map_to_encoded_tags/1*Convert a HyperBEAM-compatible map into an ANS-104 encoded tag list, recreating the original order of the tags.
to/1Internal helper to translate a message to its #tx record representation, which can then be used by ar_bundles to serialize the message.
verify/3Verify an ANS-104 commitment.
@@ -2388,6 +3064,18 @@ Check whether a list of key-value pairs contains only normalized keys. `only_committed_maintains_target_test() -> any()` + + +### quantity_field_is_ignored_in_from_test/0 * ### + +`quantity_field_is_ignored_in_from_test() -> any()` + + + +### quantity_key_encoded_as_tag_test/0 * ### + +`quantity_key_encoded_as_tag_test() -> any()` + ### restore_tag_name_case_from_cache_test/0 * ### @@ -2461,13 +3149,19 @@ their value. ## Function Index ## -
commit/3
committed/3
deep_nesting_test/0*
deserialize/1
empty_map_test/0*
from/1Convert a flat map to a TABM.
inject_at_path/3*
multiple_paths_test/0*
nested_conversion_test/0*
path_list_test/0*
serialize/1
simple_conversion_test/0*
to/1Convert a TABM to a flat map.
verify/3
+
binary_passthrough_test/0*
commit/3
committed/3
deep_nesting_test/0*
deserialize/1
empty_map_test/0*
from/1Convert a flat map to a TABM.
inject_at_path/3*
multiple_paths_test/0*
nested_conversion_test/0*
path_list_test/0*
serialize/1
simple_conversion_test/0*
to/1Convert a TABM to a flat map.
verify/3
## Function Details ## + + +### binary_passthrough_test/0 * ### + +`binary_passthrough_test() -> any()` + ### commit/3 ### @@ -2809,7 +3503,6 @@ Decode the `ao-ids` key into a map. -* [Data Types](#types) This module implements HTTP Message Signatures as described in RFC-9421 (https://datatracker.ietf.org/doc/html/rfc9421), as an AO-Core device. @@ -4102,12 +4795,21 @@ endpoint. - +The green zone device, which provides secure communication and identity +management between trusted nodes. + + + +## Description ## +It handles node initialization, joining existing green zones, key exchange, +and node identity cloning. All operations are protected by hardware +commitment and encryption. ## Function Index ## -
add_trusted_node/4*
become/3
calculate_node_message/3*
decrypt_zone_key/2*
default_zone_required_opts/1*
encrypt_payload/2*
finalize_become/5*
init/3
join/3
join_peer/5*
key/3
maybe_set_zone_opts/4*
rsa_wallet_integration_test/0*
try_mount_encrypted_volume/2*
validate_join/3*
validate_peer_opts/2*
+
add_trusted_node/4*Adds a node to the trusted nodes list with its commitment report.
become/3Clones the identity of a target node in the green zone.
calculate_node_message/3*Generate the node message that should be set prior to joining +a green zone.
decrypt_zone_key/2*Decrypts an AES key using the node's RSA private key.
default_zone_required_opts/1*Provides the default required options for a green zone.
encrypt_payload/2*Encrypts an AES key with a node's RSA public key.
finalize_become/5*
info/1Controls which functions are exposed via the device API.
info/3Provides information about the green zone device and its API.
init/3Initialize the green zone for a node.
join/3Initiates the join process for a node to enter an existing green zone.
join_peer/5*Processes a join request to a specific peer node.
key/3Encrypts and provides the node's private key for secure sharing.
maybe_set_zone_opts/4*Adopts configuration from a peer when joining a green zone.
rsa_wallet_integration_test/0*Test RSA operations with the existing wallet structure.
try_mount_encrypted_volume/2*Attempts to mount an encrypted volume using the green zone AES key.
validate_join/3*Validates an incoming join request from another node.
validate_peer_opts/2*Validates that a peer's configuration matches required options.
@@ -4123,6 +4825,17 @@ add_trusted_node(NodeAddr::binary(), Report::map(), RequesterPubKey::term(), Opt
+`NodeAddr`: The joining node's address
`Report`: The commitment report provided by the joining node
`RequesterPubKey`: The joining node's public key
`Opts`: A map of configuration options
+ +returns: ok + +Adds a node to the trusted nodes list with its commitment report. + +This function updates the trusted nodes configuration: +1. Retrieves the current trusted nodes map +2. Adds the new node with its report and public key +3. Updates the node configuration with the new trusted nodes list + ### become/3 ### @@ -4132,26 +4845,85 @@ become(M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, binary()
+`Opts`: A map of configuration options
+ +returns: `{ok, Map}` on success with confirmation details, or +`{error, Binary}` if the node is not part of a green zone or +identity adoption fails. + +Clones the identity of a target node in the green zone. + +This function performs the following operations: +1. Retrieves target node location and ID from the configuration +2. Verifies that the local node has a valid shared AES key +3. Requests the target node's encrypted key via its key endpoint +4. Verifies the response is from the expected peer +5. Decrypts the target node's private key using the shared AES key +6. Updates the local node's wallet with the target node's identity + +Required configuration in Opts map: +- green_zone_peer_location: Target node's address +- green_zone_peer_id: Target node's unique identifier +- priv_green_zone_aes: The shared AES key for the green zone + ### calculate_node_message/3 * ### -`calculate_node_message(RequiredOpts, Req, List) -> any()` +`calculate_node_message(RequiredOpts, Req, List) -> any()` + +Generate the node message that should be set prior to joining +a green zone. + +This function takes a required opts message, a request message, and an +`adopt-config` value. The `adopt-config` value can be a boolean, a list of +fields that should be included in the node message from the request, or a +binary string of fields to include, separated by commas. + + + +### decrypt_zone_key/2 * ### + +

+decrypt_zone_key(EncZoneKey::binary(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`EncZoneKey`: The encrypted zone AES key (Base64 encoded or binary)
`Opts`: A map of configuration options
+ +returns: {ok, DecryptedKey} on success with the decrypted AES key + +Decrypts an AES key using the node's RSA private key. + +This function handles decryption of the zone key: +1. Decodes the encrypted key if it's in Base64 format +2. Extracts the RSA private key components from the wallet +3. Creates an RSA private key record +4. Performs private key decryption on the encrypted key - + -### decrypt_zone_key/2 * ### +### default_zone_required_opts/1 * ###

-decrypt_zone_key(EncZoneKey::binary(), Opts::map()) -> {ok, binary()} | {error, binary()}
+default_zone_required_opts(Opts::map()) -> map()
 

- +`Opts`: A map of configuration options from which to derive defaults
-### default_zone_required_opts/1 * ### +returns: A map of required configuration options for the green zone -`default_zone_required_opts(Opts) -> any()` +Provides the default required options for a green zone. + +This function defines the baseline security requirements for nodes in a green zone: +1. Restricts loading of remote devices and only allows trusted signers +2. Limits to preloaded devices from the initiating machine +3. Enforces specific store configuration +4. Prevents route changes from the defaults +5. Requires matching hooks across all peers +6. Disables message scheduling to prevent conflicts +7. Enforces a permanent state to prevent further configuration changes @@ -4162,21 +4934,75 @@ encrypt_payload(AESKey::binary(), RequesterPubKey::term()) -> binary()
+`AESKey`: The shared AES key (256-bit binary)
`RequesterPubKey`: The node's public RSA key
+ +returns: The encrypted AES key + +Encrypts an AES key with a node's RSA public key. + +This function securely encrypts the shared key for transmission: +1. Extracts the RSA public key components +2. Creates an RSA public key record +3. Performs public key encryption on the AES key + ### finalize_become/5 * ### `finalize_become(KeyResp, NodeLocation, NodeID, GreenZoneAES, Opts) -> any()` + + +### info/1 ### + +`info(X1) -> any()` + +Controls which functions are exposed via the device API. + +This function defines the security boundary for the green zone device by +explicitly listing which functions are available through the API. + + + +### info/3 ### + +`info(Msg1, Msg2, Opts) -> any()` + +Provides information about the green zone device and its API. + +This function returns detailed documentation about the device, including: +1. A high-level description of the device's purpose +2. Version information +3. Available API endpoints with their parameters and descriptions + ### init/3 ###

-init(M1::term(), M2::term(), Opts::map()) -> {ok, binary()}
+init(M1::term(), M2::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
 

+`Opts`: A map of configuration options
+ +returns: `{ok, Binary}` on success with confirmation message, or +`{error, Binary}` on failure with error message. + +Initialize the green zone for a node. + +This function performs the following operations: +1. Validates the node's history to ensure this is a valid initialization +2. Retrieves or creates a required configuration for the green zone +3. Ensures a wallet (keypair) exists or creates a new one +4. Generates a new 256-bit AES key for secure communication +5. Updates the node's configuration with these cryptographic identities + +Config options in Opts map: +- green_zone_required_config: (Optional) Custom configuration requirements +- priv_wallet: (Optional) Existing wallet to use instead of creating a new one +- priv_green_zone_aes: (Optional) Existing AES key, if already part of a zone + ### join/3 ### @@ -4186,15 +5012,55 @@ join(M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, binary()}
+`M1`: The join request message with target peer information
`M2`: Additional request details, may include adoption preferences
`Opts`: A map of configuration options for join operations
+ +returns: `{ok, Map}` on success with join response details, or +`{error, Binary}` on failure with error message. + +Initiates the join process for a node to enter an existing green zone. + +This function performs the following operations depending on the state: +1. Validates the node's history to ensure proper initialization +2. Checks for target peer information (location and ID) +3. If target peer is specified: +a. Generates a commitment report for the peer +b. Prepares and sends a POST request to the target peer +c. Verifies the response and decrypts the returned zone key +d. Updates local configuration with the shared AES key +4. If no peer is specified, processes the join request locally + +Config options in Opts map: +- green_zone_peer_location: Target peer's address +- green_zone_peer_id: Target peer's unique identifier +- green_zone_adopt_config: +(Optional) Whether to adopt peer's configuration (default: true) + ### join_peer/5 * ###

-join_peer(PeerLocation::binary(), PeerID::binary(), M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, map()}
+join_peer(PeerLocation::binary(), PeerID::binary(), M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, map() | binary()}
 

+`PeerLocation`: The target peer's address
`PeerID`: The target peer's unique identifier
`M2`: May contain ShouldMount flag to enable encrypted volume mounting
+ +returns: `{ok, Map}` on success with confirmation message, or +`{error, Map|Binary}` on failure with error details + +Processes a join request to a specific peer node. + +This function handles the client-side join flow when connecting to a peer: +1. Verifies the node is not already in a green zone +2. Optionally adopts configuration from the target peer +3. Generates a hardware-backed commitment report +4. Sends a POST request to the peer's join endpoint +5. Verifies the response signature +6. Decrypts the returned AES key +7. Updates local configuration with the shared key +8. Optionally mounts an encrypted volume using the shared key + ### key/3 ### @@ -4204,11 +5070,49 @@ key(M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, binary()}
+`Opts`: A map of configuration options
+ +returns: `{ok, Map}` containing the encrypted key and IV on success, or +`{error, Binary}` if the node is not part of a green zone + +Encrypts and provides the node's private key for secure sharing. + +This function performs the following operations: +1. Retrieves the shared AES key and the node's wallet +2. Verifies that the node is part of a green zone (has a shared AES key) +3. Generates a random initialization vector (IV) for encryption +4. Encrypts the node's private key using AES-256-GCM with the shared key +5. Returns the encrypted key and IV for secure transmission + +Required configuration in Opts map: +- priv_green_zone_aes: The shared AES key for the green zone +- priv_wallet: The node's wallet containing the private key to encrypt + ### maybe_set_zone_opts/4 * ### -`maybe_set_zone_opts(PeerLocation, PeerID, Req, InitOpts) -> any()` +

+maybe_set_zone_opts(PeerLocation::binary(), PeerID::binary(), Req::map(), InitOpts::map()) -> {ok, map()} | {error, binary()}
+
+
+ +`PeerLocation`: The location of the peer node to join
`PeerID`: The ID of the peer node to join
`Req`: The request message with adoption preferences
`InitOpts`: A map of initial configuration options
+ +returns: `{ok, Map}` with updated configuration on success, or +`{error, Binary}` if configuration retrieval fails + +Adopts configuration from a peer when joining a green zone. + +This function handles the conditional adoption of peer configuration: +1. Checks if adoption is enabled (default: true) +2. Requests required configuration from the peer +3. Verifies the authenticity of the configuration +4. Creates a node message with appropriate settings +5. Updates the local node configuration + +Config options: +- green_zone_adopt_config: Controls configuration adoption (boolean, list, or binary) @@ -4216,12 +5120,28 @@ key(M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, binary()} `rsa_wallet_integration_test() -> any()` +Test RSA operations with the existing wallet structure. + +This test function verifies that encryption and decryption using the RSA keys +from the wallet work correctly. It creates a new wallet, encrypts a test +message with the RSA public key, and then decrypts it with the RSA private +key, asserting that the decrypted message matches the original. + ### try_mount_encrypted_volume/2 * ### `try_mount_encrypted_volume(AESKey, Opts) -> any()` +Attempts to mount an encrypted volume using the green zone AES key. + +This function handles the complete process of secure storage setup by +delegating to the dev_volume module, which provides a unified interface +for volume management. + +The encryption key used for the volume is the same AES key used for green zone +communication, ensuring that only nodes in the green zone can access the data. + ### validate_join/3 * ### @@ -4231,15 +5151,217 @@ validate_join(M1::term(), Req::map(), Opts::map()) -> {ok, map()} | {error, b
+`M1`: Ignored parameter
`Req`: The join request containing commitment report and public key
`Opts`: A map of configuration options
+ +returns: `{ok, Map}` on success with encrypted AES key, or +`{error, Binary}` on failure with error message + +Validates an incoming join request from another node. + +This function handles the server-side join flow when receiving a connection +request: +1. Validates the peer's configuration meets required standards +2. Extracts the commitment report and public key from the request +3. Verifies the hardware-backed commitment report +4. Adds the joining node to the trusted nodes list +5. Encrypts the shared AES key with the peer's public key +6. Returns the encrypted key to the requesting node + ### validate_peer_opts/2 * ### -`validate_peer_opts(Req, Opts) -> any()` +

+validate_peer_opts(Req::map(), Opts::map()) -> boolean()
+
+
+ +`Req`: The request message containing the peer's configuration
`Opts`: A map of the local node's configuration options
+ +returns: true if the peer's configuration is valid, false otherwise + +Validates that a peer's configuration matches required options. + +This function ensures the peer node meets configuration requirements: +1. Retrieves the local node's required configuration +2. Gets the peer's options from its message +3. Adds required configuration to peer's required options list +4. Verifies the peer's node history is valid +5. Checks that the peer's options match the required configuration --- END OF FILE: docs/resources/source-code/dev_green_zone.md --- +--- START OF FILE: docs/resources/source-code/dev_hook.md --- +# [Module dev_hook.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_hook.erl) + + + + +A generalized interface for `hooking` into HyperBEAM nodes. + + + +## Description ## + +This module allows users to define `hooks` that are executed at various +points in the lifecycle of nodes and message evaluations. + +Hooks are maintained in the `node message` options, under the key `on` +key. Each `hook` may have zero or many `handlers` which their request is +executed against. A new `handler` of a hook can be registered by simply +adding a new key to that message. If multiple hooks need to be executed for +a single event, the key's value can be set to a list of hooks. + +`hook`s themselves do not need to be added explicitly. Any device can add +a hook by simply executing `dev_hook:on(HookName, Req, Opts)`. This +function is does not affect the hashpath of a message and is not exported on +the device`s API, such that it is not possible to call it directly with +AO-Core resolution. + +All handlers are expressed in the form of a message, upon which the hook's +request is evaluated: + +AO(HookMsg, Req, Opts) => {Status, Result} + +The `Status` and `Result` of the evaluation can be used at the `hook` caller's +discretion. 
If multiple handlers are to be executed for a single `hook`, the +result of each is used as the input to the next, on the assumption that the +status of the previous is `ok`. If a non-`ok` status is encountered, the +evaluation is halted and the result is returned to the caller. This means +that in most cases, hooks take the form of chainable pipelines of functions, +passing the most pertinent data in the `body` key of both the request and +result. Hook definitions can also set the `hook/result` key to `ignore`, if +the result of the execution should be discarded and the prior value (the +input to the hook) should be used instead. The `hook/commit-request` key can +also be set to `true` if the request should be committed by the node before +execution of the hook. + +The default HyperBEAM node implements several useful hooks. They include: + +start: Executed when the node starts. +Req/body: The node's initial configuration. +Result/body: The node's possibly updated configuration. +request: Executed when a request is received via the HTTP API. +Req/body: The sequence of messages that the node will evaluate. +Req/request: The raw, unparsed singleton request. +Result/body: The sequence of messages that the node will evaluate. +step: Executed after each message in a sequence has been evaluated. +Req/body: The result of the evaluation. +Result/body: The result of the evaluation. +response: Executed when a response is sent via the HTTP API. +Req/body: The result of the evaluation. +Req/request: The raw, unparsed singleton request that was used to +generate the response. +Result/body: The message to be sent in response to the request. + +Additionally, this module implements a traditional device API, allowing the +node operator to register hooks to the node and find those that are +currently active. + +## Function Index ## + + +
execute_handler/4*Execute a single handler. Handlers are expressed as messages that can be resolved via AO.
execute_handlers/4*Execute a list of handlers in sequence.
find/2Get all handlers for a specific hook from the node message options.
find/3
halt_on_error_test/0*Test that pipeline execution halts on error.
info/1Device API information.
multiple_handlers_test/0*Test that multiple handlers form a pipeline.
no_handlers_test/0*Test that hooks with no handlers return the original request.
on/3Execute a named hook with the provided request and options. This function finds all handlers for the hook and evaluates them in sequence.
single_handler_test/0*Test that a single handler is executed correctly.
+ + + + +## Function Details ## + + + +### execute_handler/4 * ### + +`execute_handler(HookName, Handler, Req, Opts) -> any()` + +Execute a single handler +Handlers are expressed as messages that can be resolved via AO. + + + +### execute_handlers/4 * ### + +`execute_handlers(HookName, Rest, Req, Opts) -> any()` + +Execute a list of handlers in sequence. +The result of each handler is used as input to the next handler. +If a handler returns a non-ok status, execution is halted. + + + +### find/2 ### + +`find(HookName, Opts) -> any()` + +Get all handlers for a specific hook from the node message options. +Handlers are stored in the `on` key of this message. The `find/2` variant of +this function only takes a hook name and node message, and is not called +directly via the device API. Instead it is used by `on/3` and other internal +functionality to find handlers when necessary. The `find/3` variant can, +however, be called directly via the device API. + + + +### find/3 ### + +`find(Base, Req, Opts) -> any()` + + + +### halt_on_error_test/0 * ### + +`halt_on_error_test() -> any()` + +Test that pipeline execution halts on error + + + +### info/1 ### + +`info(X1) -> any()` + +Device API information + + + +### multiple_handlers_test/0 * ### + +`multiple_handlers_test() -> any()` + +Test that multiple handlers form a pipeline + + + +### no_handlers_test/0 * ### + +`no_handlers_test() -> any()` + +Test that hooks with no handlers return the original request + + + +### on/3 ### + +`on(HookName, Req, Opts) -> any()` + +Execute a named hook with the provided request and options +This function finds all handlers for the hook and evaluates them in sequence. +The result of each handler is used as input to the next handler. 
+ + + +### single_handler_test/0 * ### + +`single_handler_test() -> any()` + +Test that a single handler is executed correctly + + +--- END OF FILE: docs/resources/source-code/dev_hook.md --- + --- START OF FILE: docs/resources/source-code/dev_hyperbuddy.md --- # [Module dev_hyperbuddy.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_hyperbuddy.erl) @@ -4944,7 +6066,7 @@ Check if a string terminates with a given suffix. -A device that calls a Lua script upon a request and returns the result. +A device that calls a Lua module upon a request and returns the result. @@ -4952,10 +6074,10 @@ A device that calls a Lua script upon a request and returns the result.
ao_core_resolution_from_lua_test/0*Run an AO-Core resolution from the Lua environment.
ao_core_sandbox_test/0*Run an AO-Core resolution from the Lua environment.
aos_authority_not_trusted_test/0*
aos_process_benchmark_test_/0*Benchmark the performance of Lua executions.
compute/4*Call the Lua script with the given arguments.
decode/1Decode a Lua result into a HyperBEAM structured@1.0 message.
decode_params/2*Decode a list of Lua references, as found in a stack trace, into a list of Erlang terms.
decode_stacktrace/2*Parse a Lua stack trace into a list of messages.
decode_stacktrace/3*
direct_benchmark_test/0*Benchmark the performance of Lua executions.
encode/1Encode a HyperBEAM structured@1.0 message into a Lua term.
ensure_initialized/3*Initialize the Lua VM if it is not already initialized.
error_response_test/0*
execute_aos_call/1*
execute_aos_call/2*
find_scripts/2*Find the script in the base message, either by ID or by string.
functions/3Return a list of all functions in the Lua environment.
generate_lua_process/1*Generate a Lua process message.
generate_stack/1*Generate a stack message for the Lua process.
generate_test_message/1*Generate a test message for a Lua process.
info/1All keys that are not directly available in the base message are resolved by calling the Lua function in the script of the same name.
init/3Initialize the device state, loading the script into memory if it is a reference.
initialize/3*Initialize a new Lua state with a given base message and script.
invoke_aos_test/0*
invoke_non_compute_key_test/0*Call a non-compute key on a Lua device message and ensure that the function of the same name in the script is called.
load_scripts/2*Load a list of scripts for installation into the Lua VM.
load_scripts/3*
load_scripts_by_id_test/0*
lua_http_preprocessor_test/0*Use a Lua script as a preprocessor on the HTTP server via ~meta@1.0.
multiple_scripts_test/0*
normalize/3Restore the Lua state from a snapshot, if it exists.
process_response/2*Process a response to a Luerl invocation.
pure_lua_process_benchmark_test_/0*
pure_lua_process_test/0*Call a process whose execution-device is set to lua@5.3a.
sandbox/3*Sandbox (render inoperable) a set of Lua functions.
sandboxed_failure_test/0*
simple_invocation_test/0*
snapshot/3Snapshot the Lua state from a live computation.
+list of Erlang terms.decode_stacktrace/2*Parse a Lua stack trace into a list of messages.decode_stacktrace/3*direct_benchmark_test/0*Benchmark the performance of Lua executions.encode/1Encode a HyperBEAM structured@1.0 message into a Lua term.ensure_initialized/3*Initialize the Lua VM if it is not already initialized.error_response_test/0*find_modules/2*Find the script in the base message, either by ID or by string.functions/3Return a list of all functions in the Lua environment.generate_lua_process/1*Generate a Lua process message.generate_stack/1*Generate a stack message for the Lua process.generate_test_message/1*Generate a test message for a Lua process.info/1All keys that are not directly available in the base message are +resolved by calling the Lua function in the module of the same name.init/3Initialize the device state, loading the script into memory if it is +a reference.initialize/3*Initialize a new Lua state with a given base message and module.invoke_aos_test/0*invoke_non_compute_key_test/0*Call a non-compute key on a Lua device message and ensure that the +function of the same name in the script is called.load_modules/2*Load a list of modules for installation into the Lua VM.load_modules/3*load_modules_by_id_test/0*lua_http_hook_test/0*Use a Lua module as a hook on the HTTP server via ~meta@1.0.multiple_modules_test/0*normalize/3Restore the Lua state from a snapshot, if it exists.process_response/2*Process a response to a Luerl invocation.pure_lua_process_benchmark_test_/0*pure_lua_process_test/0*Call a process whose execution-device is set to lua@5.3a.sandbox/3*Sandbox (render inoperable) a set of Lua functions.sandboxed_failure_test/0*simple_invocation_test/0*snapshot/3Snapshot the Lua state from a live computation. @@ -5054,7 +6176,7 @@ Encode a HyperBEAM `structured@1.0` message into a Lua term. `ensure_initialized(Base, Req, Opts) -> any()` Initialize the Lua VM if it is not already initialized. Optionally takes -the script as a Binary string. 
If not provided, the script will be loaded +the script as a Binary string. If not provided, the module will be loaded from the base message. @@ -5063,23 +6185,11 @@ from the base message. `error_response_test() -> any()` - - -### execute_aos_call/1 * ### - -`execute_aos_call(Base) -> any()` - - - -### execute_aos_call/2 * ### - -`execute_aos_call(Base, Req) -> any()` + - +### find_modules/2 * ### -### find_scripts/2 * ### - -`find_scripts(Base, Opts) -> any()` +`find_modules(Base, Opts) -> any()` Find the script in the base message, either by ID or by string. @@ -5122,7 +6232,7 @@ Generate a test message for a Lua process. `info(Base) -> any()` All keys that are not directly available in the base message are -resolved by calling the Lua function in the script of the same name. +resolved by calling the Lua function in the module of the same name. Additionally, we exclude the `keys`, `set`, `encode` and `decode` functions which are `message@1.0` core functions, and Lua public utility functions. @@ -5139,9 +6249,9 @@ a reference. ### initialize/3 * ### -`initialize(Base, Scripts, Opts) -> any()` +`initialize(Base, Modules, Opts) -> any()` -Initialize a new Lua state with a given base message and script. +Initialize a new Lua state with a given base message and module. @@ -5158,39 +6268,39 @@ Initialize a new Lua state with a given base message and script. Call a non-compute key on a Lua device message and ensure that the function of the same name in the script is called. - + -### load_scripts/2 * ### +### load_modules/2 * ### -`load_scripts(Scripts, Opts) -> any()` +`load_modules(Modules, Opts) -> any()` -Load a list of scripts for installation into the Lua VM. +Load a list of modules for installation into the Lua VM. 
- + -### load_scripts/3 * ### +### load_modules/3 * ### -`load_scripts(Rest, Opts, Acc) -> any()` +`load_modules(Rest, Opts, Acc) -> any()` - + -### load_scripts_by_id_test/0 * ### +### load_modules_by_id_test/0 * ### -`load_scripts_by_id_test() -> any()` +`load_modules_by_id_test() -> any()` - + -### lua_http_preprocessor_test/0 * ### +### lua_http_hook_test/0 * ### -`lua_http_preprocessor_test() -> any()` +`lua_http_hook_test() -> any()` -Use a Lua script as a preprocessor on the HTTP server via `~meta@1.0`. +Use a Lua module as a hook on the HTTP server via `~meta@1.0`. - + -### multiple_scripts_test/0 * ### +### multiple_modules_test/0 * ### -`multiple_scripts_test() -> any()` +`multiple_modules_test() -> any()` @@ -5337,7 +6447,7 @@ implement a case-insensitive key lookup rather than delegating to maps:get/2.case_insensitive_get_test/0*commit/3Commit to a message, using the commitment-device key to specify the device that should be used to commit to the message.commitment_ids_from_committers/2*Returns a list of commitment IDs in a commitments map that are relevant for a list of given committer addresses.commitment_ids_from_request/3*Implements a standardized form of specifying commitment IDs for a -message request.committed/3Return the list of committed keys from a message.committers/1Return the committers of a message that are present in the given request.committers/2committers/3exec_for_commitment/5*Execute a function for a single commitment in the context of its +message request.committed/3Return the list of committed keys from a message.committers/1Return the committers of a message that are present in the given request.committers/2committers/3deep_unset_test/0*exec_for_commitment/5*Execute a function for a single commitment in the context of its parent message.get/2Return the value associated with the key as it exists in the message's underlying Erlang map.get/3get_keys_mod_test/0*id/1Return the ID of a message, using the committers list if it 
exists.id/2id/3id_device/1*Locate the ID device of a message.info/0Return the info for the identity device.is_private_mod_test/0*key_from_device_test/0*keys/1Get the public keys of a message.keys_from_device_test/0*private_keys_are_filtered_test/0*remove/2Remove a key or keys from a message.remove_test/0*run_test/0*set/3Deep merge keys in a message.set_conflicting_keys_test/0*set_ignore_undefined_test/0*set_path/3Special case of set/3 for setting the path key.unset_with_set_test/0*verify/3Verify a message.verify_test/0*with_relevant_commitments/3*Return a message with only the relevant commitments for a given request. @@ -5434,6 +6544,12 @@ Return the committers of a message that are present in the given request. `committers(X1, X2, NodeOpts) -> any()` + + +### deep_unset_test/0 * ### + +`deep_unset_test() -> any()` + ### exec_for_commitment/5 * ### @@ -5659,8 +6775,9 @@ the AO-Core resolver has returned a result.
add_dynamic_keys/1*Add dynamic keys to the node message.
adopt_node_message/2Attempt to adopt changes to a node message.
authorized_set_node_msg_succeeds_test/0*Test that we can set the node message if the request is signed by the owner of the node.
claim_node_test/0*Test that we can claim the node correctly and set the node message after.
config_test/0*Test that we can get the node message.
embed_status/1*Wrap the result of a device call in a status.
filter_node_msg/1*Remove items from the node message that are not encodable into a message.
halt_request_test/0*Test that we can halt a request if the preprocessor returns an error.
handle/2Normalize and route messages downstream based on their path.
handle_initialize/2*
handle_resolve/3*Handle an AO-Core request, which is a list of messages.
info/1Ensure that the helper function adopt_node_message/2 is not exported.
info/3Get/set the node message.
is/2Check if the request in question is signed by a given role on the node.
is/3
maybe_sign/2*Sign the result of a device call if the node is configured to do so.
message_to_status/1*Get the HTTP status code from a transaction (if it exists).
modify_request_test/0*Test that a preprocessor can modify a request.
permanent_node_message_test/0*Test that a permanent node message cannot be changed.
priv_inaccessible_test/0*Test that we can't get the node message if the requested key is private.
resolve_processor/5*Execute a message from the node message upon the user's request.
status_code/1*Calculate the appropriate HTTP status code for an AO-Core result.
unauthorized_set_node_msg_fails_test/0*Test that we can't set the node message if the request is not signed by the owner of the node.
build/3Emits the version number and commit hash of the HyperBEAM node source, if available.
buildinfo_test/0*Test that version information is available and returned correctly.
claim_node_test/0*Test that we can claim the node correctly and set the node message after.
config_test/0*Test that we can get the node message.
embed_status/1*Wrap the result of a device call in a status.
filter_node_msg/1*Remove items from the node message that are not encodable into a +message.
halt_request_test/0*Test that we can halt a request if the hook returns an error.
handle/2Normalize and route messages downstream based on their path.
handle_initialize/2*
handle_resolve/3*Handle an AO-Core request, which is a list of messages.
info/1Ensure that the helper function adopt_node_message/2 is not exported.
info/3Get/set the node message.
is/2Check if the request in question is signed by a given role on the node.
is/3
maybe_sign/2*Sign the result of a device call if the node is configured to do so.
message_to_status/1*Get the HTTP status code from a transaction (if it exists).
modify_request_test/0*Test that a hook can modify a request.
permanent_node_message_test/0*Test that a permanent node message cannot be changed.
priv_inaccessible_test/0*Test that we can't get the node message if the requested key is private.
request_response_hooks_test/0*
resolve_hook/4*Execute a hook from the node message upon the user's request.
status_code/1*Calculate the appropriate HTTP status code for an AO-Core result.
unauthorized_set_node_msg_fails_test/0*Test that we can't set the node message if the request is not signed by the owner of the node.
uninitialized_node_test/0*Test that an uninitialized node will not run computation.
update_node_message/2*Validate that the request is signed by the operator of the node, then allow them to update the node message.
@@ -5694,6 +6811,29 @@ Attempt to adopt changes to a node message. Test that we can set the node message if the request is signed by the owner of the node. + + +### build/3 ### + +`build(X1, X2, NodeMsg) -> any()` + +Emits the version number and commit hash of the HyperBEAM node source, +if available. + +We include the short hash separately, as the length of this hash may change in +the future, depending on the git version/config used to build the node. +Subsequently, rather than embedding the `git-short-hash-length`, for the +avoidance of doubt, we include the short hash separately, as well as its long +hash. + + + +### buildinfo_test/0 * ### + +`buildinfo_test() -> any()` + +Test that version information is available and returned correctly. + ### claim_node_test/0 * ### @@ -5733,7 +6873,7 @@ message. `halt_request_test() -> any()` -Test that we can halt a request if the preprocessor returns an error. +Test that we can halt a request if the hook returns an error. @@ -5760,7 +6900,7 @@ other messages are routed to the `handle_resolve/2` function. Handle an AO-Core request, which is a list of messages. We apply the node's pre-processor to the request first, and then resolve the request using the node's AO-Core implementation if its response was `ok`. -After execution, we run the node's `postprocessor` message on the result of +After execution, we run the node's `response` hook on the result of the request before returning the result it grants back to the user. @@ -5826,7 +6966,7 @@ Get the HTTP status code from a transaction (if it exists). `modify_request_test() -> any()` -Test that a preprocessor can modify a request. +Test that a hook can modify a request. @@ -5844,20 +6984,26 @@ Test that a permanent node message cannot be changed. Test that we can't get the node message if the requested key is private. 
- + + +### request_response_hooks_test/0 * ### + +`request_response_hooks_test() -> any()` -### resolve_processor/5 * ### + -`resolve_processor(PathKey, Processor, Req, Query, NodeMsg) -> any()` +### resolve_hook/4 * ### -Execute a message from the node message upon the user's request. The -invocation of the processor provides a request of the following form: +`resolve_hook(HookName, InitiatingRequest, Body, NodeMsg) -> any()` + +Execute a hook from the node message upon the user's request. The +invocation of the hook provides a request of the following form: ``` - /path => preprocess | postprocess + /path => request | response /request => the original request singleton - /body => list of messages the user wishes to process + /body => parsed sequence of messages to process | the execution result ``` @@ -6246,10 +7392,10 @@ key may return `infinity` if the node will not serve a user under any circumstances. Else, the value returned by the `price` key will be passed to the ledger device as the `amount` key. -The ledger device should implement the following keys: +A ledger device should implement the following keys: ``` -POST /credit?message=PaymentMessage&request=RequestMessagePOST /debit?amount=PriceMessage&type=pre|post&request=RequestMessage +POST /credit?message=PaymentMessage&request=RequestMessagePOST /debit?amount=PriceMessage&request=RequestMessageGET /balance?request=RequestMessage ``` The `type` key is optional and defaults to `pre`. If `type` is set to `post`, @@ -6260,8 +7406,8 @@ check whether the debit would succeed before execution.
balance/3Get the balance of a user in the ledger.
faff_test/0*Simple test of p4's capabilities with the faff@1.0 device.
is_chargable_req/2*The node operator may elect to make certain routes non-chargable, using -the routes syntax also used to declare routes in router@1.0.
non_chargable_route_test/0*Test that a non-chargable route is not charged for.
postprocess/3Postprocess the request after it has been fulfilled.
preprocess/3Estimate the cost of a transaction and decide whether to proceed with -a request.
test_opts/1*
test_opts/2*
test_opts/3*
+the routes syntax also used to declare routes in router@1.0.lua_pricing_test/0*Ensure that Lua modules can be used as pricing and ledger devices.non_chargable_route_test/0*Test that a non-chargable route is not charged for.request/3Estimate the cost of a transaction and decide whether to proceed with +a request.response/3Postprocess the request after it has been fulfilled.test_opts/1*test_opts/2*test_opts/3* @@ -6293,6 +7439,19 @@ Simple test of p4's capabilities with the `faff@1.0` device. The node operator may elect to make certain routes non-chargable, using the `routes` syntax also used to declare routes in `router@1.0`. + + +### lua_pricing_test/0 * ### + +`lua_pricing_test() -> any()` + +Ensure that Lua modules can be used as pricing and ledger devices. Our +modules come in two parts: +- A `process` module which is executed as a persistent `local-process` on the +node, and which maintains the state of the ledger. +- A `client` module, which is executed as a `p4@1.0` device, marshalling +requests to the `process` module. + ### non_chargable_route_test/0 * ### @@ -6301,23 +7460,23 @@ the `routes` syntax also used to declare routes in `router@1.0`. Test that a non-chargable route is not charged for. - + -### postprocess/3 ### +### request/3 ### -`postprocess(State, RawResponse, NodeMsg) -> any()` +`request(State, Raw, NodeMsg) -> any()` -Postprocess the request after it has been fulfilled. +Estimate the cost of a transaction and decide whether to proceed with +a request. The default behavior if `pricing-device` or `p4_balances` are +not set is to proceed, so it is important that a user initialize them. - + -### preprocess/3 ### +### response/3 ### -`preprocess(State, Raw, NodeMsg) -> any()` +`response(State, RawResponse, NodeMsg) -> any()` -Estimate the cost of a transaction and decide whether to proceed with -a request. The default behavior if `pricing-device` or `p4_balances` are -not set is to proceed, so it is important that a user initialize them. 
+Postprocess the request after it has been fulfilled. @@ -6346,43 +7505,85 @@ not set is to proceed, so it is important that a user initialize them. -A device that finds `PATCH` requests in the `results/outbox` -of its message, and applies them to it. +A device that can be used to reorganize a message: Moving data from +one path inside it to another. ## Description ## -This can be useful for processes -whose computation would like to manipulate data outside of the `results` key -of its message. + +This device's function runs in two modes: + +1. When using `all` to move all data at the path given in `from` to the +path given in `to`. +2. When using `patches` to move all submessages in the source to the target, +_if_ they have a `method` key of `PATCH` or a `device` key of `patch@1.0`. + +Source and destination paths may be prepended by `base:` or `req:` keys to +indicate that they are relative to either of the message`s that the +computation is being performed on. + +The search order for finding the source and destination keys is as follows, +where `X` is either `from` or `to`: + +1. The `patch-X` key of the execution message. +2. The `X` key of the execution message. +3. The `patch-X` key of the request message. +4. The `X` key of the request message. + +Additionally, this device implements the standard computation device keys, +allowing it to be used as an element of an execution stack pipeline, etc. ## Function Index ## -
compute/3Find PATCH requests in the results/outbox of the message, and apply -them to the state.
init/3Default process device hooks.
normalize/3
patch_to_submessage_test/0*
snapshot/3
uninitialized_patch_test/0*
+
all/3Get the value found at the patch-from key of the message, or the +from key if the former is not present.
all_mode_test/0*
compute/3
init/3Necessary hooks for compliance with the execution-device standard.
move/4*Unified executor for the all and patches modes.
normalize/3
patch_to_submessage_test/0*
patches/3Find relevant PATCH messages in the given source key of the execution +and request messages, and apply them to the given destination key of the +request.
req_prefix_test/0*
snapshot/3
uninitialized_patch_test/0*
## Function Details ## + + +### all/3 ### + +`all(Msg1, Msg2, Opts) -> any()` + +Get the value found at the `patch-from` key of the message, or the +`from` key if the former is not present. Remove it from the message and set +the new source to the value found. + + + +### all_mode_test/0 * ### + +`all_mode_test() -> any()` + ### compute/3 ### `compute(Msg1, Msg2, Opts) -> any()` -Find `PATCH` requests in the `results/outbox` of the message, and apply -them to the state. - ### init/3 ### `init(Msg1, Msg2, Opts) -> any()` -Default process device hooks. +Necessary hooks for compliance with the `execution-device` standard. + + + +### move/4 * ### + +`move(Mode, Msg1, Msg2, Opts) -> any()` + +Unified executor for the `all` and `patches` modes. @@ -6396,6 +7597,22 @@ Default process device hooks. `patch_to_submessage_test() -> any()` + + +### patches/3 ### + +`patches(Msg1, Msg2, Opts) -> any()` + +Find relevant `PATCH` messages in the given source key of the execution +and request messages, and apply them to the given destination key of the +request. + + + +### req_prefix_test/0 * ### + +`req_prefix_test() -> any()` + ### snapshot/3 ### @@ -7249,7 +8466,7 @@ continues until the there are no remaining messages to push.
additional_keys/3*Set the necessary keys in order for the recipient to know where the -message came from.
do_push/3*Push a message or slot number.
extract/2*Return either the target or the hint.
find_type/2*
full_push_test_/0*
is_async/3*Determine if the push is asynchronous.
multi_process_push_test_disabled/0*
normalize_message/2*Augment the message with from-* keys, if it doesn't already have them.
parse_redirect/1*
ping_pong_script/1*
push/3Push either a message or an assigned slot number.
push_prompts_encoding_change_test/0*
push_result_message/5*
push_with_mode/3*
push_with_redirect_hint_test_disabled/0*
remote_schedule_result/3*
reply_script/0*
schedule_initial_message/3*Push a message or a process, prior to pushing the resulting slot number.
schedule_result/3*
schedule_result/4*
split_target/1*
target_process/2*Find the target process ID for a message to push.
+message came from.do_push/3*Push a message or slot number, including its downstream results.extract/2*Return either the target or the hint.find_type/2*full_push_test_/0*is_async/3*Determine if the push is asynchronous.multi_process_push_test_/0*normalize_message/2*Augment the message with from-* keys, if it doesn't already have them.parse_redirect/1*ping_pong_script/1*push/3Push either a message or an assigned slot number.push_prompts_encoding_change_test/0*push_result_message/4*Push a downstream message result.push_with_mode/3*push_with_redirect_hint_test_disabled/0*remote_schedule_result/3*reply_script/0*schedule_initial_message/3*Push a message or a process, prior to pushing the resulting slot number.schedule_result/4*Add the necessary keys to the message to be scheduled, then schedule it.schedule_result/5*split_target/1*Split the target into the process ID and the optional query string.target_process/2*Find the target process ID for a message to push. @@ -7260,7 +8477,7 @@ message came from.do_push/3* ### additional_keys/3 * ### -`additional_keys(FromMsg, ToSched, Opts) -> any()` +`additional_keys(Origin, ToSched, Opts) -> any()` Set the necessary keys in order for the recipient to know where the message came from. @@ -7269,9 +8486,9 @@ message came from. ### do_push/3 * ### -`do_push(Base, Assignment, Opts) -> any()` +`do_push(Process, Assignment, Opts) -> any()` -Push a message or slot number. +Push a message or slot number, including its downstream results. @@ -7297,15 +8514,15 @@ Return either the `target` or the `hint`. ### is_async/3 * ### -`is_async(Base, Req, Opts) -> any()` +`is_async(Process, Req, Opts) -> any()` Determine if the push is asynchronous. - + -### multi_process_push_test_disabled/0 * ### +### multi_process_push_test_/0 * ### -`multi_process_push_test_disabled() -> any()` +`multi_process_push_test_() -> any()` @@ -7333,7 +8550,18 @@ Augment the message with from-* keys, if it doesn't already have them. 
`push(Base, Req, Opts) -> any()` -Push either a message or an assigned slot number. +Push either a message or an assigned slot number. If a `Process` is +provided in the `body` of the request, it will be scheduled (initializing +it if it does not exist). Otherwise, the message specified by the given +`slot` key will be pushed. + +Optional parameters: +`/result-depth`: The depth to which the full contents of the result +will be included in the response. Default: 1, returning +the full result of the first message, but only the 'tree' +of downstream messages. +`/push-mode`: Whether or not the push should be done asynchronously. +Default: `sync`, pushing synchronously. @@ -7341,17 +8569,22 @@ Push either a message or an assigned slot number. `push_prompts_encoding_change_test() -> any()` - + + +### push_result_message/4 * ### -### push_result_message/5 * ### +`push_result_message(TargetProcess, MsgToPush, Origin, Opts) -> any()` -`push_result_message(Base, FromSlot, Key, MsgToPush, Opts) -> any()` +Push a downstream message result. The `Origin` map contains information +about the origin of the message: The process that originated the message, +the slot number from which it was sent, and the outbox key of the message, +and the depth to which downstream results should be included in the message. ### push_with_mode/3 * ### -`push_with_mode(Base, Req, Opts) -> any()` +`push_with_mode(Process, Req, Opts) -> any()` @@ -7379,17 +8612,21 @@ Push either a message or an assigned slot number. Push a message or a process, prior to pushing the resulting slot number. - + -### schedule_result/3 * ### +### schedule_result/4 * ### -`schedule_result(Base, MsgToPush, Opts) -> any()` +`schedule_result(TargetProcess, MsgToPush, Origin, Opts) -> any()` - +Add the necessary keys to the message to be scheduled, then schedule it. +If the remote scheduler does not support the given codec, it will be +downgraded and re-signed. 
-### schedule_result/4 * ### + -`schedule_result(Base, MsgToPush, Codec, Opts) -> any()` +### schedule_result/5 * ### + +`schedule_result(TargetProcess, MsgToPush, Codec, Origin, Opts) -> any()` @@ -7397,6 +8634,8 @@ Push a message or a process, prior to pushing the resulting slot number. `split_target(RawTarget) -> any()` +Split the target into the process ID and the optional query string. + ### target_process/2 * ### @@ -7438,7 +8677,7 @@ Example usage: ## Function Index ## -
call/3Execute a call request using a node's routes.
call_get_test/0*
cast/3Execute a request in the same way as call/3, but asynchronously.
preprocess/3Preprocess a request to check if it should be relayed to a different node.
preprocessor_reroute_to_nearest_test/0*Test that the preprocess/3 function re-routes a request to remote +
call/3Execute a call request using a node's routes.
call_get_test/0*
cast/3Execute a request in the same way as call/3, but asynchronously.
request/3Preprocess a request to check if it should be relayed to a different node.
request_hook_reroute_to_nearest_test/0*Test that the request/3 function re-routes a request to remote
@@ -7476,19 +8715,19 @@ Defaults to `false`. Execute a request in the same way as `call/3`, but asynchronously. Always returns `<<"OK">>`. - + -### preprocess/3 ### +### request/3 ### -`preprocess(Msg1, Msg2, Opts) -> any()` +`request(Msg1, Msg2, Opts) -> any()` Preprocess a request to check if it should be relayed to a different node. - + -### preprocessor_reroute_to_nearest_test/0 * ### +### request_hook_reroute_to_nearest_test/0 * ### -`preprocessor_reroute_to_nearest_test() -> any()` +`request_hook_reroute_to_nearest_test() -> any()` Test that the `preprocess/3` function re-routes a request to remote peers, according to the node's routing table. @@ -7541,12 +8780,12 @@ The structure of the routes should be as follows:
add_route_test/0*
apply_route/2*Apply a node map's rules for transforming the path of the message.
apply_routes/3*Generate a uri key for each node in a route.
binary_to_bignum/1*Cast a human-readable or native-encoded ID to a big integer.
by_base_determinism_test/0*Ensure that By-Base always chooses the same node for the same -hashpath.
choose/5*Implements the load distribution strategies if given a cluster.
choose_1_test/1*
choose_n_test/1*
device_call_from_singleton_test/0*
dynamic_route_provider_test/0*
dynamic_router_test/0*Example of a Lua script being used as the route_provider for a +hashpath.
choose/5*Implements the load distribution strategies if given a cluster.
choose_1_test/1*
choose_n_test/1*
device_call_from_singleton_test/0*
dynamic_route_provider_test/0*
dynamic_router_test/0*Example of a Lua module being used as the route_provider for a HyperBEAM node.
dynamic_routing_by_performance/0*
dynamic_routing_by_performance_test_/0*Demonstrates routing tables being dynamically created and adjusted according to the real-time performance of nodes.
explicit_route_test/0*
extract_base/2*Extract the base message ID from a request message.
field_distance/2*Calculate the minimum distance between two numbers (either progressing backwards or forwards), assuming a -256-bit field.
find_target_path/2*Find the target path to route for a request message.
generate_hashpaths/1*
generate_nodes/1*
get_routes_test/0*
is_relevant/3is_relevant looks at the relevant_routes paths opt and if any incoming message path matches it will -make the request relevant for preprocessing.
load_routes/1*Load the current routes for the node.
local_dynamic_router_test/0*Example of a Lua script being used as the route_provider for a +256-bit field.
find_target_path/2*Find the target path to route for a request message.
generate_hashpaths/1*
generate_nodes/1*
get_routes_test/0*
info/1Exported function for getting device info, controls which functions are +exposed via the device API.
info/3HTTP info response providing information about this device.
load_routes/1*Load the current routes for the node.
local_dynamic_router_test/0*Example of a Lua module being used as the route_provider for a HyperBEAM node.
local_process_route_provider_test/0*
lowest_distance/1*Find the node with the lowest distance to the given hashpath.
lowest_distance/2*
match/3Find the first matching template in a list of known routes.
match_routes/3*
match_routes/4*
preprocess/3Preprocess a request to check if it should be relayed to a different node.
register/3
relay_nearest_test/0*
route/2Find the appropriate route for the given message.
route/3
route_provider_test/0*
route_regex_matches_test/0*
route_template_message_matches_test/0*
routes/3Device function that returns all known routes.
simulate/4*
simulation_distribution/2*
simulation_occurences/2*
strategy_suite_test_/0*
template_matches/3*Check if a message matches a message template or path regex.
unique_nodes/1*
unique_test/1*
weighted_random_strategy_test/0*
within_norms/3*
@@ -7636,8 +8875,8 @@ Implements the load distribution strategies if given a cluster. `dynamic_router_test() -> any()` -Example of a Lua script being used as the `route_provider` for a -HyperBEAM node. The script utilized in this example dynamically adjusts the +Example of a Lua module being used as the `route_provider` for a +HyperBEAM node. The module utilized in this example dynamically adjusts the likelihood of routing to a given node, depending upon price and performance. also include preprocessing support for routing @@ -7709,14 +8948,22 @@ Find the target path to route for a request message. `get_routes_test() -> any()` - + + +### info/1 ### + +`info(X1) -> any()` + +Exported function for getting device info, controls which functions are +exposed via the device API. -### is_relevant/3 ### + + +### info/3 ### -`is_relevant(Msg1, Msg2, Opts) -> any()` +`info(Msg1, Msg2, Opts) -> any()` -is_relevant looks at the relevant_routes paths opt and if any incoming message path matches it will -make the request relevant for preprocessing. +HTTP info response providing information about this device @@ -7734,8 +8981,8 @@ the node message's `routes` key, or dynamic routes generated by resolving the `local_dynamic_router_test() -> any()` -Example of a Lua script being used as the `route_provider` for a -HyperBEAM node. The script utilized in this example dynamically adjusts the +Example of a Lua module being used as the `route_provider` for a +HyperBEAM node. The module utilized in this example dynamically adjusts the likelihood of routing to a given node, depending upon price and performance. @@ -7985,7 +9232,7 @@ Write an assignment message into the cache. ### write_location/2 ### -`write_location(LocationMsg, Opts) -> any()` +`write_location(LocMsg, Opts) -> any()` Write the latest known scheduler location for an address. @@ -8352,12 +9599,13 @@ Process: `#{ id, Scheduler: #{ Authority } }`
benchmark_suite/2*
benchmark_suite_test_/0*
cache_remote_schedule/2*Cache a schedule received from a remote scheduler.
check_lookahead_and_local_cache/4*Check if we have a result from a lookahead worker or from our local cache.
checkpoint/1Returns the current state of the scheduler.
do_get_remote_schedule/6*Get a schedule from a remote scheduler, unless we have already read all of the assignments from the local cache.
do_post_schedule/4*Post schedule the message.
filter_json_assignments/3*Filter JSON assignment results from a remote legacy scheduler.
find_message_to_schedule/3*Search the given base and request message pair to find the message to -schedule.
find_remote_scheduler/3*Use the SchedulerLocation to the remote path and return a redirect.
find_server/3*Locate the correct scheduling server for a given process.
find_server/4*
find_target_id/3*Find the schedule ID from a given request.
generate_local_schedule/5*Generate a GET /schedule response for a process.
generate_redirect/3*Generate a redirect message to a scheduler.
get_hint/2*If a hint is present in the string, return it.
get_local_assignments/4*Get the assignments for a process, and whether the request was truncated.
get_local_schedule_test/0*
get_remote_schedule/5*Get a schedule from a remote scheduler, but first read all of the +schedule.
find_remote_scheduler/3*Use the SchedulerLocation to the remote path and return a redirect.
find_server/3*Locate the correct scheduling server for a given process.
find_server/4*
find_target_id/3*Find the schedule ID from a given request.
generate_local_schedule/5*Generate a GET /schedule response for a process.
generate_redirect/3*Generate a redirect message to a scheduler.
get_hint/2*If a hint is present in the string, return it.
get_local_assignments/4*Get the assignments for a process, and whether the request was truncated.
get_local_schedule_test/0*
get_location/3*Search for the location of the scheduler in the scheduler-location +cache.
get_remote_schedule/5*Get a schedule from a remote scheduler, but first read all of the assignments from the local cache that we already know about.
get_schedule/3*Generate and return a schedule for a process, optionally between two slots -- labelled as from and to.
http_get_json_schedule_test_/0*
http_get_legacy_schedule_as_aos2_test_/0*
http_get_legacy_schedule_slot_range_test_/0*
http_get_legacy_schedule_test_/0*
http_get_legacy_slot_test_/0*
http_get_schedule/4*
http_get_schedule/5*
http_get_schedule_redirect_test/0*
http_get_schedule_test_/0*
http_get_slot/2*
http_init/0*
http_init/1*
http_post_legacy_schedule_test_/0*
http_post_schedule_sign/4*
http_post_schedule_test/0*
info/0This device uses a default_handler to route requests to the correct -function.
many_clients/1*
message_cached_assignments/2*Non-device exported helper to get the cached assignments held in a +function.
location/3Router for record requests.
many_clients/1*
message_cached_assignments/2*Non-device exported helper to get the cached assignments held in a process.
next/3Load the schedule for a process into the cache, then return the next -assignment.
node_from_redirect/2*Get the node URL from a redirect.
post_legacy_schedule/4*
post_remote_schedule/4*
post_schedule/3*Schedules a new message on the SU.
read_local_assignments/4*Get the assignments for a process.
redirect_from_graphql_test/0*
redirect_to_hint_test/0*
register/3Generate a new scheduler location record and register it.
register_new_process_test/0*
register_scheduler_test/0*
remote_slot/3*Get the current slot from a remote scheduler.
remote_slot/4*Get the current slot from a remote scheduler, based on the variant of +assignment.
node_from_redirect/2*Get the node URL from a redirect.
post_legacy_schedule/4*
post_location/3*Generate a new scheduler location record and register it.
post_remote_schedule/4*
post_schedule/3*Schedules a new message on the SU.
read_local_assignments/4*Get the assignments for a process.
redirect_from_graphql_test/0*
redirect_to_hint_test/0*
register_location_on_boot_test/0*Test that a scheduler location is registered on boot.
register_new_process_test/0*
register_scheduler_test/0*
remote_slot/3*Get the current slot from a remote scheduler.
remote_slot/4*Get the current slot from a remote scheduler, based on the variant of the process's scheduler.
router/4The default handler for the scheduler device.
schedule/3A router for choosing between getting the existing schedule, or scheduling a new message.
schedule_message_and_get_slot_test/0*
single_resolution/1*
slot/3Returns information about the current slot for a process.
spawn_lookahead_worker/3*Spawn a new Erlang process to fetch the next assignments from the local cache, if we have them available.
start/0Helper to ensure that the environment is started.
status/3Returns information about the entire scheduler.
status_test/0*
test_process/0Generate a _transformed_ process message, not as they are generated @@ -8522,6 +9770,17 @@ Get the assignments for a process, and whether the request was truncated. `get_local_schedule_test() -> any()` + + +### get_location/3 * ### + +`get_location(Msg1, Req, Opts) -> any()` + +Search for the location of the scheduler in the scheduler-location +cache. If an address is provided, we search for the location of that +specific scheduler. Otherwise, we return the location record for the current +node's scheduler, if it has been established. + ### get_remote_schedule/5 * ### @@ -8640,6 +9899,14 @@ we redirect to the remote scheduler or proxy based on the node opts. This device uses a default_handler to route requests to the correct function. + + +### location/3 ### + +`location(Msg1, Msg2, Opts) -> any()` + +Router for `record` requests. Expects either a `POST` or `GET` request. + ### many_clients/1 * ### @@ -8680,6 +9947,15 @@ Get the node URL from a redirect. `post_legacy_schedule(ProcID, OnlyCommitted, Node, Opts) -> any()` + + +### post_location/3 * ### + +`post_location(Msg1, RawReq, Opts) -> any()` + +Generate a new scheduler location record and register it. We both send +the new scheduler-location to the given registry, and return it to the caller. + ### post_remote_schedule/4 * ### @@ -8716,14 +9992,13 @@ Get the assignments for a process. `redirect_to_hint_test() -> any()` - + -### register/3 ### +### register_location_on_boot_test/0 * ### -`register(Msg1, Req, Opts) -> any()` +`register_location_on_boot_test() -> any()` -Generate a new scheduler location record and register it. We both send -the new scheduler-location to the given registry, and return it to the caller. +Test that a scheduler location is registered on boot. @@ -8871,7 +10146,7 @@ definition.
balance/3Get the balance of a user in the ledger.
debit/3Preprocess a request by checking the ledger and charging the user.
estimate/3Estimate the cost of a request by counting the number of messages in -the request, then multiplying by the per-message price.
get_balance/2*Get the balance of a user in the ledger.
get_balance_and_top_up_test/0*
is_operator/2*Check if the request is from the operator.
set_balance/3*Adjust a user's balance, normalizing their wallet ID first.
test_opts/0*
test_opts/1*
topup/3Top up the user's balance in the ledger.
+the request, then multiplying by the per-message price.
get_balance/2*Get the balance of a user in the ledger.
get_balance_and_top_up_test/0*
is_operator/2*Check if the request is from the operator.
set_balance/3*Adjust a user's balance, normalizing their wallet ID first.
test_opts/1*
topup/3Top up the user's balance in the ledger.
@@ -8936,12 +10211,6 @@ Check if the request is from the operator. Adjust a user's balance, normalizing their wallet ID first. - - -### test_opts/0 * ### - -`test_opts() -> any()` - ### test_opts/1 * ### @@ -9063,8 +10332,8 @@ as well as generating them, if called in an appropriate environment.
execute_is_trusted/3*Ensure that all of the software hashes are trusted.
generate/3Generate a commitment report and emit it as a message, including all
generate_nonce/2*Generate the nonce to use in the commitment report.
init/3Should take in options to set for the device such as kernel, initrd, firmware, -and append hashes and make them available to the device.
is_debug/1*Ensure that the node's debug policy is disabled.
real_node_test/0*
report_data_matches/3*Ensure that the report data matches the expected report data.
trusted/3Default implementation of a resolver for trusted software.
verify/3Verify an commitment report message; validating the identity of a +hashes).
generate_nonce/2*Generate the nonce to use in the commitment report.
is_debug/1*Ensure that the node's debug policy is disabled.
real_node_test/0*
report_data_matches/3*Ensure that the report data matches the expected report data.
trusted/3Validates if a given message parameter matches a trusted value from the SNP trusted list +Returns {ok, true} if the message is trusted, {ok, false} otherwise.
verify/3Verify an commitment report message; validating the identity of a remote node, its ephemeral private address, and the integrity of the report.
@@ -9095,22 +10364,11 @@ hashes). -### generate_nonce/2 * ### - -`generate_nonce(RawAddress, RawNodeMsgID) -> any()` - -Generate the nonce to use in the commitment report. - - - -### init/3 ### +### generate_nonce/2 * ### -`init(M1, M2, Opts) -> any()` +`generate_nonce(RawAddress, RawNodeMsgID) -> any()` -Should take in options to set for the device such as kernel, initrd, firmware, -and append hashes and make them available to the device. Only runnable once, -and only if the operator is not set to an address (and thus, the node has not -had any priviledged access). +Generate the nonce to use in the commitment report. @@ -9140,9 +10398,8 @@ Ensure that the report data matches the expected report data. `trusted(Msg1, Msg2, NodeOpts) -> any()` -Default implementation of a resolver for trusted software. Searches the -`trusted` key in the base message for a list of trusted values, and checks -if the value in the request message is a member of that list. +Validates if a given message parameter matches a trusted value from the SNP trusted list +Returns {ok, true} if the message is trusted, {ok, false} otherwise @@ -9542,10 +10799,10 @@ keyInDevice executed on DeviceName against Msg1.
compute/3Example implementation of a compute handler.
compute_test/0*
delay/3Does nothing, just sleeps Req/duration or 750 ms and returns the -appropriate form in order to be used as preprocessor.
device_with_function_key_module_test/0*Tests the resolution of a default function.
increment_counter/3Find a test worker's PID and send it an increment message.
info/1Exports a default_handler function that can be used to test the +appropriate form in order to be used as a hook.
device_with_function_key_module_test/0*Tests the resolution of a default function.
increment_counter/3Find a test worker's PID and send it an increment message.
info/1Exports a default_handler function that can be used to test the handler resolution mechanism.
info/3Exports a default_handler function that can be used to test the handler resolution mechanism.
init/3Example init/3 handler.
mul/2Example implementation of an imported function for a WASM -executor.
postprocess/3Set the postprocessor-called key to true in the HTTP server.
restore/3Example restore/3 handler.
restore_test/0*
snapshot/3Do nothing when asked to snapshot.
test_func/1
update_state/3Find a test worker's PID and send it an update message.
+executor.
restore/3Example restore/3 handler.
restore_test/0*
snapshot/3Do nothing when asked to snapshot.
test_func/1
update_state/3Find a test worker's PID and send it an update message.
@@ -9575,7 +10832,7 @@ slot number in the results key. `delay(Msg1, Req, Opts) -> any()` Does nothing, just sleeps `Req/duration or 750` ms and returns the -appropriate form in order to be used as preprocessor. +appropriate form in order to be used as a hook. @@ -9628,14 +10885,6 @@ Example `init/3` handler. Sets the `Already-Seen` key to an empty list. Example implementation of an `imported` function for a WASM executor. - - -### postprocess/3 ### - -`postprocess(Msg, X2, Opts) -> any()` - -Set the `postprocessor-called` key to true in the HTTP server. - ### restore/3 ### @@ -9676,6 +10925,281 @@ Find a test worker's PID and send it an update message. --- END OF FILE: docs/resources/source-code/dev_test.md --- +--- START OF FILE: docs/resources/source-code/dev_volume.md --- +# [Module dev_volume.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_volume.erl) + + + + +Secure Volume Management for HyperBEAM Nodes. + + + +## Description ## + +This module handles encrypted storage operations for HyperBEAM, providing +a robust and secure approach to data persistence. It manages the complete +lifecycle of encrypted volumes from detection to creation, formatting, and +mounting. + +Key responsibilities: +- Volume detection and initialization +- Encrypted partition creation and formatting +- Secure mounting using cryptographic keys +- Store path reconfiguration to use mounted volumes +- Automatic handling of various system states +(new device, existing partition, etc.) + +The primary entry point is the `mount/3` function, which orchestrates the +entire process based on the provided configuration parameters. This module +works alongside `hb_volume` which provides the low-level operations for +device manipulation. 
+ +Security considerations: +- Ensures data at rest is protected through LUKS encryption +- Provides proper volume sanitization and secure mounting +- IMPORTANT: This module only applies configuration set in node options and +does NOT accept disk operations via HTTP requests. It cannot format arbitrary +disks as all operations are safeguarded by host operating system permissions +enforced upon the HyperBEAM environment. + +## Function Index ## + + +
check_base_device/8*Check if the base device exists and if it does, check if the partition exists.
check_partition/8*Check if the partition exists.
create_and_mount_partition/8*Create, format and mount a new partition.
decrypt_volume_key/2*Decrypts an encrypted volume key using the node's private key.
format_and_mount/6*Format and mount a newly created partition.
info/1Exported function for getting device info, controls which functions are +exposed via the device API.
info/3HTTP info response providing information about this device.
mount/3Handles the complete process of secure encrypted volume mounting.
mount_existing_partition/6*Mount an existing partition.
mount_formatted_partition/6*Mount a newly formatted partition.
public_key/3Returns the node's public key for secure key exchange.
update_node_config/2*Update the node's configuration with the new store.
update_store_path/2*Update the store path to use the mounted volume.
+ + + + +## Function Details ## + + + +### check_base_device/8 * ### + +

+check_base_device(Device::term(), Partition::term(), PartitionType::term(), VolumeName::term(), MountPoint::term(), StorePath::term(), Key::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Device`: The base device to check.
`Partition`: The partition to check.
`PartitionType`: The type of partition to check.
`VolumeName`: The name of the volume to check.
`MountPoint`: The mount point to check.
`StorePath`: The store path to check.
`Key`: The key to check.
`Opts`: The options to check.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Check if the base device exists and if it does, check if the partition exists. + + + +### check_partition/8 * ### + +

+check_partition(Device::term(), Partition::term(), PartitionType::term(), VolumeName::term(), MountPoint::term(), StorePath::term(), Key::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Device`: The base device to check.
`Partition`: The partition to check.
`PartitionType`: The type of partition to check.
`VolumeName`: The name of the volume to check.
`MountPoint`: The mount point to check.
`StorePath`: The store path to check.
`Key`: The key to check.
`Opts`: The options to check.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Check if the partition exists. If it does, attempt to mount it. +If it doesn't exist, create it, format it with encryption and mount it. + + + +### create_and_mount_partition/8 * ### + +

+create_and_mount_partition(Device::term(), Partition::term(), PartitionType::term(), Key::term(), MountPoint::term(), VolumeName::term(), StorePath::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Device`: The device to create the partition on.
`Partition`: The partition to create.
`PartitionType`: The type of partition to create.
`Key`: The key to create the partition with.
`MountPoint`: The mount point to mount the partition to.
`VolumeName`: The name of the volume to mount.
`StorePath`: The store path to mount.
`Opts`: The options to mount.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Create, format and mount a new partition. + + + +### decrypt_volume_key/2 * ### + +

+decrypt_volume_key(EncryptedKeyBase64::binary(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Opts`: A map of configuration options.
+ +returns: `{ok, DecryptedKey}` on successful decryption, or +`{error, Binary}` if decryption fails. + +Decrypts an encrypted volume key using the node's private key. + +This function takes an encrypted key (typically sent by a client who encrypted +it with the node's public key) and decrypts it using the node's private RSA key. + + + +### format_and_mount/6 * ### + +

+format_and_mount(Partition::term(), Key::term(), MountPoint::term(), VolumeName::term(), StorePath::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Partition`: The partition to format and mount.
`Key`: The key to format and mount the partition with.
`MountPoint`: The mount point to mount the partition to.
`VolumeName`: The name of the volume to mount.
`StorePath`: The store path to mount.
`Opts`: The options to mount.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Format and mount a newly created partition. + + + +### info/1 ### + +`info(X1) -> any()` + +Exported function for getting device info, controls which functions are +exposed via the device API. + + + +### info/3 ### + +`info(Msg1, Msg2, Opts) -> any()` + +HTTP info response providing information about this device + + + +### mount/3 ### + +

+mount(M1::term(), M2::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`M1`: Base message for context.
`M2`: Request message with operation details.
`Opts`: A map of configuration options for volume operations.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Handles the complete process of secure encrypted volume mounting. + +This function performs the following operations depending on the state: +1. Validates the encryption key is present +2. Checks if the base device exists +3. Checks if the partition exists on the device +4. If the partition exists, attempts to mount it +5. If the partition doesn't exist, creates it, formats it with encryption +and mounts it +6. Updates the node's store configuration to use the mounted volume + +Config options in Opts map: +- volume_key: (Required) The encryption key +- volume_device: Base device path +- volume_partition: Partition path +- volume_partition_type: Filesystem type +- volume_name: Name for encrypted volume +- volume_mount_point: Where to mount +- volume_store_path: Store path on volume + + + +### mount_existing_partition/6 * ### + +

+mount_existing_partition(Partition::term(), Key::term(), MountPoint::term(), VolumeName::term(), StorePath::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Partition`: The partition to mount.
`Key`: The key to mount.
`MountPoint`: The mount point to mount.
`VolumeName`: The name of the volume to mount.
`StorePath`: The store path to mount.
`Opts`: The options to mount.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Mount an existing partition. + + + +### mount_formatted_partition/6 * ### + +

+mount_formatted_partition(Partition::term(), Key::term(), MountPoint::term(), VolumeName::term(), StorePath::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Partition`: The partition to mount.
`Key`: The key to mount the partition with.
`MountPoint`: The mount point to mount the partition to.
`VolumeName`: The name of the volume to mount.
`StorePath`: The store path to mount.
`Opts`: The options to mount.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Mount a newly formatted partition. + + + +### public_key/3 ### + +

+public_key(M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, binary()}
+
+
+ +`Opts`: A map of configuration options.
+ +returns: `{ok, Map}` containing the node's public key on success, or +`{error, Binary}` if the node's wallet is not available. + +Returns the node's public key for secure key exchange. + +This function retrieves the node's wallet and extracts the public key +for encryption purposes. It allows users to securely exchange encryption keys +by first encrypting their volume key with the node's public key. + +The process ensures that sensitive keys are never transmitted in plaintext. +The encrypted key can then be securely sent to the node, which will decrypt it +using its private key before using it for volume encryption. + + + +### update_node_config/2 * ### + +

+update_node_config(NewStore::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`NewStore`: The new store to update the node's configuration with.
`Opts`: The options to update the node's configuration with.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Update the node's configuration with the new store. + + + +### update_store_path/2 * ### + +

+update_store_path(StorePath::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`StorePath`: The store path to update.
`Opts`: The options to update.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Update the store path to use the mounted volume. + + +--- END OF FILE: docs/resources/source-code/dev_volume.md --- + --- START OF FILE: docs/resources/source-code/dev_wasi.md --- # [Module dev_wasi.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_wasi.erl) @@ -10081,7 +11605,7 @@ execution under different circumstances.
as_path_test/1*
basic_get_test/1*
basic_set_test/1*
continue_as_test/1*
deep_recursive_get_test/1*
deep_set_new_messages_test/0*
deep_set_test/1*
deep_set_with_device_test/1*
denormalized_device_key_test/1*
device_excludes_test/1*
device_exports_test/1*
device_with_default_handler_function_test/1*
device_with_handler_function_test/1*
exec_dummy_device/2*Ensure that we can read a device from the cache then execute it.
gen_default_device/0*Create a simple test device that implements the default handler.
gen_handler_device/0*Create a simple test device that implements the handler key.
generate_device_with_keys_using_args/0*Generates a test device with three keys, each of which uses -progressively more of the arguments that can be passed to a device key.
get_as_with_device_test/1*
get_with_device_test/1*
key_from_id_device_with_args_test/1*Test that arguments are passed to a device key as expected.
key_to_binary_test/1*
list_transform_test/1*
load_as_test/1*
load_device_test/0*
recursive_get_test/1*
resolve_binary_key_test/1*
resolve_from_multiple_keys_test/1*
resolve_id_test/1*
resolve_key_twice_test/1*
resolve_path_element_test/1*
resolve_simple_test/1*
run_all_test_/0*Run each test in the file with each set of options.
run_test/0*
set_with_device_test/1*
start_as_test/1*
start_as_with_parameters_test/1*
test_opts/0*
test_suite/0*
untrusted_load_device_test/0*
+progressively more of the arguments that can be passed to a device key.get_as_with_device_test/1*get_with_device_test/1*key_from_id_device_with_args_test/1*Test that arguments are passed to a device key as expected.key_to_binary_test/1*list_transform_test/1*load_as_test/1*load_device_test/0*recursive_get_test/1*resolve_binary_key_test/1*resolve_from_multiple_keys_test/1*resolve_id_test/1*resolve_key_twice_test/1*resolve_path_element_test/1*resolve_simple_test/1*run_all_test_/0*Run each test in the file with each set of options.run_test/0*set_with_device_test/1*start_as_test/1*start_as_with_parameters_test/1*step_hook_test/1*test_opts/0*test_suite/0*untrusted_load_device_test/0* @@ -10322,6 +11846,12 @@ the store for each test. `start_as_with_parameters_test(Opts) -> any()` + + +### step_hook_test/1 * ### + +`step_hook_test(InitOpts) -> any()` + ### test_opts/0 * ### @@ -10441,7 +11971,7 @@ HyperBEAM device implementations are defined as follows:
deep_set/4Recursively search a map, resolving keys, and set the value of the key at the given path.
default_module/0*The default device is the identity device, which simply returns the -value associated with any key as it exists in its Erlang map.
device_set/4*Call the device's set function.
do_resolve_many/2*
ensure_loaded/2*Ensure that the message is loaded from the cache if it is an ID.
error_execution/5*Handle an error in a device call.
error_infinite/3*Catch all return if we are in an infinite loop.
error_invalid_intermediate_status/5*
error_invalid_message/3*Catch all return if the message is invalid.
find_exported_function/5Find the function with the highest arity that has the given name, if it +value associated with any key as it exists in its Erlang map.
device_set/4*Call the device's set function.
device_set/5*
do_resolve_many/2*
ensure_loaded/2*Ensure that the message is loaded from the cache if it is an ID.
error_execution/5*Handle an error in a device call.
error_infinite/3*Catch all return if we are in an infinite loop.
error_invalid_intermediate_status/5*
error_invalid_message/3*Catch all return if the message is invalid.
find_exported_function/5Find the function with the highest arity that has the given name, if it exists.
force_message/2
get/2Shortcut for resolving a key in a message without its status if it is ok.
get/3
get/4
get_first/2take a sequence of base messages and paths, then return the value of the first message that can be resolved using a path.
get_first/3
info/2Get the info map for a device, optionally giving it a message if the @@ -10464,7 +11994,10 @@ actually takes.
@@ -10485,6 +12018,12 @@ according to the `Message2` passed to it. Call the device's `set` function. + + +### device_set/5 * ### + +`device_set(Msg, Key, Value, Mode, Opts) -> any()` + ### do_resolve_many/2 * ### @@ -10776,11 +12315,13 @@ The resolver is composed of a series of discrete phases: 4: Persistent-resolver lookup. 5: Device lookup. 6: Execution. -7: Cryptographic linking. -8: Result caching. -9: Notify waiters. -10: Fork worker. -11: Recurse or terminate. +7: Execution of the `step` hook. +8: Subresolution. +9: Cryptographic linking. +10: Result caching. +11: Notify waiters. +12: Fork worker. +13: Recurse or terminate. @@ -11573,7 +13114,7 @@ A module that helps to render given Key graphs into the .dot files. ## Function Index ## -
add_arc/4*Add an arc to the graph.
add_node/3*Add a node to the graph.
cache_path_to_dot/2Generate a dot file from a cache path and options/store.
cache_path_to_dot/3
cache_path_to_graph/3*Main function to collect graph elements.
collect_output/2*Helper function to collect output from port.
dot_to_svg/1Convert a dot graph to SVG format.
extract_label/1*Extract a label from a path.
graph_to_dot/1*Generate the DOT file from the graph.
prepare_deeply_nested_complex_message/0
prepare_signed_data/0
prepare_unsigned_data/0
process_composite_node/6*Process a composite (directory) node.
process_simple_node/6*Process a simple (leaf) node.
render/1Render the given Key into svg.
render/2
test_signed/2*
test_unsigned/1*
traverse_store/4*Traverse the store recursively to build the graph.
+
add_arc/4*Add an arc to the graph.
add_node/3*Add a node to the graph.
cache_path_to_dot/2Generate a dot file from a cache path and options/store.
cache_path_to_dot/3
cache_path_to_graph/3Main function to collect graph elements.
collect_output/2*Helper function to collect output from port.
dot_to_svg/1Convert a dot graph to SVG format.
extract_label/1*Extract a label from a path.
get_graph_data/1Get graph data for the Three.js visualization.
get_label/1*Extract a readable label from a path.
get_node_type/1*Convert node color from hb_cache_render to node type for visualization.
graph_to_dot/1*Generate the DOT file from the graph.
prepare_deeply_nested_complex_message/0
prepare_signed_data/0
prepare_unsigned_data/0
process_composite_node/6*Process a composite (directory) node.
process_simple_node/6*Process a simple (leaf) node.
render/1Render the given Key into svg.
render/2
test_signed/2*
test_unsigned/1*
traverse_store/4*Traverse the store recursively to build the graph.
@@ -11612,7 +13153,7 @@ Generate a dot file from a cache path and options/store -### cache_path_to_graph/3 * ### +### cache_path_to_graph/3 ### `cache_path_to_graph(ToRender, GraphOpts, StoreOrOpts) -> any()` @@ -11642,6 +13183,30 @@ Convert a dot graph to SVG format Extract a label from a path + + +### get_graph_data/1 ### + +`get_graph_data(Opts) -> any()` + +Get graph data for the Three.js visualization + + + +### get_label/1 * ### + +`get_label(Path) -> any()` + +Extract a readable label from a path + + + +### get_node_type/1 * ### + +`get_node_type(Color) -> any()` + +Convert node color from hb_cache_render to node type for visualization + ### graph_to_dot/1 * ### @@ -13126,8 +14691,9 @@ the execution parameters of all downstream requests to be controlled.
allowed_methods/2Return the list of allowed methods for the HTTP server.
cors_reply/2*Reply to CORS preflight requests.
get_opts/1
handle_request/3*Handle all non-CORS preflight requests as AO-Core requests.
http3_conn_sup_loop/0*
init/2Entrypoint for all HTTP requests.
new_server/1*
read_body/1*Helper to grab the full body of a HTTP request, even if it's chunked.
read_body/2*
set_default_opts/1
set_opts/1Update the Opts map that the HTTP server uses for all future -requests.
start/0Starts the HTTP server.
start/1
start_http2/3*
start_http3/3*
start_node/0Test that we can start the server, send a message, and get a response.
start_node/1
+
allowed_methods/2Return the list of allowed methods for the HTTP server.
cors_reply/2*Reply to CORS preflight requests.
get_opts/1
handle_request/3*Handle all non-CORS preflight requests as AO-Core requests.
http3_conn_sup_loop/0*
init/2Entrypoint for all HTTP requests.
new_server/1*Trigger the creation of a new HTTP server node.
read_body/1*Helper to grab the full body of a HTTP request, even if it's chunked.
read_body/2*
set_default_opts/1
set_node_opts_test/0*Ensure that the start hook can be used to modify the node options.
set_opts/1Merges the provided Opts with uncommitted values from Request, +preserves the http_server value, and updates node_history by prepending +the Request.
set_opts/2
start/0Starts the HTTP server.
start/1
start_http2/3*
start_http3/3*
start_node/0Test that we can start the server, send a message, and get a response.
start_node/1
@@ -13188,6 +14754,12 @@ the server ID, which can be used to lookup the node message. `new_server(RawNodeMsg) -> any()` +Trigger the creation of a new HTTP server node. Accepts a `NodeMsg` +message, which is used to configure the server. This function executed the +`start` hook on the node, giving it the opportunity to modify the `NodeMsg` +before it is used to configure the server. The `start` hook expects gives and +expects the node message to be in the `body` key. + ### read_body/1 * ### @@ -13208,14 +14780,34 @@ Helper to grab the full body of a HTTP request, even if it's chunked. `set_default_opts(Opts) -> any()` + + +### set_node_opts_test/0 * ### + +`set_node_opts_test() -> any()` + +Ensure that the `start` hook can be used to modify the node options. We +do this by creating a message with a device that has a `start` key. This +key takes the message's body (the anticipated node options) and returns a +modified version of that body, which will be used to configure the node. We +then check that the node options were modified as we expected. + ### set_opts/1 ### `set_opts(Opts) -> any()` -Update the `Opts` map that the HTTP server uses for all future -requests. +Merges the provided `Opts` with uncommitted values from `Request`, +preserves the http_server value, and updates node_history by prepending +the `Request`. If a server reference exists, updates the Cowboy environment +variable 'node_msg' with the resulting options map. + + + +### set_opts/2 ### + +`set_opts(Request, Opts) -> any()` @@ -13712,6 +15304,87 @@ Takes a term in Erlang's native form and encodes it as a JSON string. --- END OF FILE: docs/resources/source-code/hb_json.md --- +--- START OF FILE: docs/resources/source-code/hb_keccak.md --- +# [Module hb_keccak.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_keccak.erl) + + + + + + +## Function Index ## + + +
hash_to_checksum_address/2*
init/0*
keccak_256/1
keccak_256_key_test/0*
keccak_256_key_to_address_test/0*
keccak_256_test/0*
key_to_ethereum_address/1
sha3_256/1
sha3_256_test/0*
to_hex/1*
+ + + + +## Function Details ## + + + +### hash_to_checksum_address/2 * ### + +`hash_to_checksum_address(Last40, Hash) -> any()` + + + +### init/0 * ### + +`init() -> any()` + + + +### keccak_256/1 ### + +`keccak_256(Bin) -> any()` + + + +### keccak_256_key_test/0 * ### + +`keccak_256_key_test() -> any()` + + + +### keccak_256_key_to_address_test/0 * ### + +`keccak_256_key_to_address_test() -> any()` + + + +### keccak_256_test/0 * ### + +`keccak_256_test() -> any()` + + + +### key_to_ethereum_address/1 ### + +`key_to_ethereum_address(Key) -> any()` + + + +### sha3_256/1 ### + +`sha3_256(Bin) -> any()` + + + +### sha3_256_test/0 * ### + +`sha3_256_test() -> any()` + + + +### to_hex/1 * ### + +`to_hex(Bin) -> any()` + + +--- END OF FILE: docs/resources/source-code/hb_keccak.md --- + --- START OF FILE: docs/resources/source-code/hb_logger.md --- # [Module hb_logger.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_logger.erl) @@ -14731,10 +16404,10 @@ with a refusal to execute.
cached_os_env/2*Cache the result of os:getenv/1 in the process dictionary, as it never -changes during the lifetime of a node.
config_lookup/2*An abstraction for looking up configuration variables.
default_message/0The default configuration options of the hyperbeam node.
get/1Get an option from the global options, optionally overriding with a +changes during the lifetime of a node.
check_required_opts/2Utility function to check for required options in a list.
config_lookup/2*An abstraction for looking up configuration variables.
default_message/0The default configuration options of the hyperbeam node.
get/1Get an option from the global options, optionally overriding with a local Opts map if prefer or only is set to local.
get/2
get/3
global_get/2*Get an environment variable or configuration key.
load/1Parse a flat@1.0 encoded file into a map, matching the types of the keys to those in the default message.
load_bin/1
mimic_default_types/2Mimic the types of the default message for a given map.
normalize_default/1*Get an option from environment variables, optionally consulting the -hb_features of the node if a conditional default tuple is provided.
+hb_features of the node if a conditional default tuple is provided.validate_node_history/1Validate that the node_history length is within an acceptable range.validate_node_history/3 @@ -14750,6 +16423,26 @@ keys to those in the default message. + +### check_required_opts/2 ### + +

+check_required_opts(KeyValuePairs::[{binary(), term()}], Opts::map()) -> {ok, map()} | {error, binary()}
+
+
+ +`KeyValuePairs`: A list of {Name, Value} pairs to check.
`Opts`: The original options map to return if validation succeeds.
+ +returns: `{ok, Opts}` if all required options are present, or +`{error, <<"Missing required parameters: ", MissingOptsStr/binary>>}` +where `MissingOptsStr` is a comma-separated list of missing option names. + +Utility function to check for required options in a list. +Takes a list of {Name, Value} pairs and returns: +- {ok, Opts} when all required options are present (Value =/= not_found) +- {error, ErrorMsg} with a message listing all missing options when any are not_found + ### config_lookup/2 * ### @@ -14834,6 +16527,20 @@ Mimic the types of the default message for a given map. Get an option from environment variables, optionally consulting the `hb_features` of the node if a conditional default tuple is provided. + + +### validate_node_history/1 ### + +`validate_node_history(Opts) -> any()` + +Validate that the node_history length is within an acceptable range. + + + +### validate_node_history/3 ### + +`validate_node_history(Opts, MinLength, MaxLength) -> any()` + --- END OF FILE: docs/resources/source-code/hb_opts.md --- @@ -15655,7 +17362,6 @@ Helper function for setting the complete private element of a message. -* [Data Types](#types) A parser that translates AO-Core HTTP API requests in TABM format into an ordered list of messages to evaluate. @@ -15726,10 +17432,10 @@ tabm_message() = map() ## Function Index ## - + @@ -18970,6 +20685,7 @@ Use the navigation menu to dive into specific parts of the codebase. Each module + @@ -18995,6 +20711,7 @@ Use the navigation menu to dive into specific parts of the codebase. Each module + @@ -19027,7 +20744,6 @@ Use the navigation menu to dive into specific parts of the codebase. Each module -* [Data Types](#types) Distributed under the Mozilla Public License v2.0. 
@@ -19445,7 +21161,7 @@ To successfully build and run a HyperBEAM node, your system needs several softwa ncurses-dev \ libssl-dev \ sudo \ - curl \ + curl ca-certificates ``` @@ -19481,14 +21197,14 @@ Installation methods: ```bash sudo apt install erlang ``` - + === "Source Build" Download from [erlang.org](https://www.erlang.org/downloads) and follow the build instructions for your platform. @@ -19507,13 +21223,13 @@ Installation methods: === "Linux / macOS (Direct Download)" Get the `rebar3` binary from the [official website](https://rebar3.org/). Place the downloaded `rebar3` file in your system's `PATH` (e.g., `/usr/local/bin`) and make it executable (`chmod +x rebar3`). -=== "asdf (Recommended)" + ### Node.js diff --git a/docs/llms.txt b/docs/llms.txt index b5045ccb1..31023fbde 100644 --- a/docs/llms.txt +++ b/docs/llms.txt @@ -1,4 +1,4 @@ -Generated: 2025-05-02T14:36:17Z +Generated: 2025-05-15T13:32:25Z ## HyperBEAM Documentation Summary @@ -7,8 +7,12 @@ Key sections include: Getting Started (begin), Running HyperBEAM (run), Develope ## Documentation Pages by Section -### uegin +### introduction +* [AO Devices](./introduction/ao-devices.html) +* [Pathing in AO-Core](./introduction/pathing-in-ao-core.html) +* [What is AO-Core?](./introduction/what-is-ao-core.html) +* [What is HyperBEAM?](./introduction/what-is-hyperbeam.html) ### run @@ -17,16 +21,20 @@ Key sections include: Getting Started (begin), Running HyperBEAM (run), Develope * [Running a HyperBEAM Node](./run/running-a-hyperbeam-node.html) * [Trusted Execution Environment (TEE)](./run/tee-nodes.html) -### guides +### build +* [Exposing Process State with the Patch Device](./build/exposing-process-state.html) +* [Extending HyperBEAM](./build/extending-hyperbeam.html) +* [Getting Started Building on AO-Core](./build/get-started-building-on-ao-core.html) +* [Serverless Decentralized Compute on AO](./build/serverless-decentralized-compute.html) ### devices -* [Devices](./devices/index.html) * [Device: 
~json@1.0](./devices/json-at-1-0.html) * [Device: ~lua@5.3a](./devices/lua-at-5-3a.html) * [Device: ~message@1.0](./devices/message-at-1-0.html) * [Device: ~meta@1.0](./devices/meta-at-1-0.html) +* [Devices](./devices/overview.html) * [Device: ~process@1.0](./devices/process-at-1-0.html) * [Device: ~relay@1.0](./devices/relay-at-1-0.html) * [Device: ~scheduler@1.0](./devices/scheduler-at-1-0.html) @@ -59,6 +67,7 @@ Key sections include: Getting Started (begin), Running HyperBEAM (run), Develope * [[Module dev_faff.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_faff.erl)](./resources/source-code/dev_faff.html) * [[Module dev_genesis_wasm.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_genesis_wasm.erl)](./resources/source-code/dev_genesis_wasm.html) * [[Module dev_green_zone.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_green_zone.erl)](./resources/source-code/dev_green_zone.html) +* [[Module dev_hook.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_hook.erl)](./resources/source-code/dev_hook.html) * [[Module dev_hyperbuddy.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_hyperbuddy.erl)](./resources/source-code/dev_hyperbuddy.html) * [[Module dev_json_iface.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_json_iface.erl)](./resources/source-code/dev_json_iface.html) * [[Module dev_local_name.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_local_name.erl)](./resources/source-code/dev_local_name.html) @@ -92,6 +101,7 @@ Key sections include: Getting Started (begin), Running HyperBEAM (run), Develope * [[Module dev_snp_nif.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_snp_nif.erl)](./resources/source-code/dev_snp_nif.html) * [[Module dev_stack.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_stack.erl)](./resources/source-code/dev_stack.html) * [[Module 
dev_test.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_test.erl)](./resources/source-code/dev_test.html) +* [[Module dev_volume.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_volume.erl)](./resources/source-code/dev_volume.html) * [[Module dev_wasi.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_wasi.erl)](./resources/source-code/dev_wasi.html) * [[Module dev_wasm.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_wasm.erl)](./resources/source-code/dev_wasm.html) * [[Module hb.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb.erl)](./resources/source-code/hb.html) @@ -117,6 +127,7 @@ Key sections include: Getting Started (begin), Running HyperBEAM (run), Develope * [[Module hb_http_client_sup.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http_client_sup.erl)](./resources/source-code/hb_http_client_sup.html) * [[Module hb_http_server.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_http_server.erl)](./resources/source-code/hb_http_server.html) * [[Module hb_json.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_json.erl)](./resources/source-code/hb_json.html) +* [[Module hb_keccak.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_keccak.erl)](./resources/source-code/hb_keccak.html) * [[Module hb_logger.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_logger.erl)](./resources/source-code/hb_logger.html) * [[Module hb_message.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_message.erl)](./resources/source-code/hb_message.html) * [[Module hb_metrics_collector.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_metrics_collector.erl)](./resources/source-code/hb_metrics_collector.html) diff --git a/docs/resources/reference/faq.md b/docs/resources/reference/faq.md index 4c1ebb489..af7bd79b3 100644 --- a/docs/resources/reference/faq.md +++ b/docs/resources/reference/faq.md @@ -25,7 +25,7 @@ You can build a wide range of applications, 
including: ### Is HyperBEAM open source? -Yes, HyperBEAM is open-source software licensed under the MIT License. +Yes, HyperBEAM is open-source software licensed under the Business Source License. ### What is the current focus or phase of HyperBEAM development? diff --git a/docs/resources/reference/glossary.md b/docs/resources/reference/glossary.md index 4191e40e6..cdfe3355e 100644 --- a/docs/resources/reference/glossary.md +++ b/docs/resources/reference/glossary.md @@ -106,7 +106,7 @@ For a more comprehensive glossary of terms used in the permaweb, try the [Permaw
diff --git a/docs/resources/source-code/README.md b/docs/resources/source-code/README.md index c675cb879..7e15fb008 100644 --- a/docs/resources/source-code/README.md +++ b/docs/resources/source-code/README.md @@ -28,6 +28,7 @@ + @@ -61,6 +62,7 @@ + @@ -86,6 +88,7 @@ + diff --git a/docs/resources/source-code/ar_wallet.md b/docs/resources/source-code/ar_wallet.md index 09ce07477..da8b2069e 100644 --- a/docs/resources/source-code/ar_wallet.md +++ b/docs/resources/source-code/ar_wallet.md @@ -8,7 +8,7 @@ ## Function Index ## -
all_path_parts/2*Extract all of the parts from the binary, given (a list of) separators.
append_path/2*
apply_types/1*Step 3: Apply types to values and remove specifiers.
basic_hashpath_test/0*
basic_hashpath_to_test/0*
build_messages/2*Step 5: Merge the base message with the scoped messages.
decode_string/1*Attempt Cowboy URL decode, then sanitize the result.
do_build/3*
from/1Normalize a singleton TABM message into a list of executable AO-Core +
all_path_parts/2*Extract all of the parts from the binary, given (a list of) separators.
append_path/2*
apply_types/1*Step 3: Apply types to values and remove specifiers.
basic_hashpath_test/0*
basic_hashpath_to_test/0*
build/3*
build_messages/2*Step 5: Merge the base message with the scoped messages.
decode_string/1*Attempt Cowboy URL decode, then sanitize the result.
from/1Normalize a singleton TABM message into a list of executable AO-Core messages.
group_scoped/2*Step 4: Group headers/query by N-scope.
inlined_keys_test/0*
inlined_keys_to_test/0*
maybe_join/2*Join a list of items with a separator, or return the first item if there is only one item.
maybe_subpath/1*Check if the string is a subpath, returning it in parsed form, -or the original string with a specifier.
maybe_typed/2*Parse a key's type (applying it to the value) and device name if present.
multiple_inlined_keys_test/0*
multiple_inlined_keys_to_test/0*
multiple_messages_test/0*
multiple_messages_to_test/0*
normalize_base/1*Normalize the base path.
parse_explicit_message_test/0*
parse_full_path/1*Parse the relative reference into path, query, and fragment.
parse_inlined_key_val/1*Extrapolate the inlined key-value pair from a path segment.
parse_part/1*Parse a path part into a message or an ID.
parse_part_mods/2*Parse part modifiers: +or the original string with a specifier.
maybe_typed/2*Parse a key's type (applying it to the value) and device name if present.
multiple_inlined_keys_test/0*
multiple_inlined_keys_to_test/0*
multiple_messages_test/0*
multiple_messages_to_test/0*
normalize_base/1*Normalize the base path.
parse_explicit_message_test/0*
parse_full_path/1*Parse the relative reference into path, query, and fragment.
parse_inlined_key_val/1*Extrapolate the inlined key-value pair from a path segment.
parse_inlined_keys/2*Parse inlined key-value pairs from a path segment.
parse_part/1*Parse a path part into a message or an ID.
parse_part_mods/2*Parse part modifiers: 1.
parse_scope/1*Get the scope of a key.
part/2*Extract the characters from the binary until a separator is found.
part/4*
path_messages/1*Step 2: Decode, split and sanitize the path.
path_parts/2*Split the path into segments, filtering out empty segments and segments that are too long.
path_parts_test/0*
scoped_key_test/0*
scoped_key_to_test/0*
simple_to_test/0*
single_message_test/0*
subpath_in_inlined_test/0*
subpath_in_inlined_to_test/0*
subpath_in_key_test/0*
subpath_in_key_to_test/0*
subpath_in_path_test/0*
subpath_in_path_to_test/0*
to/1Convert a list of AO-Core message into TABM message.
to_suite_test_/0*
type/1*
typed_key_test/0*
typed_key_to_test/0*
@@ -15772,6 +17478,12 @@ Step 3: Apply types to values and remove specifiers. `basic_hashpath_to_test() -> any()` + + +### build/3 * ### + +`build(I, Rest, ScopedKeys) -> any()` + ### build_messages/2 * ### @@ -15788,17 +17500,11 @@ Step 5: Merge the base message with the scoped messages. Attempt Cowboy URL decode, then sanitize the result. - - -### do_build/3 * ### - -`do_build(I, Rest, ScopedKeys) -> any()` - ### from/1 ### -`from(RawMsg) -> any()` +`from(Path) -> any()` Normalize a singleton TABM message into a list of executable AO-Core messages. @@ -15906,6 +17612,15 @@ Extrapolate the inlined key-value pair from a path segment. If the key has a value, it may provide a type (as with typical keys), but if a value is not provided, it is assumed to be a boolean `true`. + + +### parse_inlined_keys/2 * ### + +`parse_inlined_keys(InlinedMsgBin, Msg) -> any()` + +Parse inlined key-value pairs from a path segment. Each key-value pair +is separated by `&` and is of the form `K=V`. + ### parse_part/1 * ### @@ -16476,7 +18191,6 @@ the message is signed. Returns {ok, Path} on HTTP 200, or -* [Data Types](#types) A process wrapper over rocksdb storage. @@ -17102,7 +18816,6 @@ Write a key with a value to the store. -* [Data Types](#types) A module for parsing and converting between Erlang and HTTP Structured Fields, as described in RFC-9651. @@ -18093,7 +19806,8 @@ as well as a standard map of HyperBEAM runtime options. `human_id(Bin) -> any()` Convert a native binary ID to a human readable ID. If the ID is already -a human readable ID, it is returned as is. +a human readable ID, it is returned as is. If it is an ethereum address, it +is returned as is. @@ -18937,6 +20651,7 @@ Use the navigation menu to dive into specific parts of the codebase. Each module
dev_faff
dev_genesis_wasm
dev_green_zone
dev_hook
dev_hyperbuddy
dev_json_iface
dev_local_name
dev_snp_nif
dev_stack
dev_test
dev_volume
dev_wasi
dev_wasm
hb
hb_http_client_sup
hb_http_server
hb_json
hb_keccak
hb_logger
hb_message
hb_metrics_collector
dev_faff
dev_genesis_wasm
dev_green_zone
dev_hook
dev_hyperbuddy
dev_json_iface
dev_local_name
dev_snp_nif
dev_stack
dev_test
dev_volume
dev_wasi
dev_wasm
hb
hb_http_client_sup
hb_http_server
hb_json
hb_keccak
hb_logger
hb_message
hb_metrics_collector
compress_ecdsa_pubkey/1*
hash_address/1*
hmac/1
hmac/2
load_key/1Read the keyfile for the key with the given address from disk.
load_keyfile/1Extract the public and private key from a keyfile.
new/0
new/1
new_keyfile/2Generate a new wallet public and private key, with a corresponding keyfile.
sign/2Sign some data with a private key.
sign/3sign some data, hashed using the provided DigestType.
to_address/1Generate an address from a public key.
to_address/2
to_rsa_address/1*
verify/3Verify that a signature is correct.
verify/4
wallet_filepath/1*
wallet_filepath/3*
wallet_filepath2/1*
wallet_name/3*
+
compress_ecdsa_pubkey/1*
hash_address/1*
hmac/1
hmac/2
load_key/1Read the keyfile for the key with the given address from disk.
load_keyfile/1Extract the public and private key from a keyfile.
new/0
new/1
new_keyfile/2Generate a new wallet public and private key, with a corresponding keyfile.
sign/2Sign some data with a private key.
sign/3sign some data, hashed using the provided DigestType.
to_address/1Generate an address from a public key.
to_address/2
to_ecdsa_address/1*
to_rsa_address/1*
verify/3Verify that a signature is correct.
verify/4
wallet_filepath/1*
wallet_filepath/3*
wallet_filepath2/1*
wallet_name/3*
@@ -108,6 +108,12 @@ Generate an address from a public key. `to_address(PubKey, X2) -> any()` + + +### to_ecdsa_address/1 * ### + +`to_ecdsa_address(PubKey) -> any()` + ### to_rsa_address/1 * ### diff --git a/docs/resources/source-code/dev_codec_ans104.md b/docs/resources/source-code/dev_codec_ans104.md index 710dc5c79..a597b00be 100644 --- a/docs/resources/source-code/dev_codec_ans104.md +++ b/docs/resources/source-code/dev_codec_ans104.md @@ -12,7 +12,7 @@ records to and from TABMs.
commit/3Sign a message using the priv_wallet key in the options.
committed/3Return a list of committed keys from an ANS-104 message.
committed_from_trusted_keys/3*
content_type/1Return the content type for the codec.
deduplicating_from_list/1*Deduplicate a list of key-value pairs by key, generating a list of -values for each normalized key if there are duplicates.
deserialize/1Deserialize a binary ans104 message to a TABM.
do_from/1*
duplicated_tag_name_test/0*
encoded_tags_to_map/1*Convert an ANS-104 encoded tag list into a HyperBEAM-compatible map.
from/1Convert a #tx record into a message map recursively.
from_maintains_tag_name_case_test/0*
id/1Return the ID of a message.
normal_tags/1*Check whether a list of key-value pairs contains only normalized keys.
normal_tags_test/0*
only_committed_maintains_target_test/0*
restore_tag_name_case_from_cache_test/0*
serialize/1Serialize a message or TX to a binary.
signed_duplicated_tag_name_test/0*
simple_to_conversion_test/0*
tag_map_to_encoded_tags/1*Convert a HyperBEAM-compatible map into an ANS-104 encoded tag list, +values for each normalized key if there are duplicates.
deserialize/1Deserialize a binary ans104 message to a TABM.
do_from/1*
duplicated_tag_name_test/0*
encoded_tags_to_map/1*Convert an ANS-104 encoded tag list into a HyperBEAM-compatible map.
from/1Convert a #tx record into a message map recursively.
from_maintains_tag_name_case_test/0*
id/1Return the ID of a message.
normal_tags/1*Check whether a list of key-value pairs contains only normalized keys.
normal_tags_test/0*
only_committed_maintains_target_test/0*
quantity_field_is_ignored_in_from_test/0*
quantity_key_encoded_as_tag_test/0*
restore_tag_name_case_from_cache_test/0*
serialize/1Serialize a message or TX to a binary.
signed_duplicated_tag_name_test/0*
simple_to_conversion_test/0*
tag_map_to_encoded_tags/1*Convert a HyperBEAM-compatible map into an ANS-104 encoded tag list, recreating the original order of the tags.
to/1Internal helper to translate a message to its #tx record representation, which can then be used by ar_bundles to serialize the message.
verify/3Verify an ANS-104 commitment.
@@ -130,6 +130,18 @@ Check whether a list of key-value pairs contains only normalized keys. `only_committed_maintains_target_test() -> any()` + + +### quantity_field_is_ignored_in_from_test/0 * ### + +`quantity_field_is_ignored_in_from_test() -> any()` + + + +### quantity_key_encoded_as_tag_test/0 * ### + +`quantity_key_encoded_as_tag_test() -> any()` + ### restore_tag_name_case_from_cache_test/0 * ### diff --git a/docs/resources/source-code/dev_codec_flat.md b/docs/resources/source-code/dev_codec_flat.md index 1df2cc51e..912442777 100644 --- a/docs/resources/source-code/dev_codec_flat.md +++ b/docs/resources/source-code/dev_codec_flat.md @@ -12,13 +12,19 @@ their value. ## Function Index ## -
commit/3
committed/3
deep_nesting_test/0*
deserialize/1
empty_map_test/0*
from/1Convert a flat map to a TABM.
inject_at_path/3*
multiple_paths_test/0*
nested_conversion_test/0*
path_list_test/0*
serialize/1
simple_conversion_test/0*
to/1Convert a TABM to a flat map.
verify/3
+
binary_passthrough_test/0*
commit/3
committed/3
deep_nesting_test/0*
deserialize/1
empty_map_test/0*
from/1Convert a flat map to a TABM.
inject_at_path/3*
multiple_paths_test/0*
nested_conversion_test/0*
path_list_test/0*
serialize/1
simple_conversion_test/0*
to/1Convert a TABM to a flat map.
verify/3
## Function Details ## + + +### binary_passthrough_test/0 * ### + +`binary_passthrough_test() -> any()` + ### commit/3 ### diff --git a/docs/resources/source-code/dev_codec_httpsig.md b/docs/resources/source-code/dev_codec_httpsig.md index c322dab2f..974533e6a 100644 --- a/docs/resources/source-code/dev_codec_httpsig.md +++ b/docs/resources/source-code/dev_codec_httpsig.md @@ -2,7 +2,6 @@ -* [Data Types](#types) This module implements HTTP Message Signatures as described in RFC-9421 (https://datatracker.ietf.org/doc/html/rfc9421), as an AO-Core device. diff --git a/docs/resources/source-code/dev_green_zone.md b/docs/resources/source-code/dev_green_zone.md index ba72650e8..6ff9b42e7 100644 --- a/docs/resources/source-code/dev_green_zone.md +++ b/docs/resources/source-code/dev_green_zone.md @@ -3,12 +3,21 @@ - +The green zone device, which provides secure communication and identity +management between trusted nodes. + + + +## Description ## +It handles node initialization, joining existing green zones, key exchange, +and node identity cloning. All operations are protected by hardware +commitment and encryption. ## Function Index ## -
add_trusted_node/4*
become/3
calculate_node_message/3*
decrypt_zone_key/2*
default_zone_required_opts/1*
encrypt_payload/2*
finalize_become/5*
init/3
join/3
join_peer/5*
key/3
maybe_set_zone_opts/4*
rsa_wallet_integration_test/0*
try_mount_encrypted_volume/2*
validate_join/3*
validate_peer_opts/2*
+
add_trusted_node/4*Adds a node to the trusted nodes list with its commitment report.
become/3Clones the identity of a target node in the green zone.
calculate_node_message/3*Generate the node message that should be set prior to joining +a green zone.
decrypt_zone_key/2*Decrypts an AES key using the node's RSA private key.
default_zone_required_opts/1*Provides the default required options for a green zone.
encrypt_payload/2*Encrypts an AES key with a node's RSA public key.
finalize_become/5*
info/1Controls which functions are exposed via the device API.
info/3Provides information about the green zone device and its API.
init/3Initialize the green zone for a node.
join/3Initiates the join process for a node to enter an existing green zone.
join_peer/5*Processes a join request to a specific peer node.
key/3Encrypts and provides the node's private key for secure sharing.
maybe_set_zone_opts/4*Adopts configuration from a peer when joining a green zone.
rsa_wallet_integration_test/0*Test RSA operations with the existing wallet structure.
try_mount_encrypted_volume/2*Attempts to mount an encrypted volume using the green zone AES key.
validate_join/3*Validates an incoming join request from another node.
validate_peer_opts/2*Validates that a peer's configuration matches required options.
@@ -24,6 +33,17 @@ add_trusted_node(NodeAddr::binary(), Report::map(), RequesterPubKey::term(), Opt
+`NodeAddr`: The joining node's address
`Report`: The commitment report provided by the joining node
`RequesterPubKey`: The joining node's public key
`Opts`: A map of configuration options
+ +returns: ok + +Adds a node to the trusted nodes list with its commitment report. + +This function updates the trusted nodes configuration: +1. Retrieves the current trusted nodes map +2. Adds the new node with its report and public key +3. Updates the node configuration with the new trusted nodes list + ### become/3 ### @@ -33,12 +53,41 @@ become(M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, binary()
+`Opts`: A map of configuration options
+ +returns: `{ok, Map}` on success with confirmation details, or +`{error, Binary}` if the node is not part of a green zone or +identity adoption fails. + +Clones the identity of a target node in the green zone. + +This function performs the following operations: +1. Retrieves target node location and ID from the configuration +2. Verifies that the local node has a valid shared AES key +3. Requests the target node's encrypted key via its key endpoint +4. Verifies the response is from the expected peer +5. Decrypts the target node's private key using the shared AES key +6. Updates the local node's wallet with the target node's identity + +Required configuration in Opts map: +- green_zone_peer_location: Target node's address +- green_zone_peer_id: Target node's unique identifier +- priv_green_zone_aes: The shared AES key for the green zone + ### calculate_node_message/3 * ### `calculate_node_message(RequiredOpts, Req, List) -> any()` +Generate the node message that should be set prior to joining +a green zone. + +This function takes a required opts message, a request message, and an +`adopt-config` value. The `adopt-config` value can be a boolean, a list of +fields that should be included in the node message from the request, or a +binary string of fields to include, separated by commas. + ### decrypt_zone_key/2 * ### @@ -48,11 +97,41 @@ decrypt_zone_key(EncZoneKey::binary(), Opts::map()) -> {ok, binary()} | {erro
+`EncZoneKey`: The encrypted zone AES key (Base64 encoded or binary)
`Opts`: A map of configuration options
+ +returns: {ok, DecryptedKey} on success with the decrypted AES key + +Decrypts an AES key using the node's RSA private key. + +This function handles decryption of the zone key: +1. Decodes the encrypted key if it's in Base64 format +2. Extracts the RSA private key components from the wallet +3. Creates an RSA private key record +4. Performs private key decryption on the encrypted key + ### default_zone_required_opts/1 * ### -`default_zone_required_opts(Opts) -> any()` +

+default_zone_required_opts(Opts::map()) -> map()
+
+
+ +`Opts`: A map of configuration options from which to derive defaults
+ +returns: A map of required configuration options for the green zone + +Provides the default required options for a green zone. + +This function defines the baseline security requirements for nodes in a green zone: +1. Restricts loading of remote devices and only allows trusted signers +2. Limits to preloaded devices from the initiating machine +3. Enforces specific store configuration +4. Prevents route changes from the defaults +5. Requires matching hooks across all peers +6. Disables message scheduling to prevent conflicts +7. Enforces a permanent state to prevent further configuration changes @@ -63,21 +142,75 @@ encrypt_payload(AESKey::binary(), RequesterPubKey::term()) -> binary()
+`AESKey`: The shared AES key (256-bit binary)
`RequesterPubKey`: The node's public RSA key
+ +returns: The encrypted AES key + +Encrypts an AES key with a node's RSA public key. + +This function securely encrypts the shared key for transmission: +1. Extracts the RSA public key components +2. Creates an RSA public key record +3. Performs public key encryption on the AES key + ### finalize_become/5 * ### `finalize_become(KeyResp, NodeLocation, NodeID, GreenZoneAES, Opts) -> any()` + + +### info/1 ### + +`info(X1) -> any()` + +Controls which functions are exposed via the device API. + +This function defines the security boundary for the green zone device by +explicitly listing which functions are available through the API. + + + +### info/3 ### + +`info(Msg1, Msg2, Opts) -> any()` + +Provides information about the green zone device and its API. + +This function returns detailed documentation about the device, including: +1. A high-level description of the device's purpose +2. Version information +3. Available API endpoints with their parameters and descriptions + ### init/3 ###

-init(M1::term(), M2::term(), Opts::map()) -> {ok, binary()}
+init(M1::term(), M2::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
 

+`Opts`: A map of configuration options
+ +returns: `{ok, Binary}` on success with confirmation message, or +`{error, Binary}` on failure with error message. + +Initialize the green zone for a node. + +This function performs the following operations: +1. Validates the node's history to ensure this is a valid initialization +2. Retrieves or creates a required configuration for the green zone +3. Ensures a wallet (keypair) exists or creates a new one +4. Generates a new 256-bit AES key for secure communication +5. Updates the node's configuration with these cryptographic identities + +Config options in Opts map: +- green_zone_required_config: (Optional) Custom configuration requirements +- priv_wallet: (Optional) Existing wallet to use instead of creating a new one +- priv_green_zone_aes: (Optional) Existing AES key, if already part of a zone + ### join/3 ### @@ -87,15 +220,55 @@ join(M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, binary()}
+`M1`: The join request message with target peer information
`M2`: Additional request details, may include adoption preferences
`Opts`: A map of configuration options for join operations
+ +returns: `{ok, Map}` on success with join response details, or +`{error, Binary}` on failure with error message. + +Initiates the join process for a node to enter an existing green zone. + +This function performs the following operations depending on the state: +1. Validates the node's history to ensure proper initialization +2. Checks for target peer information (location and ID) +3. If target peer is specified: +a. Generates a commitment report for the peer +b. Prepares and sends a POST request to the target peer +c. Verifies the response and decrypts the returned zone key +d. Updates local configuration with the shared AES key +4. If no peer is specified, processes the join request locally + +Config options in Opts map: +- green_zone_peer_location: Target peer's address +- green_zone_peer_id: Target peer's unique identifier +- green_zone_adopt_config: +(Optional) Whether to adopt peer's configuration (default: true) + ### join_peer/5 * ###

-join_peer(PeerLocation::binary(), PeerID::binary(), M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, map()}
+join_peer(PeerLocation::binary(), PeerID::binary(), M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, map() | binary()}
 

+`PeerLocation`: The target peer's address
`PeerID`: The target peer's unique identifier
`M2`: May contain ShouldMount flag to enable encrypted volume mounting
+ +returns: `{ok, Map}` on success with confirmation message, or +`{error, Map|Binary}` on failure with error details + +Processes a join request to a specific peer node. + +This function handles the client-side join flow when connecting to a peer: +1. Verifies the node is not already in a green zone +2. Optionally adopts configuration from the target peer +3. Generates a hardware-backed commitment report +4. Sends a POST request to the peer's join endpoint +5. Verifies the response signature +6. Decrypts the returned AES key +7. Updates local configuration with the shared key +8. Optionally mounts an encrypted volume using the shared key + ### key/3 ### @@ -105,11 +278,49 @@ key(M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, binary()}
+`Opts`: A map of configuration options
+ +returns: `{ok, Map}` containing the encrypted key and IV on success, or +`{error, Binary}` if the node is not part of a green zone + +Encrypts and provides the node's private key for secure sharing. + +This function performs the following operations: +1. Retrieves the shared AES key and the node's wallet +2. Verifies that the node is part of a green zone (has a shared AES key) +3. Generates a random initialization vector (IV) for encryption +4. Encrypts the node's private key using AES-256-GCM with the shared key +5. Returns the encrypted key and IV for secure transmission + +Required configuration in Opts map: +- priv_green_zone_aes: The shared AES key for the green zone +- priv_wallet: The node's wallet containing the private key to encrypt + ### maybe_set_zone_opts/4 * ### -`maybe_set_zone_opts(PeerLocation, PeerID, Req, InitOpts) -> any()` +

+maybe_set_zone_opts(PeerLocation::binary(), PeerID::binary(), Req::map(), InitOpts::map()) -> {ok, map()} | {error, binary()}
+
+
+ +`PeerLocation`: The location of the peer node to join
`PeerID`: The ID of the peer node to join
`Req`: The request message with adoption preferences
`InitOpts`: A map of initial configuration options
+ +returns: `{ok, Map}` with updated configuration on success, or +`{error, Binary}` if configuration retrieval fails + +Adopts configuration from a peer when joining a green zone. + +This function handles the conditional adoption of peer configuration: +1. Checks if adoption is enabled (default: true) +2. Requests required configuration from the peer +3. Verifies the authenticity of the configuration +4. Creates a node message with appropriate settings +5. Updates the local node configuration + +Config options: +- green_zone_adopt_config: Controls configuration adoption (boolean, list, or binary) @@ -117,12 +328,28 @@ key(M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, binary()} `rsa_wallet_integration_test() -> any()` +Test RSA operations with the existing wallet structure. + +This test function verifies that encryption and decryption using the RSA keys +from the wallet work correctly. It creates a new wallet, encrypts a test +message with the RSA public key, and then decrypts it with the RSA private +key, asserting that the decrypted message matches the original. + ### try_mount_encrypted_volume/2 * ### `try_mount_encrypted_volume(AESKey, Opts) -> any()` +Attempts to mount an encrypted volume using the green zone AES key. + +This function handles the complete process of secure storage setup by +delegating to the dev_volume module, which provides a unified interface +for volume management. + +The encryption key used for the volume is the same AES key used for green zone +communication, ensuring that only nodes in the green zone can access the data. + ### validate_join/3 * ### @@ -132,9 +359,41 @@ validate_join(M1::term(), Req::map(), Opts::map()) -> {ok, map()} | {error, b
+`M1`: Ignored parameter
`Req`: The join request containing commitment report and public key
`Opts`: A map of configuration options
+ +returns: `{ok, Map}` on success with encrypted AES key, or +`{error, Binary}` on failure with error message + +Validates an incoming join request from another node. + +This function handles the server-side join flow when receiving a connection +request: +1. Validates the peer's configuration meets required standards +2. Extracts the commitment report and public key from the request +3. Verifies the hardware-backed commitment report +4. Adds the joining node to the trusted nodes list +5. Encrypts the shared AES key with the peer's public key +6. Returns the encrypted key to the requesting node + ### validate_peer_opts/2 * ### -`validate_peer_opts(Req, Opts) -> any()` +

+validate_peer_opts(Req::map(), Opts::map()) -> boolean()
+
+
+ +`Req`: The request message containing the peer's configuration
`Opts`: A map of the local node's configuration options
+ +returns: true if the peer's configuration is valid, false otherwise + +Validates that a peer's configuration matches required options. + +This function ensures the peer node meets configuration requirements: +1. Retrieves the local node's required configuration +2. Gets the peer's options from its message +3. Adds required configuration to peer's required options list +4. Verifies the peer's node history is valid +5. Checks that the peer's options match the required configuration diff --git a/docs/resources/source-code/dev_hook.md b/docs/resources/source-code/dev_hook.md new file mode 100644 index 000000000..477df1e5f --- /dev/null +++ b/docs/resources/source-code/dev_hook.md @@ -0,0 +1,166 @@ +# [Module dev_hook.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_hook.erl) + + + + +A generalized interface for `hooking` into HyperBEAM nodes. + + + +## Description ## + +This module allows users to define `hooks` that are executed at various +points in the lifecycle of nodes and message evaluations. + +Hooks are maintained in the `node message` options, under the `on` +key. Each `hook` may have zero or many `handlers` against which its request +is executed. A new `handler` of a hook can be registered by simply +adding a new key to that message. If multiple hooks need to be executed for +a single event, the key's value can be set to a list of hooks. + +`hook`s themselves do not need to be added explicitly. Any device can add +a hook by simply executing `dev_hook:on(HookName, Req, Opts)`. This +function does not affect the hashpath of a message and is not exported on +the device's API, such that it is not possible to call it directly with +AO-Core resolution. + +All handlers are expressed in the form of a message, upon which the hook's +request is evaluated: + +AO(HookMsg, Req, Opts) => {Status, Result} + +The `Status` and `Result` of the evaluation can be used at the `hook` caller's +discretion.
If multiple handlers are to be executed for a single `hook`, the +result of each is used as the input to the next, on the assumption that the +status of the previous is `ok`. If a non-`ok` status is encountered, the +evaluation is halted and the result is returned to the caller. This means +that in most cases, hooks take the form of chainable pipelines of functions, +passing the most pertinent data in the `body` key of both the request and +result. Hook definitions can also set the `hook/result` key to `ignore`, if +the result of the execution should be discarded and the prior value (the +input to the hook) should be used instead. The `hook/commit-request` key can +also be set to `true` if the request should be committed by the node before +execution of the hook. + +The default HyperBEAM node implements several useful hooks. They include: + +start: Executed when the node starts. +Req/body: The node's initial configuration. +Result/body: The node's possibly updated configuration. +request: Executed when a request is received via the HTTP API. +Req/body: The sequence of messages that the node will evaluate. +Req/request: The raw, unparsed singleton request. +Result/body: The sequence of messages that the node will evaluate. +step: Executed after each message in a sequence has been evaluated. +Req/body: The result of the evaluation. +Result/body: The result of the evaluation. +response: Executed when a response is sent via the HTTP API. +Req/body: The result of the evaluation. +Req/request: The raw, unparsed singleton request that was used to +generate the response. +Result/body: The message to be sent in response to the request. + +Additionally, this module implements a traditional device API, allowing the +node operator to register hooks to the node and find those that are +currently active. + +## Function Index ## + + +
execute_handler/4*Execute a single handler +Handlers are expressed as messages that can be resolved via AO.
execute_handlers/4*Execute a list of handlers in sequence.
find/2Get all handlers for a specific hook from the node message options.
find/3
halt_on_error_test/0*Test that pipeline execution halts on error.
info/1Device API information.
multiple_handlers_test/0*Test that multiple handlers form a pipeline.
no_handlers_test/0*Test that hooks with no handlers return the original request.
on/3Execute a named hook with the provided request and options +This function finds all handlers for the hook and evaluates them in sequence.
single_handler_test/0*Test that a single handler is executed correctly.
+ + + + +## Function Details ## + + + +### execute_handler/4 * ### + +`execute_handler(HookName, Handler, Req, Opts) -> any()` + +Execute a single handler +Handlers are expressed as messages that can be resolved via AO. + + + +### execute_handlers/4 * ### + +`execute_handlers(HookName, Rest, Req, Opts) -> any()` + +Execute a list of handlers in sequence. +The result of each handler is used as input to the next handler. +If a handler returns a non-ok status, execution is halted. + + + +### find/2 ### + +`find(HookName, Opts) -> any()` + +Get all handlers for a specific hook from the node message options. +Handlers are stored in the `on` key of this message. The `find/2` variant of +this function only takes a hook name and node message, and is not called +directly via the device API. Instead it is used by `on/3` and other internal +functionality to find handlers when necessary. The `find/3` variant can, +however, be called directly via the device API. + + + +### find/3 ### + +`find(Base, Req, Opts) -> any()` + + + +### halt_on_error_test/0 * ### + +`halt_on_error_test() -> any()` + +Test that pipeline execution halts on error + + + +### info/1 ### + +`info(X1) -> any()` + +Device API information + + + +### multiple_handlers_test/0 * ### + +`multiple_handlers_test() -> any()` + +Test that multiple handlers form a pipeline + + + +### no_handlers_test/0 * ### + +`no_handlers_test() -> any()` + +Test that hooks with no handlers return the original request + + + +### on/3 ### + +`on(HookName, Req, Opts) -> any()` + +Execute a named hook with the provided request and options +This function finds all handlers for the hook and evaluates them in sequence. +The result of each handler is used as input to the next handler. 
+ + + +### single_handler_test/0 * ### + +`single_handler_test() -> any()` + +Test that a single handler is executed correctly + diff --git a/docs/resources/source-code/dev_lua.md b/docs/resources/source-code/dev_lua.md index 816d051ee..9101078aa 100644 --- a/docs/resources/source-code/dev_lua.md +++ b/docs/resources/source-code/dev_lua.md @@ -3,7 +3,7 @@ -A device that calls a Lua script upon a request and returns the result. +A device that calls a Lua module upon a request and returns the result. @@ -11,10 +11,10 @@ A device that calls a Lua script upon a request and returns the result.
ao_core_resolution_from_lua_test/0*Run an AO-Core resolution from the Lua environment.
ao_core_sandbox_test/0*Run an AO-Core resolution from the Lua environment.
aos_authority_not_trusted_test/0*
aos_process_benchmark_test_/0*Benchmark the performance of Lua executions.
compute/4*Call the Lua script with the given arguments.
decode/1Decode a Lua result into a HyperBEAM structured@1.0 message.
decode_params/2*Decode a list of Lua references, as found in a stack trace, into a -list of Erlang terms.
decode_stacktrace/2*Parse a Lua stack trace into a list of messages.
decode_stacktrace/3*
direct_benchmark_test/0*Benchmark the performance of Lua executions.
encode/1Encode a HyperBEAM structured@1.0 message into a Lua term.
ensure_initialized/3*Initialize the Lua VM if it is not already initialized.
error_response_test/0*
execute_aos_call/1*
execute_aos_call/2*
find_scripts/2*Find the script in the base message, either by ID or by string.
functions/3Return a list of all functions in the Lua environment.
generate_lua_process/1*Generate a Lua process message.
generate_stack/1*Generate a stack message for the Lua process.
generate_test_message/1*Generate a test message for a Lua process.
info/1All keys that are not directly available in the base message are -resolved by calling the Lua function in the script of the same name.
init/3Initialize the device state, loading the script into memory if it is -a reference.
initialize/3*Initialize a new Lua state with a given base message and script.
invoke_aos_test/0*
invoke_non_compute_key_test/0*Call a non-compute key on a Lua device message and ensure that the -function of the same name in the script is called.
load_scripts/2*Load a list of scripts for installation into the Lua VM.
load_scripts/3*
load_scripts_by_id_test/0*
lua_http_preprocessor_test/0*Use a Lua script as a preprocessor on the HTTP server via ~meta@1.0.
multiple_scripts_test/0*
normalize/3Restore the Lua state from a snapshot, if it exists.
process_response/2*Process a response to a Luerl invocation.
pure_lua_process_benchmark_test_/0*
pure_lua_process_test/0*Call a process whose execution-device is set to lua@5.3a.
sandbox/3*Sandbox (render inoperable) a set of Lua functions.
sandboxed_failure_test/0*
simple_invocation_test/0*
snapshot/3Snapshot the Lua state from a live computation.
+list of Erlang terms.decode_stacktrace/2*Parse a Lua stack trace into a list of messages.decode_stacktrace/3*direct_benchmark_test/0*Benchmark the performance of Lua executions.encode/1Encode a HyperBEAM structured@1.0 message into a Lua term.ensure_initialized/3*Initialize the Lua VM if it is not already initialized.error_response_test/0*find_modules/2*Find the script in the base message, either by ID or by string.functions/3Return a list of all functions in the Lua environment.generate_lua_process/1*Generate a Lua process message.generate_stack/1*Generate a stack message for the Lua process.generate_test_message/1*Generate a test message for a Lua process.info/1All keys that are not directly available in the base message are +resolved by calling the Lua function in the module of the same name.init/3Initialize the device state, loading the script into memory if it is +a reference.initialize/3*Initialize a new Lua state with a given base message and module.invoke_aos_test/0*invoke_non_compute_key_test/0*Call a non-compute key on a Lua device message and ensure that the +function of the same name in the script is called.load_modules/2*Load a list of modules for installation into the Lua VM.load_modules/3*load_modules_by_id_test/0*lua_http_hook_test/0*Use a Lua module as a hook on the HTTP server via ~meta@1.0.multiple_modules_test/0*normalize/3Restore the Lua state from a snapshot, if it exists.process_response/2*Process a response to a Luerl invocation.pure_lua_process_benchmark_test_/0*pure_lua_process_test/0*Call a process whose execution-device is set to lua@5.3a.sandbox/3*Sandbox (render inoperable) a set of Lua functions.sandboxed_failure_test/0*simple_invocation_test/0*snapshot/3Snapshot the Lua state from a live computation. @@ -113,7 +113,7 @@ Encode a HyperBEAM `structured@1.0` message into a Lua term. `ensure_initialized(Base, Req, Opts) -> any()` Initialize the Lua VM if it is not already initialized. Optionally takes -the script as a Binary string. 
If not provided, the script will be loaded +the script as a Binary string. If not provided, the module will be loaded from the base message. @@ -122,23 +122,11 @@ from the base message. `error_response_test() -> any()` - + -### execute_aos_call/1 * ### +### find_modules/2 * ### -`execute_aos_call(Base) -> any()` - - - -### execute_aos_call/2 * ### - -`execute_aos_call(Base, Req) -> any()` - - - -### find_scripts/2 * ### - -`find_scripts(Base, Opts) -> any()` +`find_modules(Base, Opts) -> any()` Find the script in the base message, either by ID or by string. @@ -181,7 +169,7 @@ Generate a test message for a Lua process. `info(Base) -> any()` All keys that are not directly available in the base message are -resolved by calling the Lua function in the script of the same name. +resolved by calling the Lua function in the module of the same name. Additionally, we exclude the `keys`, `set`, `encode` and `decode` functions which are `message@1.0` core functions, and Lua public utility functions. @@ -198,9 +186,9 @@ a reference. ### initialize/3 * ### -`initialize(Base, Scripts, Opts) -> any()` +`initialize(Base, Modules, Opts) -> any()` -Initialize a new Lua state with a given base message and script. +Initialize a new Lua state with a given base message and module. @@ -217,39 +205,39 @@ Initialize a new Lua state with a given base message and script. Call a non-compute key on a Lua device message and ensure that the function of the same name in the script is called. - + -### load_scripts/2 * ### +### load_modules/2 * ### -`load_scripts(Scripts, Opts) -> any()` +`load_modules(Modules, Opts) -> any()` -Load a list of scripts for installation into the Lua VM. +Load a list of modules for installation into the Lua VM. 
- + -### load_scripts/3 * ### +### load_modules/3 * ### -`load_scripts(Rest, Opts, Acc) -> any()` +`load_modules(Rest, Opts, Acc) -> any()` - + -### load_scripts_by_id_test/0 * ### +### load_modules_by_id_test/0 * ### -`load_scripts_by_id_test() -> any()` +`load_modules_by_id_test() -> any()` - + -### lua_http_preprocessor_test/0 * ### +### lua_http_hook_test/0 * ### -`lua_http_preprocessor_test() -> any()` +`lua_http_hook_test() -> any()` -Use a Lua script as a preprocessor on the HTTP server via `~meta@1.0`. +Use a Lua module as a hook on the HTTP server via `~meta@1.0`. - + -### multiple_scripts_test/0 * ### +### multiple_modules_test/0 * ### -`multiple_scripts_test() -> any()` +`multiple_modules_test() -> any()` diff --git a/docs/resources/source-code/dev_message.md b/docs/resources/source-code/dev_message.md index b86dbc3d2..714013bc9 100644 --- a/docs/resources/source-code/dev_message.md +++ b/docs/resources/source-code/dev_message.md @@ -22,7 +22,7 @@ implement a case-insensitive key lookup rather than delegating to maps:get/2.case_insensitive_get_test/0*commit/3Commit to a message, using the commitment-device key to specify the device that should be used to commit to the message.commitment_ids_from_committers/2*Returns a list of commitment IDs in a commitments map that are relevant for a list of given committer addresses.commitment_ids_from_request/3*Implements a standardized form of specifying commitment IDs for a -message request.committed/3Return the list of committed keys from a message.committers/1Return the committers of a message that are present in the given request.committers/2committers/3exec_for_commitment/5*Execute a function for a single commitment in the context of its +message request.committed/3Return the list of committed keys from a message.committers/1Return the committers of a message that are present in the given request.committers/2committers/3deep_unset_test/0*exec_for_commitment/5*Execute a function for a single commitment in the 
context of its parent message.get/2Return the value associated with the key as it exists in the message's underlying Erlang map.get/3get_keys_mod_test/0*id/1Return the ID of a message, using the committers list if it exists.id/2id/3id_device/1*Locate the ID device of a message.info/0Return the info for the identity device.is_private_mod_test/0*key_from_device_test/0*keys/1Get the public keys of a message.keys_from_device_test/0*private_keys_are_filtered_test/0*remove/2Remove a key or keys from a message.remove_test/0*run_test/0*set/3Deep merge keys in a message.set_conflicting_keys_test/0*set_ignore_undefined_test/0*set_path/3Special case of set/3 for setting the path key.unset_with_set_test/0*verify/3Verify a message.verify_test/0*with_relevant_commitments/3*Return a message with only the relevant commitments for a given request. @@ -119,6 +119,12 @@ Return the committers of a message that are present in the given request. `committers(X1, X2, NodeOpts) -> any()` + + +### deep_unset_test/0 * ### + +`deep_unset_test() -> any()` + ### exec_for_commitment/5 * ### diff --git a/docs/resources/source-code/dev_meta.md b/docs/resources/source-code/dev_meta.md index 56690d553..a7f2d5d51 100644 --- a/docs/resources/source-code/dev_meta.md +++ b/docs/resources/source-code/dev_meta.md @@ -21,8 +21,9 @@ the AO-Core resolver has returned a result.
add_dynamic_keys/1*Add dynamic keys to the node message.
adopt_node_message/2Attempt to adopt changes to a node message.
authorized_set_node_msg_succeeds_test/0*Test that we can set the node message if the request is signed by the -owner of the node.
claim_node_test/0*Test that we can claim the node correctly and set the node message after.
config_test/0*Test that we can get the node message.
embed_status/1*Wrap the result of a device call in a status.
filter_node_msg/1*Remove items from the node message that are not encodable into a -message.
halt_request_test/0*Test that we can halt a request if the preprocessor returns an error.
handle/2Normalize and route messages downstream based on their path.
handle_initialize/2*
handle_resolve/3*Handle an AO-Core request, which is a list of messages.
info/1Ensure that the helper function adopt_node_message/2 is not exported.
info/3Get/set the node message.
is/2Check if the request in question is signed by a given role on the node.
is/3
maybe_sign/2*Sign the result of a device call if the node is configured to do so.
message_to_status/1*Get the HTTP status code from a transaction (if it exists).
modify_request_test/0*Test that a preprocessor can modify a request.
permanent_node_message_test/0*Test that a permanent node message cannot be changed.
priv_inaccessible_test/0*Test that we can't get the node message if the requested key is private.
resolve_processor/5*Execute a message from the node message upon the user's request.
status_code/1*Calculate the appropriate HTTP status code for an AO-Core result.
unauthorized_set_node_msg_fails_test/0*Test that we can't set the node message if the request is not signed by +owner of the node.
build/3Emits the version number and commit hash of the HyperBEAM node source, +if available.
buildinfo_test/0*Test that version information is available and returned correctly.
claim_node_test/0*Test that we can claim the node correctly and set the node message after.
config_test/0*Test that we can get the node message.
embed_status/1*Wrap the result of a device call in a status.
filter_node_msg/1*Remove items from the node message that are not encodable into a +message.
halt_request_test/0*Test that we can halt a request if the hook returns an error.
handle/2Normalize and route messages downstream based on their path.
handle_initialize/2*
handle_resolve/3*Handle an AO-Core request, which is a list of messages.
info/1Ensure that the helper function adopt_node_message/2 is not exported.
info/3Get/set the node message.
is/2Check if the request in question is signed by a given role on the node.
is/3
maybe_sign/2*Sign the result of a device call if the node is configured to do so.
message_to_status/1*Get the HTTP status code from a transaction (if it exists).
modify_request_test/0*Test that a hook can modify a request.
permanent_node_message_test/0*Test that a permanent node message cannot be changed.
priv_inaccessible_test/0*Test that we can't get the node message if the requested key is private.
request_response_hooks_test/0*
resolve_hook/4*Execute a hook from the node message upon the user's request.
status_code/1*Calculate the appropriate HTTP status code for an AO-Core result.
unauthorized_set_node_msg_fails_test/0*Test that we can't set the node message if the request is not signed by the owner of the node.
uninitialized_node_test/0*Test that an uninitialized node will not run computation.
update_node_message/2*Validate that the request is signed by the operator of the node, then allow them to update the node message.
@@ -56,6 +57,29 @@ Attempt to adopt changes to a node message. Test that we can set the node message if the request is signed by the owner of the node. + + +### build/3 ### + +`build(X1, X2, NodeMsg) -> any()` + +Emits the version number and commit hash of the HyperBEAM node source, +if available. + +We include the short hash separately, as the length of this hash may change in +the future, depending on the git version/config used to build the node. +Subsequently, rather than embedding the `git-short-hash-length`, for the +avoidance of doubt, we include the short hash separately, as well as its long +hash. + + + +### buildinfo_test/0 * ### + +`buildinfo_test() -> any()` + +Test that version information is available and returned correctly. + ### claim_node_test/0 * ### @@ -95,7 +119,7 @@ message. `halt_request_test() -> any()` -Test that we can halt a request if the preprocessor returns an error. +Test that we can halt a request if the hook returns an error. @@ -122,7 +146,7 @@ other messages are routed to the `handle_resolve/2` function. Handle an AO-Core request, which is a list of messages. We apply the node's pre-processor to the request first, and then resolve the request using the node's AO-Core implementation if its response was `ok`. -After execution, we run the node's `postprocessor` message on the result of +After execution, we run the node's `response` hook on the result of the request before returning the result it grants back to the user. @@ -188,7 +212,7 @@ Get the HTTP status code from a transaction (if it exists). `modify_request_test() -> any()` -Test that a preprocessor can modify a request. +Test that a hook can modify a request. @@ -206,20 +230,26 @@ Test that a permanent node message cannot be changed. Test that we can't get the node message if the requested key is private. 
- + + +### request_response_hooks_test/0 * ### + +`request_response_hooks_test() -> any()` + + -### resolve_processor/5 * ### +### resolve_hook/4 * ### -`resolve_processor(PathKey, Processor, Req, Query, NodeMsg) -> any()` +`resolve_hook(HookName, InitiatingRequest, Body, NodeMsg) -> any()` -Execute a message from the node message upon the user's request. The -invocation of the processor provides a request of the following form: +Execute a hook from the node message upon the user's request. The +invocation of the hook provides a request of the following form: ``` - /path => preprocess | postprocess + /path => request | response /request => the original request singleton - /body => list of messages the user wishes to process + /body => parsed sequence of messages to process | the execution result ``` diff --git a/docs/resources/source-code/dev_p4.md b/docs/resources/source-code/dev_p4.md index bfe1daab5..081a4814a 100644 --- a/docs/resources/source-code/dev_p4.md +++ b/docs/resources/source-code/dev_p4.md @@ -33,10 +33,10 @@ key may return `infinity` if the node will not serve a user under any circumstances. Else, the value returned by the `price` key will be passed to the ledger device as the `amount` key. -The ledger device should implement the following keys: +A ledger device should implement the following keys: ``` -POST /credit?message=PaymentMessage&request=RequestMessagePOST /debit?amount=PriceMessage&type=pre|post&request=RequestMessage +POST /credit?message=PaymentMessage&request=RequestMessagePOST /debit?amount=PriceMessage&request=RequestMessageGET /balance?request=RequestMessage ``` The `type` key is optional and defaults to `pre`. If `type` is set to `post`, @@ -47,8 +47,8 @@ check whether the debit would succeed before execution.
balance/3Get the balance of a user in the ledger.
faff_test/0*Simple test of p4's capabilities with the faff@1.0 device.
is_chargable_req/2*The node operator may elect to make certain routes non-chargable, using -the routes syntax also used to declare routes in router@1.0.
non_chargable_route_test/0*Test that a non-chargable route is not charged for.
postprocess/3Postprocess the request after it has been fulfilled.
preprocess/3Estimate the cost of a transaction and decide whether to proceed with -a request.
test_opts/1*
test_opts/2*
test_opts/3*
+the routes syntax also used to declare routes in router@1.0.lua_pricing_test/0*Ensure that Lua modules can be used as pricing and ledger devices.non_chargable_route_test/0*Test that a non-chargable route is not charged for.request/3Estimate the cost of a transaction and decide whether to proceed with +a request.response/3Postprocess the request after it has been fulfilled.test_opts/1*test_opts/2*test_opts/3* @@ -80,6 +80,19 @@ Simple test of p4's capabilities with the `faff@1.0` device. The node operator may elect to make certain routes non-chargable, using the `routes` syntax also used to declare routes in `router@1.0`. + + +### lua_pricing_test/0 * ### + +`lua_pricing_test() -> any()` + +Ensure that Lua modules can be used as pricing and ledger devices. Our +modules come in two parts: +- A `process` module which is executed as a persistent `local-process` on the +node, and which maintains the state of the ledger. +- A `client` module, which is executed as a `p4@1.0` device, marshalling +requests to the `process` module. + ### non_chargable_route_test/0 * ### @@ -88,23 +101,23 @@ the `routes` syntax also used to declare routes in `router@1.0`. Test that a non-chargable route is not charged for. - + -### postprocess/3 ### +### request/3 ### -`postprocess(State, RawResponse, NodeMsg) -> any()` +`request(State, Raw, NodeMsg) -> any()` -Postprocess the request after it has been fulfilled. +Estimate the cost of a transaction and decide whether to proceed with +a request. The default behavior if `pricing-device` or `p4_balances` are +not set is to proceed, so it is important that a user initialize them. - + -### preprocess/3 ### +### response/3 ### -`preprocess(State, Raw, NodeMsg) -> any()` +`response(State, RawResponse, NodeMsg) -> any()` -Estimate the cost of a transaction and decide whether to proceed with -a request. The default behavior if `pricing-device` or `p4_balances` are -not set is to proceed, so it is important that a user initialize them. 
+Postprocess the request after it has been fulfilled. diff --git a/docs/resources/source-code/dev_patch.md b/docs/resources/source-code/dev_patch.md index ba71c114d..92cf5373c 100644 --- a/docs/resources/source-code/dev_patch.md +++ b/docs/resources/source-code/dev_patch.md @@ -3,43 +3,85 @@ -A device that finds `PATCH` requests in the `results/outbox` -of its message, and applies them to it. +A device that can be used to reorganize a message: Moving data from +one path inside it to another. ## Description ## -This can be useful for processes -whose computation would like to manipulate data outside of the `results` key -of its message. + +This device's function runs in two modes: + +1. When using `all` to move all data at the path given in `from` to the +path given in `to`. +2. When using `patches` to move all submessages in the source to the target, +_if_ they have a `method` key of `PATCH` or a `device` key of `patch@1.0`. + +Source and destination paths may be prepended by `base:` or `req:` keys to +indicate that they are relative to either of the messages that the +computation is being performed on. + +The search order for finding the source and destination keys is as follows, +where `X` is either `from` or `to`: + +1. The `patch-X` key of the execution message. +2. The `X` key of the execution message. +3. The `patch-X` key of the request message. +4. The `X` key of the request message. + +Additionally, this device implements the standard computation device keys, +allowing it to be used as an element of an execution stack pipeline, etc. ## Function Index ## -
compute/3Find PATCH requests in the results/outbox of the message, and apply -them to the state.
init/3Default process device hooks.
normalize/3
patch_to_submessage_test/0*
snapshot/3
uninitialized_patch_test/0*
+
all/3Get the value found at the patch-from key of the message, or the +from key if the former is not present.
all_mode_test/0*
compute/3
init/3Necessary hooks for compliance with the execution-device standard.
move/4*Unified executor for the all and patches modes.
normalize/3
patch_to_submessage_test/0*
patches/3Find relevant PATCH messages in the given source key of the execution +and request messages, and apply them to the given destination key of the +request.
req_prefix_test/0*
snapshot/3
uninitialized_patch_test/0*
## Function Details ## + + +### all/3 ### + +`all(Msg1, Msg2, Opts) -> any()` + +Get the value found at the `patch-from` key of the message, or the +`from` key if the former is not present. Remove it from the message and set +the new source to the value found. + + + +### all_mode_test/0 * ### + +`all_mode_test() -> any()` + ### compute/3 ### `compute(Msg1, Msg2, Opts) -> any()` -Find `PATCH` requests in the `results/outbox` of the message, and apply -them to the state. - ### init/3 ### `init(Msg1, Msg2, Opts) -> any()` -Default process device hooks. +Necessary hooks for compliance with the `execution-device` standard. + + + +### move/4 * ### + +`move(Mode, Msg1, Msg2, Opts) -> any()` + +Unified executor for the `all` and `patches` modes. @@ -53,6 +95,22 @@ Default process device hooks. `patch_to_submessage_test() -> any()` + + +### patches/3 ### + +`patches(Msg1, Msg2, Opts) -> any()` + +Find relevant `PATCH` messages in the given source key of the execution +and request messages, and apply them to the given destination key of the +request. + + + +### req_prefix_test/0 * ### + +`req_prefix_test() -> any()` + ### snapshot/3 ### diff --git a/docs/resources/source-code/dev_push.md b/docs/resources/source-code/dev_push.md index 465e97efa..f966c2457 100644 --- a/docs/resources/source-code/dev_push.md +++ b/docs/resources/source-code/dev_push.md @@ -16,7 +16,7 @@ continues until the there are no remaining messages to push.
additional_keys/3*Set the necessary keys in order for the recipient to know where the -message came from.
do_push/3*Push a message or slot number.
extract/2*Return either the target or the hint.
find_type/2*
full_push_test_/0*
is_async/3*Determine if the push is asynchronous.
multi_process_push_test_disabled/0*
normalize_message/2*Augment the message with from-* keys, if it doesn't already have them.
parse_redirect/1*
ping_pong_script/1*
push/3Push either a message or an assigned slot number.
push_prompts_encoding_change_test/0*
push_result_message/5*
push_with_mode/3*
push_with_redirect_hint_test_disabled/0*
remote_schedule_result/3*
reply_script/0*
schedule_initial_message/3*Push a message or a process, prior to pushing the resulting slot number.
schedule_result/3*
schedule_result/4*
split_target/1*
target_process/2*Find the target process ID for a message to push.
+message came from.do_push/3*Push a message or slot number, including its downstream results.extract/2*Return either the target or the hint.find_type/2*full_push_test_/0*is_async/3*Determine if the push is asynchronous.multi_process_push_test_/0*normalize_message/2*Augment the message with from-* keys, if it doesn't already have them.parse_redirect/1*ping_pong_script/1*push/3Push either a message or an assigned slot number.push_prompts_encoding_change_test/0*push_result_message/4*Push a downstream message result.push_with_mode/3*push_with_redirect_hint_test_disabled/0*remote_schedule_result/3*reply_script/0*schedule_initial_message/3*Push a message or a process, prior to pushing the resulting slot number.schedule_result/4*Add the necessary keys to the message to be scheduled, then schedule it.schedule_result/5*split_target/1*Split the target into the process ID and the optional query string.target_process/2*Find the target process ID for a message to push. @@ -27,7 +27,7 @@ message came from.do_push/3* ### additional_keys/3 * ### -`additional_keys(FromMsg, ToSched, Opts) -> any()` +`additional_keys(Origin, ToSched, Opts) -> any()` Set the necessary keys in order for the recipient to know where the message came from. @@ -36,9 +36,9 @@ message came from. ### do_push/3 * ### -`do_push(Base, Assignment, Opts) -> any()` +`do_push(Process, Assignment, Opts) -> any()` -Push a message or slot number. +Push a message or slot number, including its downstream results. @@ -64,15 +64,15 @@ Return either the `target` or the `hint`. ### is_async/3 * ### -`is_async(Base, Req, Opts) -> any()` +`is_async(Process, Req, Opts) -> any()` Determine if the push is asynchronous. - + -### multi_process_push_test_disabled/0 * ### +### multi_process_push_test_/0 * ### -`multi_process_push_test_disabled() -> any()` +`multi_process_push_test_() -> any()` @@ -100,7 +100,18 @@ Augment the message with from-* keys, if it doesn't already have them. 
`push(Base, Req, Opts) -> any()` -Push either a message or an assigned slot number. +Push either a message or an assigned slot number. If a `Process` is +provided in the `body` of the request, it will be scheduled (initializing +it if it does not exist). Otherwise, the message specified by the given +`slot` key will be pushed. + +Optional parameters: +`/result-depth`: The depth to which the full contents of the result +will be included in the response. Default: 1, returning +the full result of the first message, but only the 'tree' +of downstream messages. +`/push-mode`: Whether or not the push should be done asynchronously. +Default: `sync`, pushing synchronously. @@ -108,17 +119,22 @@ Push either a message or an assigned slot number. `push_prompts_encoding_change_test() -> any()` - + + +### push_result_message/4 * ### -### push_result_message/5 * ### +`push_result_message(TargetProcess, MsgToPush, Origin, Opts) -> any()` -`push_result_message(Base, FromSlot, Key, MsgToPush, Opts) -> any()` +Push a downstream message result. The `Origin` map contains information +about the origin of the message: The process that originated the message, +the slot number from which it was sent, and the outbox key of the message, +and the depth to which downstream results should be included in the message. ### push_with_mode/3 * ### -`push_with_mode(Base, Req, Opts) -> any()` +`push_with_mode(Process, Req, Opts) -> any()` @@ -146,17 +162,21 @@ Push either a message or an assigned slot number. Push a message or a process, prior to pushing the resulting slot number. - + + +### schedule_result/4 * ### -### schedule_result/3 * ### +`schedule_result(TargetProcess, MsgToPush, Origin, Opts) -> any()` -`schedule_result(Base, MsgToPush, Opts) -> any()` +Add the necessary keys to the message to be scheduled, then schedule it. +If the remote scheduler does not support the given codec, it will be +downgraded and re-signed. 
- + -### schedule_result/4 * ### +### schedule_result/5 * ### -`schedule_result(Base, MsgToPush, Codec, Opts) -> any()` +`schedule_result(TargetProcess, MsgToPush, Codec, Origin, Opts) -> any()` @@ -164,6 +184,8 @@ Push a message or a process, prior to pushing the resulting slot number. `split_target(RawTarget) -> any()` +Split the target into the process ID and the optional query string. + ### target_process/2 * ### diff --git a/docs/resources/source-code/dev_relay.md b/docs/resources/source-code/dev_relay.md index 245291781..2828b72eb 100644 --- a/docs/resources/source-code/dev_relay.md +++ b/docs/resources/source-code/dev_relay.md @@ -27,7 +27,7 @@ Example usage: ## Function Index ## -
call/3Execute a call request using a node's routes.
call_get_test/0*
cast/3Execute a request in the same way as call/3, but asynchronously.
preprocess/3Preprocess a request to check if it should be relayed to a different node.
preprocessor_reroute_to_nearest_test/0*Test that the preprocess/3 function re-routes a request to remote +
call/3Execute a call request using a node's routes.
call_get_test/0*
cast/3Execute a request in the same way as call/3, but asynchronously.
request/3Preprocess a request to check if it should be relayed to a different node.
request_hook_reroute_to_nearest_test/0*Test that the preprocess/3 function re-routes a request to remote peers, according to the node's routing table.
@@ -65,19 +65,19 @@ Defaults to `false`. Execute a request in the same way as `call/3`, but asynchronously. Always returns `<<"OK">>`. - + -### preprocess/3 ### +### request/3 ### -`preprocess(Msg1, Msg2, Opts) -> any()` +`request(Msg1, Msg2, Opts) -> any()` Preprocess a request to check if it should be relayed to a different node. - + -### preprocessor_reroute_to_nearest_test/0 * ### +### request_hook_reroute_to_nearest_test/0 * ### -`preprocessor_reroute_to_nearest_test() -> any()` +`request_hook_reroute_to_nearest_test() -> any()` Test that the `preprocess/3` function re-routes a request to remote peers, according to the node's routing table. diff --git a/docs/resources/source-code/dev_router.md b/docs/resources/source-code/dev_router.md index 1e63d56aa..c513e1b3d 100644 --- a/docs/resources/source-code/dev_router.md +++ b/docs/resources/source-code/dev_router.md @@ -42,12 +42,12 @@ The structure of the routes should be as follows:
add_route_test/0*
apply_route/2*Apply a node map's rules for transforming the path of the message.
apply_routes/3*Generate a uri key for each node in a route.
binary_to_bignum/1*Cast a human-readable or native-encoded ID to a big integer.
by_base_determinism_test/0*Ensure that By-Base always chooses the same node for the same -hashpath.
choose/5*Implements the load distribution strategies if given a cluster.
choose_1_test/1*
choose_n_test/1*
device_call_from_singleton_test/0*
dynamic_route_provider_test/0*
dynamic_router_test/0*Example of a Lua script being used as the route_provider for a +hashpath.
choose/5*Implements the load distribution strategies if given a cluster.
choose_1_test/1*
choose_n_test/1*
device_call_from_singleton_test/0*
dynamic_route_provider_test/0*
dynamic_router_test/0*Example of a Lua module being used as the route_provider for a HyperBEAM node.
dynamic_routing_by_performance/0*
dynamic_routing_by_performance_test_/0*Demonstrates routing tables being dynamically created and adjusted according to the real-time performance of nodes.
explicit_route_test/0*
extract_base/2*Extract the base message ID from a request message.
field_distance/2*Calculate the minimum distance between two numbers (either progressing backwards or forwards), assuming a -256-bit field.
find_target_path/2*Find the target path to route for a request message.
generate_hashpaths/1*
generate_nodes/1*
get_routes_test/0*
is_relevant/3is_relevant looks at the relevant_routes paths opt and if any incoming message path matches it will -make the request relevant for preprocessing.
load_routes/1*Load the current routes for the node.
local_dynamic_router_test/0*Example of a Lua script being used as the route_provider for a +256-bit field.
find_target_path/2*Find the target path to route for a request message.
generate_hashpaths/1*
generate_nodes/1*
get_routes_test/0*
info/1Exported function for getting device info, controls which functions are +exposed via the device API.
info/3HTTP info response providing information about this device.
load_routes/1*Load the current routes for the node.
local_dynamic_router_test/0*Example of a Lua module being used as the route_provider for a HyperBEAM node.
local_process_route_provider_test/0*
lowest_distance/1*Find the node with the lowest distance to the given hashpath.
lowest_distance/2*
match/3Find the first matching template in a list of known routes.
match_routes/3*
match_routes/4*
preprocess/3Preprocess a request to check if it should be relayed to a different node.
register/3
relay_nearest_test/0*
route/2Find the appropriate route for the given message.
route/3
route_provider_test/0*
route_regex_matches_test/0*
route_template_message_matches_test/0*
routes/3Device function that returns all known routes.
simulate/4*
simulation_distribution/2*
simulation_occurences/2*
strategy_suite_test_/0*
template_matches/3*Check if a message matches a message template or path regex.
unique_nodes/1*
unique_test/1*
weighted_random_strategy_test/0*
within_norms/3*
@@ -137,8 +137,8 @@ Implements the load distribution strategies if given a cluster. `dynamic_router_test() -> any()` -Example of a Lua script being used as the `route_provider` for a -HyperBEAM node. The script utilized in this example dynamically adjusts the +Example of a Lua module being used as the `route_provider` for a +HyperBEAM node. The module utilized in this example dynamically adjusts the likelihood of routing to a given node, depending upon price and performance. also include preprocessing support for routing @@ -210,14 +210,22 @@ Find the target path to route for a request message. `get_routes_test() -> any()` - + -### is_relevant/3 ### +### info/1 ### -`is_relevant(Msg1, Msg2, Opts) -> any()` +`info(X1) -> any()` -is_relevant looks at the relevant_routes paths opt and if any incoming message path matches it will -make the request relevant for preprocessing. +Exported function for getting device info, controls which functions are +exposed via the device API. + + + +### info/3 ### + +`info(Msg1, Msg2, Opts) -> any()` + +HTTP info response providing information about this device @@ -235,8 +243,8 @@ the node message's `routes` key, or dynamic routes generated by resolving the `local_dynamic_router_test() -> any()` -Example of a Lua script being used as the `route_provider` for a -HyperBEAM node. The script utilized in this example dynamically adjusts the +Example of a Lua module being used as the `route_provider` for a +HyperBEAM node. The module utilized in this example dynamically adjusts the likelihood of routing to a given node, depending upon price and performance. diff --git a/docs/resources/source-code/dev_scheduler.md b/docs/resources/source-code/dev_scheduler.md index f6fc4771c..e9a1cb10d 100644 --- a/docs/resources/source-code/dev_scheduler.md +++ b/docs/resources/source-code/dev_scheduler.md @@ -28,12 +28,13 @@ Process: `#{ id, Scheduler: #{ Authority } }`
benchmark_suite/2*
benchmark_suite_test_/0*
cache_remote_schedule/2*Cache a schedule received from a remote scheduler.
check_lookahead_and_local_cache/4*Check if we have a result from a lookahead worker or from our local cache.
checkpoint/1Returns the current state of the scheduler.
do_get_remote_schedule/6*Get a schedule from a remote scheduler, unless we already have already read all of the assignments from the local cache.
do_post_schedule/4*Post schedule the message.
filter_json_assignments/3*Filter JSON assignment results from a remote legacy scheduler.
find_message_to_schedule/3*Search the given base and request message pair to find the message to -schedule.
find_remote_scheduler/3*Use the SchedulerLocation to the remote path and return a redirect.
find_server/3*Locate the correct scheduling server for a given process.
find_server/4*
find_target_id/3*Find the schedule ID from a given request.
generate_local_schedule/5*Generate a GET /schedule response for a process.
generate_redirect/3*Generate a redirect message to a scheduler.
get_hint/2*If a hint is present in the string, return it.
get_local_assignments/4*Get the assignments for a process, and whether the request was truncated.
get_local_schedule_test/0*
get_remote_schedule/5*Get a schedule from a remote scheduler, but first read all of the +schedule.
find_remote_scheduler/3*Use the SchedulerLocation to the remote path and return a redirect.
find_server/3*Locate the correct scheduling server for a given process.
find_server/4*
find_target_id/3*Find the schedule ID from a given request.
generate_local_schedule/5*Generate a GET /schedule response for a process.
generate_redirect/3*Generate a redirect message to a scheduler.
get_hint/2*If a hint is present in the string, return it.
get_local_assignments/4*Get the assignments for a process, and whether the request was truncated.
get_local_schedule_test/0*
get_location/3*Search for the location of the scheduler in the scheduler-location +cache.
get_remote_schedule/5*Get a schedule from a remote scheduler, but first read all of the assignments from the local cache that we already know about.
get_schedule/3*Generate and return a schedule for a process, optionally between two slots -- labelled as from and to.
http_get_json_schedule_test_/0*
http_get_legacy_schedule_as_aos2_test_/0*
http_get_legacy_schedule_slot_range_test_/0*
http_get_legacy_schedule_test_/0*
http_get_legacy_slot_test_/0*
http_get_schedule/4*
http_get_schedule/5*
http_get_schedule_redirect_test/0*
http_get_schedule_test_/0*
http_get_slot/2*
http_init/0*
http_init/1*
http_post_legacy_schedule_test_/0*
http_post_schedule_sign/4*
http_post_schedule_test/0*
info/0This device uses a default_handler to route requests to the correct -function.
many_clients/1*
message_cached_assignments/2*Non-device exported helper to get the cached assignments held in a +function.
location/3Router for record requests.
many_clients/1*
message_cached_assignments/2*Non-device exported helper to get the cached assignments held in a process.
next/3Load the schedule for a process into the cache, then return the next -assignment.
node_from_redirect/2*Get the node URL from a redirect.
post_legacy_schedule/4*
post_remote_schedule/4*
post_schedule/3*Schedules a new message on the SU.
read_local_assignments/4*Get the assignments for a process.
redirect_from_graphql_test/0*
redirect_to_hint_test/0*
register/3Generate a new scheduler location record and register it.
register_new_process_test/0*
register_scheduler_test/0*
remote_slot/3*Get the current slot from a remote scheduler.
remote_slot/4*Get the current slot from a remote scheduler, based on the variant of +assignment.
node_from_redirect/2*Get the node URL from a redirect.
post_legacy_schedule/4*
post_location/3*Generate a new scheduler location record and register it.
post_remote_schedule/4*
post_schedule/3*Schedules a new message on the SU.
read_local_assignments/4*Get the assignments for a process.
redirect_from_graphql_test/0*
redirect_to_hint_test/0*
register_location_on_boot_test/0*Test that a scheduler location is registered on boot.
register_new_process_test/0*
register_scheduler_test/0*
remote_slot/3*Get the current slot from a remote scheduler.
remote_slot/4*Get the current slot from a remote scheduler, based on the variant of the process's scheduler.
router/4The default handler for the scheduler device.
schedule/3A router for choosing between getting the existing schedule, or scheduling a new message.
schedule_message_and_get_slot_test/0*
single_resolution/1*
slot/3Returns information about the current slot for a process.
spawn_lookahead_worker/3*Spawn a new Erlang process to fetch the next assignments from the local cache, if we have them available.
start/0Helper to ensure that the environment is started.
status/3Returns information about the entire scheduler.
status_test/0*
test_process/0Generate a _transformed_ process message, not as they are generated @@ -198,6 +199,17 @@ Get the assignments for a process, and whether the request was truncated. `get_local_schedule_test() -> any()` + + +### get_location/3 * ### + +`get_location(Msg1, Req, Opts) -> any()` + +Search for the location of the scheduler in the scheduler-location +cache. If an address is provided, we search for the location of that +specific scheduler. Otherwise, we return the location record for the current +node's scheduler, if it has been established. + ### get_remote_schedule/5 * ### @@ -316,6 +328,14 @@ we redirect to the remote scheduler or proxy based on the node opts. This device uses a default_handler to route requests to the correct function. + + +### location/3 ### + +`location(Msg1, Msg2, Opts) -> any()` + +Router for `record` requests. Expects either a `POST` or `GET` request. + ### many_clients/1 * ### @@ -356,6 +376,15 @@ Get the node URL from a redirect. `post_legacy_schedule(ProcID, OnlyCommitted, Node, Opts) -> any()` + + +### post_location/3 * ### + +`post_location(Msg1, RawReq, Opts) -> any()` + +Generate a new scheduler location record and register it. We both send +the new scheduler-location to the given registry, and return it to the caller. + ### post_remote_schedule/4 * ### @@ -392,14 +421,13 @@ Get the assignments for a process. `redirect_to_hint_test() -> any()` - + -### register/3 ### +### register_location_on_boot_test/0 * ### -`register(Msg1, Req, Opts) -> any()` +`register_location_on_boot_test() -> any()` -Generate a new scheduler location record and register it. We both send -the new scheduler-location to the given registry, and return it to the caller. +Test that a scheduler location is registered on boot. 
diff --git a/docs/resources/source-code/dev_scheduler_cache.md b/docs/resources/source-code/dev_scheduler_cache.md index 9ccbafa18..2ddd9866a 100644 --- a/docs/resources/source-code/dev_scheduler_cache.md +++ b/docs/resources/source-code/dev_scheduler_cache.md @@ -59,7 +59,7 @@ Write an assignment message into the cache. ### write_location/2 ### -`write_location(LocationMsg, Opts) -> any()` +`write_location(LocMsg, Opts) -> any()` Write the latest known scheduler location for an address. diff --git a/docs/resources/source-code/dev_simple_pay.md b/docs/resources/source-code/dev_simple_pay.md index cbf8d6ae5..b65230680 100644 --- a/docs/resources/source-code/dev_simple_pay.md +++ b/docs/resources/source-code/dev_simple_pay.md @@ -19,7 +19,7 @@ definition.
balance/3Get the balance of a user in the ledger.
debit/3Preprocess a request by checking the ledger and charging the user.
estimate/3Estimate the cost of a request by counting the number of messages in -the request, then multiplying by the per-message price.
get_balance/2*Get the balance of a user in the ledger.
get_balance_and_top_up_test/0*
is_operator/2*Check if the request is from the operator.
set_balance/3*Adjust a user's balance, normalizing their wallet ID first.
test_opts/0*
test_opts/1*
topup/3Top up the user's balance in the ledger.
+the request, then multiplying by the per-message price.
get_balance/2*Get the balance of a user in the ledger.
get_balance_and_top_up_test/0*
is_operator/2*Check if the request is from the operator.
set_balance/3*Adjust a user's balance, normalizing their wallet ID first.
test_opts/1*
topup/3Top up the user's balance in the ledger.
@@ -84,12 +84,6 @@ Check if the request is from the operator. Adjust a user's balance, normalizing their wallet ID first. - - -### test_opts/0 * ### - -`test_opts() -> any()` - ### test_opts/1 * ### diff --git a/docs/resources/source-code/dev_snp.md b/docs/resources/source-code/dev_snp.md index b7821bd2a..9eb969850 100644 --- a/docs/resources/source-code/dev_snp.md +++ b/docs/resources/source-code/dev_snp.md @@ -14,8 +14,8 @@ as well as generating them, if called in an appropriate environment.
execute_is_trusted/3*Ensure that all of the software hashes are trusted.
generate/3Generate an commitment report and emit it as a message, including all of the necessary data to generate the nonce (ephemeral node address + node message ID), as well as the expected measurement (firmware, kernel, and VMSAs -hashes).
generate_nonce/2*Generate the nonce to use in the commitment report.
init/3Should take in options to set for the device such as kernel, initrd, firmware, -and append hashes and make them available to the device.
is_debug/1*Ensure that the node's debug policy is disabled.
real_node_test/0*
report_data_matches/3*Ensure that the report data matches the expected report data.
trusted/3Default implementation of a resolver for trusted software.
verify/3Verify an commitment report message; validating the identity of a +hashes).
generate_nonce/2*Generate the nonce to use in the commitment report.
is_debug/1*Ensure that the node's debug policy is disabled.
real_node_test/0*
report_data_matches/3*Ensure that the report data matches the expected report data.
trusted/3Validates if a given message parameter matches a trusted value from the SNP trusted list. +Returns {ok, true} if the message is trusted, {ok, false} otherwise.
verify/3Verify a commitment report message; validating the identity of a remote node, its ephemeral private address, and the integrity of the report. @@ -52,17 +52,6 @@ hashes). Generate the nonce to use in the commitment report. - - -### init/3 ### - -`init(M1, M2, Opts) -> any()` - -Should take in options to set for the device such as kernel, initrd, firmware, -and append hashes and make them available to the device. Only runnable once, -and only if the operator is not set to an address (and thus, the node has not -had any priviledged access). - ### is_debug/1 * ### @@ -91,9 +80,8 @@ Ensure that the report data matches the expected report data. `trusted(Msg1, Msg2, NodeOpts) -> any()` -Default implementation of a resolver for trusted software. Searches the -`trusted` key in the base message for a list of trusted values, and checks -if the value in the request message is a member of that list. +Validates if a given message parameter matches a trusted value from the SNP trusted list. +Returns {ok, true} if the message is trusted, {ok, false} otherwise. diff --git a/docs/resources/source-code/dev_test.md index 38835cba6..854ac1d7d 100644 --- a/docs/resources/source-code/dev_test.md +++ b/docs/resources/source-code/dev_test.md @@ -9,10 +9,10 @@
@@ -52,17 +52,6 @@ hashes). Generate the nonce to use in the commitment report. - - -### init/3 ### - -`init(M1, M2, Opts) -> any()` - -Should take in options to set for the device such as kernel, initrd, firmware, -and append hashes and make them available to the device. Only runnable once, -and only if the operator is not set to an address (and thus, the node has not -had any priviledged access). - ### is_debug/1 * ### @@ -91,9 +80,8 @@ Ensure that the report data matches the expected report data. `trusted(Msg1, Msg2, NodeOpts) -> any()` -Default implementation of a resolver for trusted software. Searches the -`trusted` key in the base message for a list of trusted values, and checks -if the value in the request message is a member of that list. +Validates if a given message parameter matches a trusted value from the SNP trusted list +Returns {ok, true} if the message is trusted, {ok, false} otherwise diff --git a/docs/resources/source-code/dev_test.md b/docs/resources/source-code/dev_test.md index 38835cba6..854ac1d7d 100644 --- a/docs/resources/source-code/dev_test.md +++ b/docs/resources/source-code/dev_test.md @@ -9,10 +9,10 @@
compute/3Example implementation of a compute handler.
compute_test/0*
delay/3Does nothing, just sleeps Req/duration or 750 ms and returns the -appropriate form in order to be used as preprocessor.
device_with_function_key_module_test/0*Tests the resolution of a default function.
increment_counter/3Find a test worker's PID and send it an increment message.
info/1Exports a default_handler function that can be used to test the +appropriate form in order to be used as a hook.
device_with_function_key_module_test/0*Tests the resolution of a default function.
increment_counter/3Find a test worker's PID and send it an increment message.
info/1Exports a default_handler function that can be used to test the handler resolution mechanism.
info/3Exports a default_handler function that can be used to test the handler resolution mechanism.
init/3Example init/3 handler.
mul/2Example implementation of an imported function for a WASM -executor.
postprocess/3Set the postprocessor-called key to true in the HTTP server.
restore/3Example restore/3 handler.
restore_test/0*
snapshot/3Do nothing when asked to snapshot.
test_func/1
update_state/3Find a test worker's PID and send it an update message.
+executor.
restore/3Example restore/3 handler.
restore_test/0*
snapshot/3Do nothing when asked to snapshot.
test_func/1
update_state/3Find a test worker's PID and send it an update message.
@@ -42,7 +42,7 @@ slot number in the results key. `delay(Msg1, Req, Opts) -> any()` Does nothing, just sleeps `Req/duration or 750` ms and returns the -appropriate form in order to be used as preprocessor. +appropriate form in order to be used as a hook. @@ -95,14 +95,6 @@ Example `init/3` handler. Sets the `Already-Seen` key to an empty list. Example implementation of an `imported` function for a WASM executor. - - -### postprocess/3 ### - -`postprocess(Msg, X2, Opts) -> any()` - -Set the `postprocessor-called` key to true in the HTTP server. - ### restore/3 ### diff --git a/docs/resources/source-code/dev_volume.md b/docs/resources/source-code/dev_volume.md new file mode 100644 index 000000000..210df23d9 --- /dev/null +++ b/docs/resources/source-code/dev_volume.md @@ -0,0 +1,271 @@ +# [Module dev_volume.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/dev_volume.erl) + + + + +Secure Volume Management for HyperBEAM Nodes. + + + +## Description ## + +This module handles encrypted storage operations for HyperBEAM, providing +a robust and secure approach to data persistence. It manages the complete +lifecycle of encrypted volumes from detection to creation, formatting, and +mounting. + +Key responsibilities: +- Volume detection and initialization +- Encrypted partition creation and formatting +- Secure mounting using cryptographic keys +- Store path reconfiguration to use mounted volumes +- Automatic handling of various system states +(new device, existing partition, etc.) + +The primary entry point is the `mount/3` function, which orchestrates the +entire process based on the provided configuration parameters. This module +works alongside `hb_volume` which provides the low-level operations for +device manipulation. 
+ +Security considerations: +- Ensures data at rest is protected through LUKS encryption +- Provides proper volume sanitization and secure mounting +- IMPORTANT: This module only applies configuration set in node options and +does NOT accept disk operations via HTTP requests. It cannot format arbitrary +disks as all operations are safeguarded by host operating system permissions +enforced upon the HyperBEAM environment. + +## Function Index ## + + +
check_base_device/8*Check if the base device exists and if it does, check if the partition exists.
check_partition/8*Check if the partition exists.
create_and_mount_partition/8*Create, format and mount a new partition.
decrypt_volume_key/2*Decrypts an encrypted volume key using the node's private key.
format_and_mount/6*Format and mount a newly created partition.
info/1Exported function for getting device info, controls which functions are +exposed via the device API.
info/3HTTP info response providing information about this device.
mount/3Handles the complete process of secure encrypted volume mounting.
mount_existing_partition/6*Mount an existing partition.
mount_formatted_partition/6*Mount a newly formatted partition.
public_key/3Returns the node's public key for secure key exchange.
update_node_config/2*Update the node's configuration with the new store.
update_store_path/2*Update the store path to use the mounted volume.
+ + + + +## Function Details ## + + + +### check_base_device/8 * ### + +

+check_base_device(Device::term(), Partition::term(), PartitionType::term(), VolumeName::term(), MountPoint::term(), StorePath::term(), Key::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Device`: The base device to check.
`Partition`: The partition to check.
`PartitionType`: The type of partition to check.
`VolumeName`: The name of the volume to check.
`MountPoint`: The mount point to check.
`StorePath`: The store path to check.
`Key`: The key to check.
`Opts`: The options to check.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Check if the base device exists and if it does, check if the partition exists. + + + +### check_partition/8 * ### + +

+check_partition(Device::term(), Partition::term(), PartitionType::term(), VolumeName::term(), MountPoint::term(), StorePath::term(), Key::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Device`: The base device to check.
`Partition`: The partition to check.
`PartitionType`: The type of partition to check.
`VolumeName`: The name of the volume to check.
`MountPoint`: The mount point to check.
`StorePath`: The store path to check.
`Key`: The key to check.
`Opts`: The options to check.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Check if the partition exists. If it does, attempt to mount it. +If it doesn't exist, create it, format it with encryption and mount it. + + + +### create_and_mount_partition/8 * ### + +

+create_and_mount_partition(Device::term(), Partition::term(), PartitionType::term(), Key::term(), MountPoint::term(), VolumeName::term(), StorePath::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Device`: The device to create the partition on.
`Partition`: The partition to create.
`PartitionType`: The type of partition to create.
`Key`: The key to create the partition with.
`MountPoint`: The mount point to mount the partition to.
`VolumeName`: The name of the volume to mount.
`StorePath`: The store path to mount.
`Opts`: The options to mount.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Create, format and mount a new partition. + + + +### decrypt_volume_key/2 * ### + +

+decrypt_volume_key(EncryptedKeyBase64::binary(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Opts`: A map of configuration options.
+ +returns: `{ok, DecryptedKey}` on successful decryption, or +`{error, Binary}` if decryption fails. + +Decrypts an encrypted volume key using the node's private key. + +This function takes an encrypted key (typically sent by a client who encrypted +it with the node's public key) and decrypts it using the node's private RSA key. + + + +### format_and_mount/6 * ### + +

+format_and_mount(Partition::term(), Key::term(), MountPoint::term(), VolumeName::term(), StorePath::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Partition`: The partition to format and mount.
`Key`: The key to format and mount the partition with.
`MountPoint`: The mount point to mount the partition to.
`VolumeName`: The name of the volume to mount.
`StorePath`: The store path to mount.
`Opts`: The options to mount.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Format and mount a newly created partition. + + + +### info/1 ### + +`info(X1) -> any()` + +Exported function for getting device info, controls which functions are +exposed via the device API. + + + +### info/3 ### + +`info(Msg1, Msg2, Opts) -> any()` + +HTTP info response providing information about this device + + + +### mount/3 ### + +

+mount(M1::term(), M2::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`M1`: Base message for context.
`M2`: Request message with operation details.
`Opts`: A map of configuration options for volume operations.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Handles the complete process of secure encrypted volume mounting. + +This function performs the following operations depending on the state: +1. Validates the encryption key is present +2. Checks if the base device exists +3. Checks if the partition exists on the device +4. If the partition exists, attempts to mount it +5. If the partition doesn't exist, creates it, formats it with encryption +and mounts it +6. Updates the node's store configuration to use the mounted volume + +Config options in Opts map: +- volume_key: (Required) The encryption key +- volume_device: Base device path +- volume_partition: Partition path +- volume_partition_type: Filesystem type +- volume_name: Name for encrypted volume +- volume_mount_point: Where to mount +- volume_store_path: Store path on volume + + + +### mount_existing_partition/6 * ### + +

+mount_existing_partition(Partition::term(), Key::term(), MountPoint::term(), VolumeName::term(), StorePath::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Partition`: The partition to mount.
`Key`: The key to mount.
`MountPoint`: The mount point to mount.
`VolumeName`: The name of the volume to mount.
`StorePath`: The store path to mount.
`Opts`: The options to mount.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Mount an existing partition. + + + +### mount_formatted_partition/6 * ### + +

+mount_formatted_partition(Partition::term(), Key::term(), MountPoint::term(), VolumeName::term(), StorePath::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`Partition`: The partition to mount.
`Key`: The key to mount the partition with.
`MountPoint`: The mount point to mount the partition to.
`VolumeName`: The name of the volume to mount.
`StorePath`: The store path to mount.
`Opts`: The options to mount.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Mount a newly formatted partition. + + + +### public_key/3 ### + +

+public_key(M1::term(), M2::term(), Opts::map()) -> {ok, map()} | {error, binary()}
+
+
+ +`Opts`: A map of configuration options.
+ +returns: `{ok, Map}` containing the node's public key on success, or +`{error, Binary}` if the node's wallet is not available. + +Returns the node's public key for secure key exchange. + +This function retrieves the node's wallet and extracts the public key +for encryption purposes. It allows users to securely exchange encryption keys +by first encrypting their volume key with the node's public key. + +The process ensures that sensitive keys are never transmitted in plaintext. +The encrypted key can then be securely sent to the node, which will decrypt it +using its private key before using it for volume encryption. + + + +### update_node_config/2 * ### + +

+update_node_config(NewStore::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`NewStore`: The new store to update the node's configuration with.
`Opts`: The options to update the node's configuration with.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Update the node's configuration with the new store. + + + +### update_store_path/2 * ### + +

+update_store_path(StorePath::term(), Opts::map()) -> {ok, binary()} | {error, binary()}
+
+
+ +`StorePath`: The store path to update.
`Opts`: The options to update.
+ +returns: `{ok, Binary}` on success with operation result message, or +`{error, Binary}` on failure with error message. + +Update the store path to use the mounted volume. + diff --git a/docs/resources/source-code/edoc-info b/docs/resources/source-code/edoc-info index 6553a7347..1163d1833 100644 --- a/docs/resources/source-code/edoc-info +++ b/docs/resources/source-code/edoc-info @@ -5,20 +5,20 @@ dev_codec_httpsig,dev_codec_httpsig_conv,dev_codec_json, dev_codec_structured,dev_cron,dev_cu,dev_dedup, dev_delegated_compute,dev_faff,dev_genesis_wasm,dev_green_zone, - dev_hyperbuddy,dev_json_iface,dev_local_name,dev_lookup,dev_lua, - dev_lua_lib,dev_lua_test,dev_manifest,dev_message,dev_meta, + dev_hook,dev_hyperbuddy,dev_json_iface,dev_local_name,dev_lookup, + dev_lua,dev_lua_lib,dev_lua_test,dev_manifest,dev_message,dev_meta, dev_monitor,dev_multipass,dev_name,dev_node_process,dev_p4, dev_patch,dev_poda,dev_process,dev_process_cache,dev_process_worker, dev_push,dev_relay,dev_router,dev_scheduler,dev_scheduler_cache, dev_scheduler_formats,dev_scheduler_registry,dev_scheduler_server, - dev_simple_pay,dev_snp,dev_snp_nif,dev_stack,dev_test,dev_wasi, - dev_wasm,hb,hb_ao,hb_ao_test_vectors,hb_app,hb_beamr,hb_beamr_io, - hb_cache,hb_cache_control,hb_cache_render,hb_client,hb_crypto, - hb_debugger,hb_escape,hb_event,hb_examples,hb_features, + dev_simple_pay,dev_snp,dev_snp_nif,dev_stack,dev_test,dev_volume, + dev_wasi,dev_wasm,hb,hb_ao,hb_ao_test_vectors,hb_app,hb_beamr, + hb_beamr_io,hb_cache,hb_cache_control,hb_cache_render,hb_client, + hb_crypto,hb_debugger,hb_escape,hb_event,hb_examples,hb_features, hb_gateway_client,hb_http,hb_http_benchmark_tests,hb_http_client, - hb_http_client_sup,hb_http_server,hb_json,hb_logger,hb_message, - hb_metrics_collector,hb_name,hb_opts,hb_path,hb_persistent, - hb_private,hb_process_monitor,hb_router,hb_singleton,hb_store, - hb_store_fs,hb_store_gateway,hb_store_remote_node,hb_store_rocksdb, - 
hb_structured_fields,hb_sup,hb_test_utils,hb_tracer,hb_util, - hb_volume,rsa_pss]}. + hb_http_client_sup,hb_http_server,hb_json,hb_keccak,hb_logger, + hb_message,hb_metrics_collector,hb_name,hb_opts,hb_path, + hb_persistent,hb_private,hb_process_monitor,hb_router,hb_singleton, + hb_store,hb_store_fs,hb_store_gateway,hb_store_remote_node, + hb_store_rocksdb,hb_structured_fields,hb_sup,hb_test_utils, + hb_tracer,hb_util,hb_volume,rsa_pss]}. diff --git a/docs/resources/source-code/hb_ao.md b/docs/resources/source-code/hb_ao.md index 329864dcf..a01a66fb5 100644 --- a/docs/resources/source-code/hb_ao.md +++ b/docs/resources/source-code/hb_ao.md @@ -95,7 +95,7 @@ HyperBEAM device implementations are defined as follows:
deep_set/4Recursively search a map, resolving keys, and set the value of the key at the given path.
default_module/0*The default device is the identity device, which simply returns the -value associated with any key as it exists in its Erlang map.
device_set/4*Call the device's set function.
do_resolve_many/2*
ensure_loaded/2*Ensure that the message is loaded from the cache if it is an ID.
error_execution/5*Handle an error in a device call.
error_infinite/3*Catch all return if we are in an infinite loop.
error_invalid_intermediate_status/5*
error_invalid_message/3*Catch all return if the message is invalid.
find_exported_function/5Find the function with the highest arity that has the given name, if it +value associated with any key as it exists in its Erlang map.
device_set/4*Call the device's set function.
device_set/5*
do_resolve_many/2*
ensure_loaded/2*Ensure that the message is loaded from the cache if it is an ID.
error_execution/5*Handle an error in a device call.
error_infinite/3*Catch all return if we are in an infinite loop.
error_invalid_intermediate_status/5*
error_invalid_message/3*Catch all return if the message is invalid.
find_exported_function/5Find the function with the highest arity that has the given name, if it exists.
force_message/2
get/2Shortcut for resolving a key in a message without its status if it is ok.
get/3
get/4
get_first/2take a sequence of base messages and paths, then return the value of the first message that can be resolved using a path.
get_first/3
info/2Get the info map for a device, optionally giving it a message if the @@ -118,7 +118,10 @@ actually takes.
@@ -139,6 +142,12 @@ according to the `Message2` passed to it. Call the device's `set` function. + + +### device_set/5 * ### + +`device_set(Msg, Key, Value, Mode, Opts) -> any()` + ### do_resolve_many/2 * ### @@ -430,11 +439,13 @@ The resolver is composed of a series of discrete phases: 4: Persistent-resolver lookup. 5: Device lookup. 6: Execution. -7: Cryptographic linking. -8: Result caching. -9: Notify waiters. -10: Fork worker. -11: Recurse or terminate. +7: Execution of the `step` hook. +8: Subresolution. +9: Cryptographic linking. +10: Result caching. +11: Notify waiters. +12: Fork worker. +13: Recurse or terminate. diff --git a/docs/resources/source-code/hb_ao_test_vectors.md b/docs/resources/source-code/hb_ao_test_vectors.md index f4e179262..967e70f79 100644 --- a/docs/resources/source-code/hb_ao_test_vectors.md +++ b/docs/resources/source-code/hb_ao_test_vectors.md @@ -12,7 +12,7 @@ execution under different circumstances.
as_path_test/1*
basic_get_test/1*
basic_set_test/1*
continue_as_test/1*
deep_recursive_get_test/1*
deep_set_new_messages_test/0*
deep_set_test/1*
deep_set_with_device_test/1*
denormalized_device_key_test/1*
device_excludes_test/1*
device_exports_test/1*
device_with_default_handler_function_test/1*
device_with_handler_function_test/1*
exec_dummy_device/2*Ensure that we can read a device from the cache then execute it.
gen_default_device/0*Create a simple test device that implements the default handler.
gen_handler_device/0*Create a simple test device that implements the handler key.
generate_device_with_keys_using_args/0*Generates a test device with three keys, each of which uses -progressively more of the arguments that can be passed to a device key.
get_as_with_device_test/1*
get_with_device_test/1*
key_from_id_device_with_args_test/1*Test that arguments are passed to a device key as expected.
key_to_binary_test/1*
list_transform_test/1*
load_as_test/1*
load_device_test/0*
recursive_get_test/1*
resolve_binary_key_test/1*
resolve_from_multiple_keys_test/1*
resolve_id_test/1*
resolve_key_twice_test/1*
resolve_path_element_test/1*
resolve_simple_test/1*
run_all_test_/0*Run each test in the file with each set of options.
run_test/0*
set_with_device_test/1*
start_as_test/1*
start_as_with_parameters_test/1*
test_opts/0*
test_suite/0*
untrusted_load_device_test/0*
+progressively more of the arguments that can be passed to a device key.
get_as_with_device_test/1*
get_with_device_test/1*
key_from_id_device_with_args_test/1*Test that arguments are passed to a device key as expected.
key_to_binary_test/1*
list_transform_test/1*
load_as_test/1*
load_device_test/0*
recursive_get_test/1*
resolve_binary_key_test/1*
resolve_from_multiple_keys_test/1*
resolve_id_test/1*
resolve_key_twice_test/1*
resolve_path_element_test/1*
resolve_simple_test/1*
run_all_test_/0*Run each test in the file with each set of options.
run_test/0*
set_with_device_test/1*
start_as_test/1*
start_as_with_parameters_test/1*
step_hook_test/1*
test_opts/0*
test_suite/0*
untrusted_load_device_test/0*
@@ -253,6 +253,12 @@ the store for each test. `start_as_with_parameters_test(Opts) -> any()` + + +### step_hook_test/1 * ### + +`step_hook_test(InitOpts) -> any()` + ### test_opts/0 * ### diff --git a/docs/resources/source-code/hb_cache_render.md b/docs/resources/source-code/hb_cache_render.md index 0a543f115..dd12d54f2 100644 --- a/docs/resources/source-code/hb_cache_render.md +++ b/docs/resources/source-code/hb_cache_render.md @@ -10,7 +10,7 @@ A module that helps to render given Key graphs into the .dot files. ## Function Index ## -
add_arc/4*Add an arc to the graph.
add_node/3*Add a node to the graph.
cache_path_to_dot/2Generate a dot file from a cache path and options/store.
cache_path_to_dot/3
cache_path_to_graph/3*Main function to collect graph elements.
collect_output/2*Helper function to collect output from port.
dot_to_svg/1Convert a dot graph to SVG format.
extract_label/1*Extract a label from a path.
graph_to_dot/1*Generate the DOT file from the graph.
prepare_deeply_nested_complex_message/0
prepare_signed_data/0
prepare_unsigned_data/0
process_composite_node/6*Process a composite (directory) node.
process_simple_node/6*Process a simple (leaf) node.
render/1Render the given Key into svg.
render/2
test_signed/2*
test_unsigned/1*
traverse_store/4*Traverse the store recursively to build the graph.
+
add_arc/4*Add an arc to the graph.
add_node/3*Add a node to the graph.
cache_path_to_dot/2Generate a dot file from a cache path and options/store.
cache_path_to_dot/3
cache_path_to_graph/3Main function to collect graph elements.
collect_output/2*Helper function to collect output from port.
dot_to_svg/1Convert a dot graph to SVG format.
extract_label/1*Extract a label from a path.
get_graph_data/1Get graph data for the Three.js visualization.
get_label/1*Extract a readable label from a path.
get_node_type/1*Convert node color from hb_cache_render to node type for visualization.
graph_to_dot/1*Generate the DOT file from the graph.
prepare_deeply_nested_complex_message/0
prepare_signed_data/0
prepare_unsigned_data/0
process_composite_node/6*Process a composite (directory) node.
process_simple_node/6*Process a simple (leaf) node.
render/1Render the given Key into svg.
render/2
test_signed/2*
test_unsigned/1*
traverse_store/4*Traverse the store recursively to build the graph.
@@ -49,7 +49,7 @@ Generate a dot file from a cache path and options/store -### cache_path_to_graph/3 * ### +### cache_path_to_graph/3 ### `cache_path_to_graph(ToRender, GraphOpts, StoreOrOpts) -> any()` @@ -79,6 +79,30 @@ Convert a dot graph to SVG format Extract a label from a path + + +### get_graph_data/1 ### + +`get_graph_data(Opts) -> any()` + +Get graph data for the Three.js visualization + + + +### get_label/1 * ### + +`get_label(Path) -> any()` + +Extract a readable label from a path + + + +### get_node_type/1 * ### + +`get_node_type(Color) -> any()` + +Convert node color from hb_cache_render to node type for visualization + ### graph_to_dot/1 * ### diff --git a/docs/resources/source-code/hb_http_server.md b/docs/resources/source-code/hb_http_server.md index 90e1ce4fd..946292de2 100644 --- a/docs/resources/source-code/hb_http_server.md +++ b/docs/resources/source-code/hb_http_server.md @@ -23,8 +23,9 @@ the execution parameters of all downstream requests to be controlled.allowed_methods/2Return the list of allowed methods for the HTTP server.cors_reply/2*Reply to CORS preflight requests.get_opts/1handle_request/3*Handle all non-CORS preflight requests as AO-Core requests.http3_conn_sup_loop/0*init/2Entrypoint for all HTTP requests.new_server/1*read_body/1*Helper to grab the full body of a HTTP request, even if it's chunked.read_body/2*set_default_opts/1set_opts/1Update the Opts map that the HTTP server uses for all future -requests.start/0Starts the HTTP server.start/1start_http2/3*start_http3/3*start_node/0Test that we can start the server, send a message, and get a response.start_node/1 +
allowed_methods/2Return the list of allowed methods for the HTTP server.
cors_reply/2*Reply to CORS preflight requests.
get_opts/1
handle_request/3*Handle all non-CORS preflight requests as AO-Core requests.
http3_conn_sup_loop/0*
init/2Entrypoint for all HTTP requests.
new_server/1*Trigger the creation of a new HTTP server node.
read_body/1*Helper to grab the full body of a HTTP request, even if it's chunked.
read_body/2*
set_default_opts/1
set_node_opts_test/0*Ensure that the start hook can be used to modify the node options.
set_opts/1Merges the provided Opts with uncommitted values from Request, +preserves the http_server value, and updates node_history by prepending +the Request.
set_opts/2
start/0Starts the HTTP server.
start/1
start_http2/3*
start_http3/3*
start_node/0Test that we can start the server, send a message, and get a response.
start_node/1
@@ -85,6 +86,12 @@ the server ID, which can be used to lookup the node message. `new_server(RawNodeMsg) -> any()` +Trigger the creation of a new HTTP server node. Accepts a `NodeMsg` +message, which is used to configure the server. This function executes the +`start` hook on the node, giving it the opportunity to modify the `NodeMsg` +before it is used to configure the server. The `start` hook gives and +expects the node message to be in the `body` key. + ### read_body/1 * ### @@ -105,14 +112,34 @@ Helper to grab the full body of a HTTP request, even if it's chunked. `set_default_opts(Opts) -> any()` + + +### set_node_opts_test/0 * ### + +`set_node_opts_test() -> any()` + +Ensure that the `start` hook can be used to modify the node options. We +do this by creating a message with a device that has a `start` key. This +key takes the message's body (the anticipated node options) and returns a +modified version of that body, which will be used to configure the node. We +then check that the node options were modified as we expected. + ### set_opts/1 ### `set_opts(Opts) -> any()` -Update the `Opts` map that the HTTP server uses for all future -requests. +Merges the provided `Opts` with uncommitted values from `Request`, +preserves the http_server value, and updates node_history by prepending +the `Request`. If a server reference exists, updates the Cowboy environment +variable 'node_msg' with the resulting options map. + + + +### set_opts/2 ### + +`set_opts(Request, Opts) -> any()` diff --git a/docs/resources/source-code/hb_keccak.md new file mode 100644 index 000000000..ec3d4f0bb --- /dev/null +++ b/docs/resources/source-code/hb_keccak.md @@ -0,0 +1,77 @@ +# [Module hb_keccak.erl](https://github.com/permaweb/HyperBEAM/blob/main/src/hb_keccak.erl) + + + + + + +## Function Index ## + + 
hash_to_checksum_address/2*
init/0*
keccak_256/1
keccak_256_key_test/0*
keccak_256_key_to_address_test/0*
keccak_256_test/0*
key_to_ethereum_address/1
sha3_256/1
sha3_256_test/0*
to_hex/1*
+ + + + +## Function Details ## + + + +### hash_to_checksum_address/2 * ### + +`hash_to_checksum_address(Last40, Hash) -> any()` + + + +### init/0 * ### + +`init() -> any()` + + + +### keccak_256/1 ### + +`keccak_256(Bin) -> any()` + + + +### keccak_256_key_test/0 * ### + +`keccak_256_key_test() -> any()` + + + +### keccak_256_key_to_address_test/0 * ### + +`keccak_256_key_to_address_test() -> any()` + + + +### keccak_256_test/0 * ### + +`keccak_256_test() -> any()` + + + +### key_to_ethereum_address/1 ### + +`key_to_ethereum_address(Key) -> any()` + + + +### sha3_256/1 ### + +`sha3_256(Bin) -> any()` + + + +### sha3_256_test/0 * ### + +`sha3_256_test() -> any()` + + + +### to_hex/1 * ### + +`to_hex(Bin) -> any()` + diff --git a/docs/resources/source-code/hb_opts.md b/docs/resources/source-code/hb_opts.md index 63c395bc7..fcb0bea22 100644 --- a/docs/resources/source-code/hb_opts.md +++ b/docs/resources/source-code/hb_opts.md @@ -28,10 +28,10 @@ with a refusal to execute.
cached_os_env/2*Cache the result of os:getenv/1 in the process dictionary, as it never -changes during the lifetime of a node.
config_lookup/2*An abstraction for looking up configuration variables.
default_message/0The default configuration options of the hyperbeam node.
get/1Get an option from the global options, optionally overriding with a +changes during the lifetime of a node.
check_required_opts/2Utility function to check for required options in a list.
config_lookup/2*An abstraction for looking up configuration variables.
default_message/0The default configuration options of the hyperbeam node.
get/1Get an option from the global options, optionally overriding with a local Opts map if prefer or only is set to local.
get/2
get/3
global_get/2*Get an environment variable or configuration key.
load/1Parse a flat@1.0 encoded file into a map, matching the types of the keys to those in the default message.
load_bin/1
mimic_default_types/2Mimic the types of the default message for a given map.
normalize_default/1*Get an option from environment variables, optionally consulting the -hb_features of the node if a conditional default tuple is provided.
+hb_features of the node if a conditional default tuple is provided.validate_node_history/1Validate that the node_history length is within an acceptable range.validate_node_history/3 @@ -47,6 +47,26 @@ keys to those in the default message. + +### check_required_opts/2 ### + +

+check_required_opts(KeyValuePairs::[{binary(), term()}], Opts::map()) -> {ok, map()} | {error, binary()}
+
+
+ +`KeyValuePairs`: A list of {Name, Value} pairs to check.
`Opts`: The original options map to return if validation succeeds.
+ +returns: `{ok, Opts}` if all required options are present, or +`{error, <<"Missing required parameters: ", MissingOptsStr/binary>>}` +where `MissingOptsStr` is a comma-separated list of missing option names. + +Utility function to check for required options in a list. +Takes a list of {Name, Value} pairs and returns: +- {ok, Opts} when all required options are present (Value =/= not_found) +- {error, ErrorMsg} with a message listing all missing options when any are not_found + ### config_lookup/2 * ### @@ -131,3 +151,17 @@ Mimic the types of the default message for a given map. Get an option from environment variables, optionally consulting the `hb_features` of the node if a conditional default tuple is provided. + + +### validate_node_history/1 ### + +`validate_node_history(Opts) -> any()` + +Validate that the node_history length is within an acceptable range. + + + +### validate_node_history/3 ### + +`validate_node_history(Opts, MinLength, MaxLength) -> any()` + diff --git a/docs/resources/source-code/hb_singleton.md b/docs/resources/source-code/hb_singleton.md index 50c06999e..652f2b669 100644 --- a/docs/resources/source-code/hb_singleton.md +++ b/docs/resources/source-code/hb_singleton.md @@ -2,7 +2,6 @@ -* [Data Types](#types) A parser that translates AO-Core HTTP API requests in TABM format into an ordered list of messages to evaluate. @@ -73,10 +72,10 @@ tabm_message() = map() ## Function Index ## -
all_path_parts/2*Extract all of the parts from the binary, given (a list of) separators.
append_path/2*
apply_types/1*Step 3: Apply types to values and remove specifiers.
basic_hashpath_test/0*
basic_hashpath_to_test/0*
build_messages/2*Step 5: Merge the base message with the scoped messages.
decode_string/1*Attempt Cowboy URL decode, then sanitize the result.
do_build/3*
from/1Normalize a singleton TABM message into a list of executable AO-Core +
all_path_parts/2*Extract all of the parts from the binary, given (a list of) separators.
append_path/2*
apply_types/1*Step 3: Apply types to values and remove specifiers.
basic_hashpath_test/0*
basic_hashpath_to_test/0*
build/3*
build_messages/2*Step 5: Merge the base message with the scoped messages.
decode_string/1*Attempt Cowboy URL decode, then sanitize the result.
from/1Normalize a singleton TABM message into a list of executable AO-Core messages.
group_scoped/2*Step 4: Group headers/query by N-scope.
inlined_keys_test/0*
inlined_keys_to_test/0*
maybe_join/2*Join a list of items with a separator, or return the first item if there is only one item.
maybe_subpath/1*Check if the string is a subpath, returning it in parsed form, -or the original string with a specifier.
maybe_typed/2*Parse a key's type (applying it to the value) and device name if present.
multiple_inlined_keys_test/0*
multiple_inlined_keys_to_test/0*
multiple_messages_test/0*
multiple_messages_to_test/0*
normalize_base/1*Normalize the base path.
parse_explicit_message_test/0*
parse_full_path/1*Parse the relative reference into path, query, and fragment.
parse_inlined_key_val/1*Extrapolate the inlined key-value pair from a path segment.
parse_part/1*Parse a path part into a message or an ID.
parse_part_mods/2*Parse part modifiers: +or the original string with a specifier.
maybe_typed/2*Parse a key's type (applying it to the value) and device name if present.
multiple_inlined_keys_test/0*
multiple_inlined_keys_to_test/0*
multiple_messages_test/0*
multiple_messages_to_test/0*
normalize_base/1*Normalize the base path.
parse_explicit_message_test/0*
parse_full_path/1*Parse the relative reference into path, query, and fragment.
parse_inlined_key_val/1*Extrapolate the inlined key-value pair from a path segment.
parse_inlined_keys/2*Parse inlined key-value pairs from a path segment.
parse_part/1*Parse a path part into a message or an ID.
parse_part_mods/2*Parse part modifiers: 1.
parse_scope/1*Get the scope of a key.
part/2*Extract the characters from the binary until a separator is found.
part/4*
path_messages/1*Step 2: Decode, split and sanitize the path.
path_parts/2*Split the path into segments, filtering out empty segments and segments that are too long.
path_parts_test/0*
scoped_key_test/0*
scoped_key_to_test/0*
simple_to_test/0*
single_message_test/0*
subpath_in_inlined_test/0*
subpath_in_inlined_to_test/0*
subpath_in_key_test/0*
subpath_in_key_to_test/0*
subpath_in_path_test/0*
subpath_in_path_to_test/0*
to/1Convert a list of AO-Core message into TABM message.
to_suite_test_/0*
type/1*
typed_key_test/0*
typed_key_to_test/0*
@@ -119,6 +118,12 @@ Step 3: Apply types to values and remove specifiers. `basic_hashpath_to_test() -> any()` + + +### build/3 * ### + +`build(I, Rest, ScopedKeys) -> any()` + ### build_messages/2 * ### @@ -135,17 +140,11 @@ Step 5: Merge the base message with the scoped messages. Attempt Cowboy URL decode, then sanitize the result. - - -### do_build/3 * ### - -`do_build(I, Rest, ScopedKeys) -> any()` - ### from/1 ### -`from(RawMsg) -> any()` +`from(Path) -> any()` Normalize a singleton TABM message into a list of executable AO-Core messages. @@ -253,6 +252,15 @@ Extrapolate the inlined key-value pair from a path segment. If the key has a value, it may provide a type (as with typical keys), but if a value is not provided, it is assumed to be a boolean `true`. + + +### parse_inlined_keys/2 * ### + +`parse_inlined_keys(InlinedMsgBin, Msg) -> any()` + +Parse inlined key-value pairs from a path segment. Each key-value pair +is separated by `&` and is of the form `K=V`. + ### parse_part/1 * ### diff --git a/docs/resources/source-code/hb_store_rocksdb.md b/docs/resources/source-code/hb_store_rocksdb.md index 57e2b067e..1da975be5 100644 --- a/docs/resources/source-code/hb_store_rocksdb.md +++ b/docs/resources/source-code/hb_store_rocksdb.md @@ -2,7 +2,6 @@ -* [Data Types](#types) A process wrapper over rocksdb storage. diff --git a/docs/resources/source-code/hb_structured_fields.md b/docs/resources/source-code/hb_structured_fields.md index 977c2ce49..5aa7d74fc 100644 --- a/docs/resources/source-code/hb_structured_fields.md +++ b/docs/resources/source-code/hb_structured_fields.md @@ -2,7 +2,6 @@ -* [Data Types](#types) A module for parsing and converting between Erlang and HTTP Structured Fields, as described in RFC-9651. 
diff --git a/docs/resources/source-code/hb_util.md b/docs/resources/source-code/hb_util.md index 5700dfb64..711e9acc5 100644 --- a/docs/resources/source-code/hb_util.md +++ b/docs/resources/source-code/hb_util.md @@ -284,7 +284,8 @@ as well as a standard map of HyperBEAM runtime options. `human_id(Bin) -> any()` Convert a native binary ID to a human readable ID. If the ID is already -a human readable ID, it is returned as is. +a human readable ID, it is returned as is. If it is an ethereum address, it +is returned as is. diff --git a/docs/resources/source-code/rsa_pss.md b/docs/resources/source-code/rsa_pss.md index 3d110df8e..afa0543e3 100644 --- a/docs/resources/source-code/rsa_pss.md +++ b/docs/resources/source-code/rsa_pss.md @@ -2,7 +2,6 @@ -* [Data Types](#types) Distributed under the Mozilla Public License v2.0. diff --git a/docs/run/running-a-hyperbeam-node.md b/docs/run/running-a-hyperbeam-node.md index 18b881f07..30f9df072 100644 --- a/docs/run/running-a-hyperbeam-node.md +++ b/docs/run/running-a-hyperbeam-node.md @@ -24,7 +24,7 @@ To successfully build and run a HyperBEAM node, your system needs several softwa ncurses-dev \ libssl-dev \ sudo \ - curl \ + curl ca-certificates ``` @@ -60,14 +60,14 @@ Installation methods: ```bash sudo apt install erlang ``` - + === "Source Build" Download from [erlang.org](https://www.erlang.org/downloads) and follow the build instructions for your platform. @@ -86,13 +86,13 @@ Installation methods: === "Linux / macOS (Direct Download)" Get the `rebar3` binary from the [official website](https://rebar3.org/). Place the downloaded `rebar3` file in your system's `PATH` (e.g., `/usr/local/bin`) and make it executable (`chmod +x rebar3`). -=== "asdf (Recommended)" + ### Node.js diff --git a/docs/theme/templates/base.html b/docs/theme/templates/base.html index 36a17d00e..22c8a5ff4 100644 --- a/docs/theme/templates/base.html +++ b/docs/theme/templates/base.html @@ -241,7 +241,7 @@

@@ -261,7 +261,7 @@

Run your Node

href="/build/get-started-building-on-ao-core.html" > + + + @@ -367,7 +371,7 @@

what-is-hyperbeam-fig @@ -396,7 +400,7 @@

what-is-hyperbeam-fig @@ -420,12 +424,21 @@

-

+

- + + +
+ @@ -475,6 +488,14 @@

+ {% else %} diff --git a/mkdocs.yml b/mkdocs.yml index 708467228..02649fbca 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -25,7 +25,7 @@ nav: - Serverless decentralized compute: 'build/serverless-decentralized-compute.md' - Extending HyperBEAM: 'build/extending-hyperbeam.md' - Devices: - - Overview: 'devices/index.md' + - Overview: 'devices/overview.md' - '~meta@1.0': 'devices/meta-at-1-0.md' - '~process@1.0': 'devices/process-at-1-0.md' - '~message@1.0': 'devices/message-at-1-0.md' @@ -35,8 +35,11 @@ nav: - '~scheduler@1.0': 'devices/scheduler-at-1-0.md' - '~relay@1.0': 'devices/relay-at-1-0.md' - Resources: - - Source Code: - - Overview: 'resources/source-code/index.md' + # - Overview: 'resources/source-code/index.md' + - FAQ: 'resources/reference/faq.md' + - LLMs.txt: 'resources/llms.md' + - Glossary: 'resources/reference/glossary.md' + - Source Code Modules: - Modules: - ar_bundles: 'resources/source-code/ar_bundles.md' - ar_deep_hash: 'resources/source-code/ar_deep_hash.md' @@ -140,11 +143,8 @@ nav: - hb_volume: 'resources/source-code/hb_volume.md' - hb: 'resources/source-code/hb.md' - rsa_pss: 'resources/source-code/rsa_pss.md' - - Reference: - # - Troubleshooting: 'resources/reference/troubleshooting.md' - - Glossary: 'resources/reference/glossary.md' - - FAQ: 'resources/reference/faq.md' - - LLMs.txt: 'resources/llms.md' + # - Troubleshooting: 'resources/reference/troubleshooting.md' + # - Community: # - Contribute Overview: 'community/guidelines.md' # - Development Setup: 'community/setup.md' @@ -203,13 +203,13 @@ theme: - content.code.copy - content.code.select - content.code.annotate - # - navigation.footer + - navigation.footer palette: # Palette toggle for light mode - media: "(prefers-color-scheme: light)" scheme: default primary: white - accent: orange + accent: blue toggle: icon: material/brightness-7 name: Switch to dark mode @@ -217,7 +217,7 @@ theme: - media: "(prefers-color-scheme: dark)" scheme: default primary: white - accent: orange + accent: 
blue toggle: icon: material/brightness-7 name: Switch to light mode @@ -240,8 +240,8 @@ extra_javascript: - js/parallax.js - js/navigation.js - js/toc-highlight.js - - js/custom-nav.js - js/disable-preload-transition.js + - js/header-scroll.js extra: social: diff --git a/src/dev_green_zone.erl b/src/dev_green_zone.erl index e700a1d33..78609b7bd 100644 --- a/src/dev_green_zone.erl +++ b/src/dev_green_zone.erl @@ -116,8 +116,8 @@ default_zone_required_opts(Opts) -> %% @param _M1 Ignored parameter %% @param _M2 May contain a `required-config' map for custom requirements %% @param Opts A map of configuration options -%% @returns {ok, Binary} on success with confirmation message -%% @returns {error, Binary} on failure with error message +%% @returns `{ok, Binary}' on success with confirmation message, or +%% `{error, Binary}' on failure with error message. -spec init(M1 :: term(), M2 :: term(), Opts :: map()) -> {ok, binary()} | {error, binary()}. init(_M1, _M2, Opts) -> ?event(green_zone, {init, start}), @@ -184,8 +184,8 @@ init(_M1, _M2, Opts) -> %% @param M1 The join request message with target peer information %% @param M2 Additional request details, may include adoption preferences %% @param Opts A map of configuration options for join operations -%% @returns {ok, Map} on success with join response details -%% @returns {error, Binary} on failure with error message +%% @returns `{ok, Map}' on success with join response details, or +%% `{error, Binary}' on failure with error message. -spec join(M1 :: term(), M2 :: term(), Opts :: map()) -> {ok, map()} | {error, binary()}. 
join(M1, M2, Opts) -> @@ -220,8 +220,8 @@ join(M1, M2, Opts) -> %% @param _M1 Ignored parameter %% @param _M2 Ignored parameter %% @param Opts A map of configuration options -%% @returns {ok, Map} containing the encrypted key and IV on success -%% @returns {error, Binary} if the node is not part of a green zone +%% @returns `{ok, Map}' containing the encrypted key and IV on success, or +%% `{error, Binary}' if the node is not part of a green zone -spec key(M1 :: term(), M2 :: term(), Opts :: map()) -> {ok, map()} | {error, binary()}. key(_M1, _M2, Opts) -> @@ -276,8 +276,8 @@ key(_M1, _M2, Opts) -> %% @param _M1 Ignored parameter %% @param _M2 Ignored parameter %% @param Opts A map of configuration options -%% @returns {ok, Map} on success with confirmation details -%% @returns {error, Binary} if the node is not part of a green zone or +%% @returns `{ok, Map}' on success with confirmation details, or +%% `{error, Binary}' if the node is not part of a green zone or %% identity adoption fails. -spec become(M1 :: term(), M2 :: term(), Opts :: map()) -> {ok, map()} | {error, binary()}. 
@@ -374,8 +374,8 @@ finalize_become(KeyResp, NodeLocation, NodeID, GreenZoneAES, Opts) -> %% @param _M1 Ignored parameter %% @param M2 May contain ShouldMount flag to enable encrypted volume mounting %% @param InitOpts A map of initial configuration options -%% @returns {ok, Map} on success with confirmation message -%% @returns {error, Map|Binary} on failure with error details +%% @returns `{ok, Map}' on success with confirmation message, or +%% `{error, Map|Binary}' on failure with error details -spec join_peer( PeerLocation :: binary(), PeerID :: binary(), @@ -483,8 +483,8 @@ join_peer(PeerLocation, PeerID, _M1, M2, InitOpts) -> %% @param PeerID The ID of the peer node to join %% @param Req The request message with adoption preferences %% @param InitOpts A map of initial configuration options -%% @returns {ok, Map} with updated configuration on success -%% @returns {error, Binary} if configuration retrieval fails +%% @returns `{ok, Map}' with updated configuration on success, or +%% `{error, Binary}' if configuration retrieval fails -spec maybe_set_zone_opts( PeerLocation :: binary(), PeerID :: binary(), @@ -601,8 +601,8 @@ calculate_node_message(RequiredOpts, Req, BinList) when is_binary(BinList) -> %% @param M1 Ignored parameter %% @param Req The join request containing commitment report and public key %% @param Opts A map of configuration options -%% @returns {ok, Map} on success with encrypted AES key -%% @returns {error, Binary} on failure with error message +%% @returns `{ok, Map}' on success with encrypted AES key, or +%% `{error, Binary}' on failure with error message -spec validate_join(M1 :: term(), Req :: map(), Opts :: map()) -> {ok, map()} | {error, binary()}. validate_join(_M1, Req, Opts) -> diff --git a/src/dev_meta.erl b/src/dev_meta.erl index 7aa1e4980..a0e428fd3 100644 --- a/src/dev_meta.erl +++ b/src/dev_meta.erl @@ -11,7 +11,7 @@ -include("include/hb.hrl"). -include_lib("eunit/include/eunit.hrl"). 
%%% Include the auto-generated build info header file. --include_lib("_build/hb_buildinfo.hrl"). +-include_lib("../_build/hb_buildinfo.hrl"). %% @doc Ensure that the helper function `adopt_node_message/2' is not exported. %% The naming of this method carefully avoids a clash with the exported `info/3' diff --git a/src/dev_push.erl b/src/dev_push.erl index e9ebe6756..c555eefe9 100644 --- a/src/dev_push.erl +++ b/src/dev_push.erl @@ -13,14 +13,12 @@ %% `slot' key will be pushed. %% %% Optional parameters: -%% ``` -%% /result-depth: The depth to which the full contents of the result +%% `/result-depth': The depth to which the full contents of the result %% will be included in the response. Default: 1, returning %% the full result of the first message, but only the 'tree' %% of downstream messages. -%% /push-mode: Whether or not the push should be done asynchronously. +%% `/push-mode': Whether or not the push should be done asynchronously. %% Default: `sync', pushing synchronously. -%% ``` push(Base, Req, Opts) -> Process = dev_process:as_process(Base, Opts), ?event(push, {push_base, {base, Process}, {req, Req}}, Opts), diff --git a/src/dev_volume.erl b/src/dev_volume.erl index 53c1d73d0..62a0a4705 100644 --- a/src/dev_volume.erl +++ b/src/dev_volume.erl @@ -87,8 +87,8 @@ info(_Msg1, _Msg2, _Opts) -> %% @param M1 Base message for context. %% @param M2 Request message with operation details. %% @param Opts A map of configuration options for volume operations. -%% @returns {ok, Binary} on success with operation result message. -%% @returns {error, Binary} on failure with error message. +%% @returns `{ok, Binary}' on success with operation result message, or +%% `{error, Binary}' on failure with error message. -spec mount(term(), term(), map()) -> {ok, binary()} | {error, binary()}. mount(_M1, _M2, Opts) -> % Check if an encrypted key was sent in the request @@ -146,8 +146,8 @@ mount(_M1, _M2, Opts) -> %% @param _M1 Ignored parameter. %% @param _M2 Ignored parameter. 
%% @param Opts A map of configuration options. -%% @returns {ok, Map} containing the node's public key on success. -%% @returns {error, Binary} if the node's wallet is not available. +%% @returns `{ok, Map}' containing the node's public key on success, or +%% `{error, Binary}' if the node's wallet is not available. -spec public_key(term(), term(), map()) -> {ok, map()} | {error, binary()}. public_key(_M1, _M2, Opts) -> ?event(volume, {public_key, start}), @@ -181,8 +181,8 @@ public_key(_M1, _M2, Opts) -> %% %% @param EncryptedKey The encrypted volume key (Base64 encoded). %% @param Opts A map of configuration options. -%% @returns {ok, DecryptedKey} on successful decryption. -%% @returns {error, Binary} if decryption fails. +%% @returns `{ok, DecryptedKey}' on successful decryption, or +%% `{error, Binary}' if decryption fails. -spec decrypt_volume_key(binary(), map()) -> {ok, binary()} | {error, binary()}. decrypt_volume_key(EncryptedKeyBase64, Opts) -> % Decode the encrypted key @@ -219,8 +219,8 @@ decrypt_volume_key(EncryptedKeyBase64, Opts) -> %% @param StorePath The store path to check. %% @param Key The key to check. %% @param Opts The options to check. -%% @returns {ok, Binary} on success with operation result message. -%% @returns {error, Binary} on failure with error message. +%% @returns `{ok, Binary}' on success with operation result message, or +%% `{error, Binary}' on failure with error message. -spec check_base_device( term(), term(), term(), term(), term(), term(), term(), map() ) -> {ok, binary()} | {error, binary()}. @@ -252,8 +252,8 @@ check_base_device( %% @param StorePath The store path to check. %% @param Key The key to check. %% @param Opts The options to check. -%% @returns {ok, Binary} on success with operation result message. -%% @returns {error, Binary} on failure with error message. +%% @returns `{ok, Binary}' on success with operation result message, or +%% `{error, Binary}' on failure with error message. 
-spec check_partition( term(), term(), term(), term(), term(), term(), term(), map() ) -> {ok, binary()} | {error, binary()}. @@ -282,8 +282,8 @@ check_partition( %% @param VolumeName The name of the volume to mount. %% @param StorePath The store path to mount. %% @param Opts The options to mount. -%% @returns {ok, Binary} on success with operation result message. -%% @returns {error, Binary} on failure with error message. +%% @returns `{ok, Binary}' on success with operation result message, or +%% `{error, Binary}' on failure with error message. -spec mount_existing_partition( term(), term(), term(), term(), term(), map() ) -> {ok, binary()} | {error, binary()}. @@ -309,8 +309,8 @@ mount_existing_partition( %% @param VolumeName The name of the volume to mount. %% @param StorePath The store path to mount. %% @param Opts The options to mount. -%% @returns {ok, Binary} on success with operation result message. -%% @returns {error, Binary} on failure with error message. +%% @returns `{ok, Binary}' on success with operation result message, or +%% `{error, Binary}' on failure with error message. -spec create_and_mount_partition( term(), term(), term(), term(), term(), term(), term(), map() ) -> {ok, binary()} | {error, binary()}. @@ -337,8 +337,8 @@ create_and_mount_partition( %% @param VolumeName The name of the volume to mount. %% @param StorePath The store path to mount. %% @param Opts The options to mount. -%% @returns {ok, Binary} on success with operation result message. -%% @returns {error, Binary} on failure with error message. +%% @returns `{ok, Binary}' on success with operation result message, or +%% `{error, Binary}' on failure with error message. -spec format_and_mount( term(), term(), term(), term(), term(), map() ) -> {ok, binary()} | {error, binary()}. @@ -363,8 +363,8 @@ format_and_mount( %% @param VolumeName The name of the volume to mount. %% @param StorePath The store path to mount. %% @param Opts The options to mount. 
-%% @returns {ok, Binary} on success with operation result message. -%% @returns {error, Binary} on failure with error message. +%% @returns `{ok, Binary}' on success with operation result message, or +%% `{error, Binary}' on failure with error message. -spec mount_formatted_partition( term(), term(), term(), term(), term(), map() ) -> {ok, binary()} | {error, binary()}. @@ -383,8 +383,8 @@ mount_formatted_partition( %% @doc Update the store path to use the mounted volume. %% @param StorePath The store path to update. %% @param Opts The options to update. -%% @returns {ok, Binary} on success with operation result message. -%% @returns {error, Binary} on failure with error message. +%% @returns `{ok, Binary}' on success with operation result message, or +%% `{error, Binary}' on failure with error message. -spec update_store_path(term(), map()) -> {ok, binary()} | {error, binary()}. update_store_path(StorePath, Opts) -> CurrentStore = hb_opts:get(store, [], Opts), @@ -400,8 +400,8 @@ update_store_path(StorePath, Opts) -> %% @doc Update the node's configuration with the new store. %% @param NewStore The new store to update the node's configuration with. %% @param Opts The options to update the node's configuration with. -%% @returns {ok, Binary} on success with operation result message. -%% @returns {error, Binary} on failure with error message. +%% @returns `{ok, Binary}' on success with operation result message, or +%% `{error, Binary}' on failure with error message. -spec update_node_config(term(), map()) -> {ok, binary()} | {error, binary()}. 
update_node_config(NewStore, Opts) -> ok = hb_http_server:set_opts(Opts#{store => NewStore}), diff --git a/src/hb_opts.erl b/src/hb_opts.erl index bdc2d5c3d..b6df90802 100644 --- a/src/hb_opts.erl +++ b/src/hb_opts.erl @@ -379,8 +379,8 @@ mimic_default_types(Map, Mode) -> %% @param Opts The options map containing node_history %% @param MinLength The minimum acceptable length of node_history %% @param MaxLength The maximum acceptable length of node_history -%% @returns {ok, Length} if MinLength =< Length =< MaxLength, -%% or {error, Reason} if the length is outside the range. +%% @returns `{ok, Length}' if `MinLength =< Length =< MaxLength', +%% or `{error, Reason}' if the length is outside the range. validate_node_history(Opts) -> validate_node_history(Opts, 1, 1). validate_node_history(Opts, MinLength, MaxLength) -> @@ -418,9 +418,9 @@ validate_node_history(Opts, MinLength, MaxLength) -> %% - {error, ErrorMsg} with a message listing all missing options when any are not_found %% @param KeyValuePairs A list of {Name, Value} pairs to check. %% @param Opts The original options map to return if validation succeeds. -%% @returns {ok, Opts} if all required options are present. -%% @returns {error, <<"Missing required parameters: ", MissingOptsStr/binary>>} -%% where MissingOptsStr is a comma-separated list of missing option names. +%% @returns `{ok, Opts}' if all required options are present, or +%% `{error, <<"Missing required parameters: ", MissingOptsStr/binary>>}' +%% where `MissingOptsStr' is a comma-separated list of missing option names. -spec check_required_opts(list({binary(), term()}), map()) -> {ok, map()} | {error, binary()}. check_required_opts(KeyValuePairs, Opts) ->