diff --git a/.github/actions/debian/action.yml b/.github/actions/debian/action.yml index 888dec4d5..302e29e81 100644 --- a/.github/actions/debian/action.yml +++ b/.github/actions/debian/action.yml @@ -28,15 +28,15 @@ inputs: options: - miden-node - miden-remote-prover - service: + package: required: true - description: The service to build the packages for. + description: The Debian package name. type: choice options: - miden-node - miden-prover - miden-prover-proxy - package: + packaging_dir: required: true description: Name of packaging directory. type: choice @@ -78,7 +78,7 @@ runs: - name: Create package directories shell: bash run: | - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} mkdir -p \ packaging/deb/$pkg/DEBIAN \ packaging/deb/$pkg/usr/bin \ @@ -89,15 +89,18 @@ runs: - name: Copy package install scripts shell: bash run: | - svc=${{ inputs.service }} pkg=${{ inputs.package }} + pkg_dir=${{ inputs.packaging_dir }} crate=${{ inputs.crate_dir }} - git show ${{ steps.git-sha.outputs.sha }}:bin/$crate/.env > packaging/deb/$svc/lib/systemd/system/$svc.env - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/$svc.service > packaging/deb/$svc/lib/systemd/system/$svc.service - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/postinst > packaging/deb/$svc/DEBIAN/postinst - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg/postrm > packaging/deb/$svc/DEBIAN/postrm - chmod 0775 packaging/deb/$svc/DEBIAN/postinst - chmod 0775 packaging/deb/$svc/DEBIAN/postrm + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/postinst > packaging/deb/$pkg/DEBIAN/postinst + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/postrm > packaging/deb/$pkg/DEBIAN/postrm + for service_file in $(ls packaging/$pkg_dir/*.service | sed "s/.*miden/miden/g"); do + svc=$(echo $service_file | sed "s/.service//g") + git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/$service_file > packaging/deb/$pkg/lib/systemd/system/$service_file + git show ${{ steps.git-sha.outputs.sha }}:bin/$crate/.env > packaging/deb/$pkg/lib/systemd/system/$svc.env + done + chmod 0775 packaging/deb/$pkg/DEBIAN/postinst + chmod 0775 packaging/deb/$pkg/DEBIAN/postrm - name: Create control files shell: bash @@ -108,7 +111,7 @@ runs: # Control file's version field must be x.y.z format so strip the rest. version=$(git describe --tags --abbrev=0 | sed 's/[^0-9.]//g' ) - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} cat > packaging/deb/$pkg/DEBIAN/control << EOF Package: $pkg Version: $version @@ -132,14 +135,14 @@ runs: - name: Copy binary files shell: bash run: | - pkg=${{ inputs.service }} + pkg=${{ inputs.package }} bin=${{ inputs.crate }} cp -p ./bin/$bin packaging/deb/$pkg/usr/bin/ - name: Build packages shell: bash run: | - dpkg-deb --build --root-owner-group packaging/deb/${{ inputs.service }} + dpkg-deb --build --root-owner-group packaging/deb/${{ inputs.package }} # Save the .deb files, delete the rest. mv packaging/deb/*.deb . 
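As context for the service-file loop added above: the action now installs every `*.service` unit found in the packaging directory (plus one `.env` file per unit), so a single Debian package can ship several systemd services. A minimal standalone sketch of the same name derivation, assuming a hypothetical layout such as `packaging/node/miden-node.service` (values below are illustrative, not taken from the workflow):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Hypothetical values mirroring the action's `packaging_dir` and `package` inputs.
pkg_dir=node
pkg=miden-node

for path in packaging/"$pkg_dir"/*.service; do
    service_file=$(basename "$path")   # e.g. miden-node.service
    svc=${service_file%.service}       # e.g. miden-node
    echo "unit:     packaging/deb/$pkg/lib/systemd/system/$service_file"
    echo "env file: packaging/deb/$pkg/lib/systemd/system/$svc.env"
done
```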
@@ -148,12 +151,12 @@ runs: - name: Package names shell: bash run: | - echo "package=${{ inputs.service }}-${{ inputs.gitref }}-${{ inputs.arch }}.deb" >> $GITHUB_ENV + echo "package=${{ inputs.package }}-${{ inputs.gitref }}-${{ inputs.arch }}.deb" >> $GITHUB_ENV - name: Rename package files shell: bash run: | - mv ${{ inputs.service }}.deb ${{ env.package }} + mv ${{ inputs.package}}.deb ${{ env.package }} - name: shasum packages shell: bash diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index 0e7fe0c07..b259c23fd 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -12,38 +12,16 @@ permissions: jobs: docker-build: - strategy: - matrix: - component: [node] runs-on: Linux-ARM64-Runner - name: Build ${{ matrix.component }} steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Configure AWS credentials - if: github.event.pull_request.head.repo.fork == false - uses: aws-actions/configure-aws-credentials@v4 - with: - aws-region: ${{ secrets.AWS_REGION }} - role-to-assume: ${{ secrets.AWS_ROLE }} - role-session-name: GithubActionsSession - - - name: Set cache parameters - if: github.event.pull_request.head.repo.fork == false - run: | - echo "CACHE_FROM=type=s3,region=${{ secrets.AWS_REGION }},bucket=${{ secrets.AWS_CACHE_BUCKET }},name=miden-${{ matrix.component }}" >> $GITHUB_ENV - echo "CACHE_TO=type=s3,region=${{ secrets.AWS_REGION }},bucket=${{ secrets.AWS_CACHE_BUCKET }},name=miden-${{ matrix.component }}" >> $GITHUB_ENV - - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - with: - cache-binary: true - - name: Build Docker image - uses: docker/build-push-action@v5 + - name: Build and push + uses: docker/build-push-action@v6 with: push: false - file: ./bin/${{ matrix.component }}/Dockerfile - cache-from: ${{ env.CACHE_FROM || '' }} - cache-to: ${{ env.CACHE_TO || '' }} + file: ./bin/node/Dockerfile + cache-from: type=gha + # Only save cache on push into next + cache-to: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' && 'type=gha,mode=max' || '' }} diff --git a/.github/workflows/publish-debian-all.yml b/.github/workflows/publish-debian-all.yml index a6d63d503..76e65d0eb 100644 --- a/.github/workflows/publish-debian-all.yml +++ b/.github/workflows/publish-debian-all.yml @@ -39,8 +39,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: node - service: miden-node - package: node + package: miden-node + packaging_dir: node crate: miden-node arch: ${{ matrix.arch }} @@ -62,8 +62,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: remote-prover - service: miden-prover - package: prover + package: miden-prover + packaging_dir: prover crate: miden-remote-prover arch: ${{ matrix.arch }} @@ -85,8 +85,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: remote-prover - service: miden-prover-proxy - package: prover-proxy + package: miden-prover-proxy + packaging_dir: prover-proxy crate: miden-remote-prover arch: ${{ matrix.arch }} @@ -108,7 +108,7 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ env.version }} crate_dir: network-monitor - service: miden-network-monitor - package: network-monitor + package: miden-network-monitor + packaging_dir: network-monitor crate: miden-network-monitor arch: ${{ matrix.arch }} diff --git a/.github/workflows/publish-debian.yml b/.github/workflows/publish-debian.yml index 81e8d7447..d17d06532 100644 --- 
a/.github/workflows/publish-debian.yml +++ b/.github/workflows/publish-debian.yml @@ -3,8 +3,8 @@ name: Publish Debian Package on: workflow_dispatch: inputs: - service: - description: "Name of service to publish" + package: + description: "Name of package to publish" required: true type: choice options: - network-monitor - node - remote-prover - package: + packaging_dir: required: true description: "Name of packaging directory" type: choice @@ -48,7 +48,7 @@ permissions: jobs: publish: - name: Publish ${{ inputs.service }} ${{ matrix.arch }} Debian + name: Publish ${{ inputs.package }} ${{ matrix.arch }} Debian strategy: matrix: arch: [amd64, arm64] @@ -69,7 +69,7 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} gitref: ${{ inputs.version }} crate_dir: ${{ inputs.crate_dir }} - service: ${{ inputs.service }} package: ${{ inputs.package }} + packaging_dir: ${{ inputs.packaging_dir }} crate: ${{ inputs.crate }} arch: ${{ matrix.arch }} diff --git a/.gitignore b/.gitignore index 0a086d3d0..a4d92ce8e 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ # will have compiled files and executables debug/ target/ +miden-node-stress-test-* # Generated by protox `file_descriptor_set.bin` *.bin diff --git a/CHANGELOG.md b/CHANGELOG.md index 048d156b5..9faf9bd88 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,21 @@ # Changelog +## v0.14.0 (TBD) + +### Enhancements + +- [BREAKING] Moved block proving from the Block Producer to the Store ([#1579](https://github.com/0xMiden/miden-node/pull/1579)). +- [BREAKING] Updated miden-base dependencies to use `next` branch; renamed `NoteInputs` to `NoteStorage`, `.inputs()` to `.storage()`, and database `inputs` column to `storage` ([#1595](https://github.com/0xMiden/miden-node/pull/1595)). + +### Changes + +- [BREAKING] Added typed `GetAccountError` for `GetAccount` endpoint, splitting `BlockNotAvailable` into `UnknownBlock` and `BlockPruned`. `AccountNotFound` and `AccountNotPublic` now return `InvalidArgument` gRPC status instead of `NotFound`; clients should parse the error details discriminant rather than branching on status codes ([#1646](https://github.com/0xMiden/miden-node/pull/1646)). +- Changed `note_type` field in proto `NoteMetadata` from `uint32` to a `NoteType` enum ([#1594](https://github.com/0xMiden/miden-node/pull/1594)). +- Refactored NTX Builder startup and introduced `NtxBuilderConfig` with configurable parameters ([#1610](https://github.com/0xMiden/miden-node/pull/1610)). +- Refactored NTX Builder actor state into `AccountDeltaTracker` and `NotePool` for clarity, and added tracing instrumentation to event broadcasting ([#1611](https://github.com/0xMiden/miden-node/pull/1611)). +- Added `#[track_caller]` to tracing/logging helpers ([#1651](https://github.com/0xMiden/miden-node/pull/1651)). +- Improved tracing span fields ([#1650](https://github.com/0xMiden/miden-node/pull/1650)). + ## v0.13.5 (TBD) - OpenTelemetry traces are now flushed before program termination on panic ([#1643](https://github.com/0xMiden/miden-node/pull/1643)). @@ -59,10 +75,12 @@ - The network monitor now marks the chain as unhealthy if it fails to create new blocks ([#1512](https://github.com/0xMiden/miden-node/pull/1512)). - Limited number of storage map keys in `GetAccount` requests ([#1517](https://github.com/0xMiden/miden-node/pull/1517)). - Block producer now detects if it is desync'd from the store's chain tip and aborts ([#1520](https://github.com/0xMiden/miden-node/pull/1520)).
+- Pin tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). +- Add `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). +- Add check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)). +- Ensure store terminates on nullifier tree or account tree root vs header mismatch ([#1569](https://github.com/0xMiden/miden-node/pull/1569)). - Added support for foreign accounts to `NtxDataStore` and add `GetAccount` endpoint to NTX Builder gRPC store client ([#1521](https://github.com/0xMiden/miden-node/pull/1521)). -- Pined tool versions in CI ([#1523](https://github.com/0xMiden/miden-node/pull/1523)). -- Added `GetVaultAssetWitnesses` and `GetStorageMapWitness` RPC endpoints to store ([#1529](https://github.com/0xMiden/miden-node/pull/1529)). -- Added check to ensure tree store state is in sync with database storage ([#1532](https://github.com/0xMiden/miden-node/issues/1534)). +- Use paged queries for tree rebuilding to reduce memory usage during startup ([#1536](https://github.com/0xMiden/miden-node/pull/1536)). ### Changes @@ -127,7 +145,7 @@ - Network transaction builder now marks notes from any error as failed ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). - Network transaction builder now adheres to note limit set by protocol ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). - Race condition resolved in the store's `apply_block` ([#1508](https://github.com/0xMiden/miden-node/pull/1508)). - - This presented as a database locked error and in rare cases a desync between the mempool and store. + - This presented as a database locked error and in rare cases a desync between the mempool and store.
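As background for the `#[track_caller]` entry in the v0.14.0 changes above ([#1651]): when logging goes through a shared helper, the helper's own file and line get reported unless the attribute forwards the caller's location. A minimal illustrative sketch of the mechanism, not the node's actual helper (the function name below is made up):

```rust
use std::panic::Location;

/// Shared logging helper. With `#[track_caller]`, `Location::caller()` resolves
/// to the call site; without it, it would resolve to this function's own body.
#[track_caller]
fn log_error(msg: &str) {
    let caller = Location::caller();
    eprintln!("[{}:{}] ERROR: {msg}", caller.file(), caller.line());
}

fn main() {
    // Prints this call's file and line, not `log_error`'s.
    log_error("store connection lost");
}
```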
## v0.12.6 (2026-01-12) diff --git a/Cargo.lock b/Cargo.lock index 2af2b9672..c8c67c56a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -27,17 +27,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom 0.2.17", - "once_cell", - "version_check", -] - [[package]] name = "ahash" version = "0.8.12" @@ -471,9 +460,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" [[package]] name = "bzip2-sys" @@ -651,19 +640,19 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.54" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" +checksum = "3e34525d5bbbd55da2bb745d34b36121baac88d07619a9a09cfcf4a6c0832785" dependencies = [ "clap_builder", - "clap_derive 4.5.49", + "clap_derive 4.5.55", ] [[package]] name = "clap_builder" -version = "4.5.54" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" +checksum = "59a20016a20a3da95bef50ec7238dbd09baeef4311dcdd38ec15aba69812fb61" dependencies = [ "anstream", "anstyle", @@ -686,9 +675,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.49" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" +checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -791,7 +780,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.54", + "clap 4.5.55", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1334,7 +1323,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -1679,9 +1668,6 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.8", -] [[package]] name = "hashbrown" @@ -2112,7 +2098,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -2272,9 +2258,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "librocksdb-sys" @@ -2296,6 +2282,7 @@ version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" dependencies = [ + "cc", "pkg-config", "vcpkg", ] @@ -2481,9 +2468,8 @@ 
dependencies = [ [[package]] name = "miden-agglayer" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a867217bab689c0539f6b4797cb452f0932de6904479a38f1322e045b9383b" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "fs-err", "miden-assembly", @@ -2498,9 +2484,9 @@ dependencies = [ [[package]] name = "miden-air" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d819876b9e9b630e63152400e6df2a201668a9bdfd33d54d6806b9d7b992ff8" +checksum = "ab2f1db9cdbd5da3eaf07fa0a8122d27b575f96b0699388c98f6c0e468cb9c1f" dependencies = [ "miden-core", "miden-utils-indexing", @@ -2511,9 +2497,9 @@ dependencies = [ [[package]] name = "miden-assembly" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24c6a18e29c03141cf9044604390a00691c7342924ec865b4acfdd560ff41ede" +checksum = "cf4aba6bc5cfda2393ecc032b55caabde289fb980650560f8333803db4e48f09" dependencies = [ "env_logger", "log", @@ -2526,9 +2512,9 @@ dependencies = [ [[package]] name = "miden-assembly-syntax" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7458ff670f5a514bf972aa84d6e1851a4c4e9afa351f53b71bdc2218b99254b6" +checksum = "23eae66f2a55c2a0666f4ed896b61797845b528435ad2bae41fd9a221f94bad7" dependencies = [ "aho-corasick", "env_logger", @@ -2550,9 +2536,8 @@ dependencies = [ [[package]] name = "miden-block-prover" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e92a0ddae8d0983e37bc636edba741947b1e3dc63baed2ad85921342080154a" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "miden-protocol", "thiserror 2.0.18", @@ -2560,9 +2545,9 @@ dependencies = [ [[package]] name = "miden-core" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a5c9c8c3d42ae8381ed49e47ff9ad2d2e345c4726761be36b7d4000ebb40ae" +checksum = "2716bb01f07f0b19398e3d9785e23a724b89aef64d614a9073c1d44c6898a9a9" dependencies = [ "derive_more", "itertools 0.14.0", @@ -2582,9 +2567,9 @@ dependencies = [ [[package]] name = "miden-core-lib" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6556494ea5576803730fa15015bee6bd9d1a117450f22e7df0883421e7423674" +checksum = "9ac97f4fb334ee842663f99f33677beacc7bdf4b7d4eeff419c2cd98a5a68bfa" dependencies = [ "env_logger", "fs-err", @@ -2644,9 +2629,9 @@ dependencies = [ [[package]] name = "miden-debug-types" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19123e896f24b575e69921a79a39a0a4babeb98404a8601017feb13b75d653b3" +checksum = "b421786850ce05627355ee616c4a5fdc4a9ad1591859ede5e5564ab74aa4abd2" dependencies = [ "memchr", "miden-crypto", @@ -2671,9 +2656,9 @@ dependencies = [ [[package]] name = "miden-mast-package" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d6a322b91efa1bb71e224395ca1fb9ca00e2614f89427e35d8c42a903868a3" +checksum = "169025a61c2ca2e8a0f53f20a7bdcbdd1f8e34f528676137208bff64944652bb" dependencies = [ "derive_more", "miden-assembly-syntax", @@ -2725,14 +2710,15 @@ 
dependencies = [ [[package]] name = "miden-network-monitor" -version = "0.13.4" +version = "0.14.0" dependencies = [ "anyhow", "axum", - "clap 4.5.54", + "clap 4.5.55", "hex", "humantime", "miden-node-proto", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", @@ -2753,16 +2739,17 @@ dependencies = [ [[package]] name = "miden-node" -version = "0.13.4" +version = "0.14.0" dependencies = [ "anyhow", - "clap 4.5.54", + "clap 4.5.55", "figment", "fs-err", "hex", "humantime", "miden-node-block-producer", "miden-node-ntx-builder", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-rpc", "miden-node-store", "miden-node-utils", @@ -2774,18 +2761,18 @@ dependencies = [ [[package]] name = "miden-node-block-producer" -version = "0.13.4" +version = "0.14.0" dependencies = [ "anyhow", "assert_matches", "futures", "itertools 0.14.0", - "miden-block-prover", "miden-node-proto", "miden-node-proto-build", "miden-node-store", "miden-node-test-macro", "miden-node-utils", + "miden-node-validator", "miden-protocol", "miden-remote-prover-client", "miden-standards", @@ -2810,7 +2797,7 @@ dependencies = [ [[package]] name = "miden-node-grpc-error-macro" -version = "0.13.4" +version = "0.14.0" dependencies = [ "quote", "syn 2.0.114", @@ -2818,11 +2805,17 @@ dependencies = [ [[package]] name = "miden-node-ntx-builder" -version = "0.13.4" +version = "0.14.0" dependencies = [ "anyhow", + "deadpool", + "deadpool-diesel", + "deadpool-sync", + "diesel", + "diesel_migrations", "futures", "indexmap 2.13.0", + "libsqlite3-sys", "miden-node-proto", "miden-node-test-macro", "miden-node-utils", @@ -2842,7 +2835,7 @@ dependencies = [ [[package]] name = "miden-node-proto" -version = "0.13.4" +version = "0.14.0" dependencies = [ "anyhow", "assert_matches", @@ -2851,6 +2844,7 @@ dependencies = [ "http", "miden-node-grpc-error-macro", "miden-node-proto-build", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", @@ -2866,7 +2860,7 @@ dependencies = [ [[package]] name = "miden-node-proto-build" -version = "0.13.4" +version = "0.14.0" dependencies = [ "fs-err", "miette", @@ -2874,9 +2868,13 @@ dependencies = [ "tonic-prost-build", ] +[[package]] +name = "miden-node-rocksdb-cxx-linkage-fix" +version = "0.14.0" + [[package]] name = "miden-node-rpc" -version = "0.13.4" +version = "0.14.0" dependencies = [ "anyhow", "futures", @@ -2908,7 +2906,7 @@ dependencies = [ [[package]] name = "miden-node-store" -version = "0.13.4" +version = "0.14.0" dependencies = [ "anyhow", "assert_matches", @@ -2919,14 +2917,19 @@ dependencies = [ "diesel", "diesel_migrations", "fs-err", + "futures", "hex", "indexmap 2.13.0", + "libsqlite3-sys", + "miden-block-prover", "miden-crypto", "miden-node-proto", "miden-node-proto-build", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-test-macro", "miden-node-utils", "miden-protocol", + "miden-remote-prover-client", "miden-standards", "pretty_assertions", "rand 0.9.2", @@ -2942,19 +2945,20 @@ dependencies = [ "tonic-reflection", "tower-http", "tracing", + "url", ] [[package]] name = "miden-node-stress-test" -version = "0.13.4" +version = "0.14.0" dependencies = [ - "clap 4.5.54", + "clap 4.5.55", "fs-err", "futures", "miden-air", - "miden-block-prover", "miden-node-block-producer", "miden-node-proto", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-store", "miden-node-utils", "miden-protocol", @@ -2976,7 +2980,7 @@ dependencies = [ [[package]] name = "miden-node-utils" -version = "0.13.4" +version = "0.14.0" 
dependencies = [ "anyhow", "bytes", @@ -2985,6 +2989,7 @@ dependencies = [ "http-body-util", "itertools 0.14.0", "lru 0.16.3", + "miden-node-rocksdb-cxx-linkage-fix", "miden-protocol", "opentelemetry", "opentelemetry-otlp", @@ -3004,7 +3009,7 @@ dependencies = [ [[package]] name = "miden-node-validator" -version = "0.13.4" +version = "0.14.0" dependencies = [ "anyhow", "miden-node-proto", @@ -3023,9 +3028,9 @@ dependencies = [ [[package]] name = "miden-processor" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a659fac55de14647e2695f03d96b83ff94fe65fd31e74d81c225ec52af25acf" +checksum = "a18a6a5eebe64e81a29be6321ee8f4478c6bfaf619b7689825884e8cd308c044" dependencies = [ "itertools 0.14.0", "miden-air", @@ -3043,9 +3048,8 @@ dependencies = [ [[package]] name = "miden-protocol" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "785be319a826c9cb43d2e1a41a1fb1eee3f2baafe360e0d743690641f7c93ad5" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "bech32", "fs-err", @@ -3074,9 +3078,8 @@ dependencies = [ [[package]] name = "miden-protocol-macros" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dc854c1b9e49e82d3f39c5710345226e0b2a62ec0ea220c616f1f3a099cfb3" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "proc-macro2", "quote", @@ -3085,9 +3088,9 @@ dependencies = [ [[package]] name = "miden-prover" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e5df61f50f27886f6f777d6e0cdf785f7db87dd881799a84a801e7330c189c8" +checksum = "83070f0ca1a08235362e990238b6487191f814054aaebcc40883a073fdcd18f9" dependencies = [ "miden-air", "miden-debug-types", @@ -3099,18 +3102,19 @@ dependencies = [ [[package]] name = "miden-remote-prover" -version = "0.13.4" +version = "0.14.0" dependencies = [ "anyhow", "async-trait", "axum", "bytes", - "clap 4.5.54", + "clap 4.5.55", "http", "humantime", "miden-block-prover", "miden-node-proto", "miden-node-proto-build", + "miden-node-rocksdb-cxx-linkage-fix", "miden-node-utils", "miden-protocol", "miden-standards", @@ -3145,7 +3149,7 @@ dependencies = [ [[package]] name = "miden-remote-prover-client" -version = "0.13.4" +version = "0.14.0" dependencies = [ "fs-err", "getrandom 0.3.4", @@ -3165,9 +3169,8 @@ dependencies = [ [[package]] name = "miden-standards" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98e33771fc35e1e640582bcd26c88b2ab449dd3a70888b315546d0d3447f4bb3" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "fs-err", "miden-assembly", @@ -3183,9 +3186,8 @@ dependencies = [ [[package]] name = "miden-testing" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae5d41a888d1a5e520a9312a170975d0fbadefb1b9200543cebdf54dd0960310" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3200,14 +3202,14 @@ dependencies = [ "miden-tx-batch-prover", "rand 0.9.2", "rand_chacha 0.9.0", + "thiserror 2.0.18", "winterfell", ] [[package]] name = 
"miden-tx" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "430e4ee02b5efb71b104926e229441e0071a93a259a70740bf8c436495caa64f" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "miden-processor", "miden-protocol", @@ -3219,9 +3221,8 @@ dependencies = [ [[package]] name = "miden-tx-batch-prover" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03bc209b6487ebac0de230461e229a99d17ed73596c7d99fc59eea47a28a89cc" +version = "0.14.0" +source = "git+https://github.com/0xMiden/miden-base?branch=next#33eabfe45ab67bdfb9f7d8907b8d50298c6b182e" dependencies = [ "miden-protocol", "miden-tx", @@ -3229,9 +3230,9 @@ dependencies = [ [[package]] name = "miden-utils-core-derive" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa207ffd8b26a79d9b5b246a352812f0015c0bb8f75492ec089c5c8e6d5f9e2b" +checksum = "c9fc6d350fb9ad44797e8d0a1feaacaa6ee4079ef752d9ababc101ffc40ec354" dependencies = [ "proc-macro2", "quote", @@ -3240,9 +3241,9 @@ dependencies = [ [[package]] name = "miden-utils-diagnostics" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b2f55477d410542a5d8990ca04856adf5bef91bfa3b54ca3c03a5ff14a6e25c" +checksum = "af2462fb2e750247a56264eddf40e2e1c8d96ff9379abe73acbcbe81e530e1d5" dependencies = [ "miden-crypto", "miden-debug-types", @@ -3253,18 +3254,18 @@ dependencies = [ [[package]] name = "miden-utils-indexing" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f39efae17e14ec8f8a1266cffd29eb7a08ac837143cd09223b1af361bbb55730" +checksum = "57046b5c263b78e7fa5a6e328ca852e6319cf844faa26fbdcbb128ec555deb2a" dependencies = [ "thiserror 2.0.18", ] [[package]] name = "miden-utils-sync" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da7fa8f5fd27f122c83f55752f2a964bbfc2b713de419e9c152f7dcc05c194ec" +checksum = "e2d3e129b62099672a1ffc012ab2e26ee7f2b35e4ca18ca1f726b88c53546ddd" dependencies = [ "lock_api", "loom", @@ -3273,9 +3274,9 @@ dependencies = [ [[package]] name = "miden-verifier" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbddac2e76486fb657929338323c68b9e7f40e33b8cfb593d0fb5bf637db046e" +checksum = "fe033af062937938ded511e5238db3bf8e0c1a30205850d62fb23271b3c96f85" dependencies = [ "miden-air", "miden-core", @@ -3512,9 +3513,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-derive" @@ -3861,7 +3862,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "blake2", "bytes", @@ -3897,7 +3898,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash 0.8.12", + "ahash", "async-trait", "brotli", 
"bytes", @@ -3987,7 +3988,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash 0.8.12", + "ahash", ] [[package]] @@ -4019,7 +4020,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.12.3", + "hashbrown 0.16.1", "parking_lot", "rand 0.8.5", ] @@ -4346,9 +4347,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" dependencies = [ "bytes", "prost-derive", @@ -4438,9 +4439,9 @@ dependencies = [ [[package]] name = "protox" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8555716f64c546306ddf3383065dc40d4232609e79e0a4c50e94e87d54f30fb4" +checksum = "4f25a07a73c6717f0b9bbbd685918f5df9815f7efba450b83d9c9dea41f0e3a1" dependencies = [ "bytes", "miette", @@ -4836,7 +4837,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4849,7 +4850,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -5251,9 +5252,9 @@ checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "siphasher" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" @@ -5481,7 +5482,7 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -5600,9 +5601,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.45" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", @@ -5615,15 +5616,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.25" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", @@ -6261,9 +6262,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" dependencies = [ 
"getrandom 0.3.4", "js-sys", @@ -6454,7 +6455,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -6948,18 +6949,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" +checksum = "71ddd76bcebeed25db614f82bf31a9f4222d3fbba300e6fb6c00afa26cbd4d9d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" +checksum = "d8187381b52e32220d50b255276aa16a084ec0a9017a0ca2152a1f55c539758d" dependencies = [ "proc-macro2", "quote", @@ -7028,9 +7029,9 @@ dependencies = [ [[package]] name = "zmij" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65" +checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" [[package]] name = "zstd" diff --git a/Cargo.toml b/Cargo.toml index f99aef2d7..db02abc0d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "crates/ntx-builder", "crates/proto", "crates/remote-prover-client", + "crates/rocksdb-cxx-linkage-fix", "crates/rpc", "crates/store", "crates/test-macro", @@ -28,7 +29,7 @@ license = "MIT" readme = "README.md" repository = "https://github.com/0xMiden/miden-node" rust-version = "1.90" -version = "0.13.4" +version = "0.14.0" # Optimize the cryptography for faster tests involving account creation. [profile.test.package.miden-crypto] @@ -39,25 +40,28 @@ debug = true [workspace.dependencies] # Workspace crates. 
-miden-node-block-producer = { path = "crates/block-producer", version = "0.13" } -miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.13" } -miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.13" } -miden-node-proto = { path = "crates/proto", version = "0.13" } -miden-node-proto-build = { path = "proto", version = "0.13" } -miden-node-rpc = { path = "crates/rpc", version = "0.13" } -miden-node-store = { path = "crates/store", version = "0.13" } +miden-node-block-producer = { path = "crates/block-producer", version = "0.14" } +miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.14" } +miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.14" } +miden-node-proto = { path = "crates/proto", version = "0.14" } +miden-node-proto-build = { path = "proto", version = "0.14" } +miden-node-rpc = { path = "crates/rpc", version = "0.14" } +miden-node-store = { path = "crates/store", version = "0.14" } miden-node-test-macro = { path = "crates/test-macro" } -miden-node-utils = { path = "crates/utils", version = "0.13" } -miden-node-validator = { path = "crates/validator", version = "0.13" } -miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.13" } +miden-node-utils = { path = "crates/utils", version = "0.14" } +miden-node-validator = { path = "crates/validator", version = "0.14" } +miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.14" } +# Temporary workaround until +# is part of `rocksdb-rust` release +miden-node-rocksdb-cxx-linkage-fix = { path = "crates/rocksdb-cxx-linkage-fix", version = "0.14" } # miden-base aka protocol dependencies. These should be updated in sync. -miden-block-prover = { version = "0.13" } -miden-protocol = { default-features = false, version = "0.13" } -miden-standards = { version = "0.13" } -miden-testing = { version = "0.13" } -miden-tx = { default-features = false, version = "0.13" } -miden-tx-batch-prover = { version = "0.13" } +miden-block-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base" } +miden-protocol = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base" } +miden-standards = { branch = "next", git = "https://github.com/0xMiden/miden-base" } +miden-testing = { branch = "next", git = "https://github.com/0xMiden/miden-base" } +miden-tx = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base" } +miden-tx-batch-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base" } # Other miden dependencies. These should align with those expected by miden-base. 
miden-air = { features = ["std", "testing"], version = "0.20" } @@ -68,6 +72,11 @@ anyhow = { version = "1.0" } assert_matches = { version = "1.5" } async-trait = { version = "0.1" } clap = { features = ["derive"], version = "4.5" } +deadpool = { default-features = false, version = "0.12" } +deadpool-diesel = { version = "0.6" } +deadpool-sync = { default-features = false, version = "0.1" } +diesel = { version = "2.3" } +diesel_migrations = { version = "2.3" } fs-err = { version = "3" } futures = { version = "0.3" } hex = { version = "0.4" } @@ -75,12 +84,14 @@ http = { version = "1.3" } humantime = { version = "2.2" } indexmap = { version = "2.12" } itertools = { version = "0.14" } +libsqlite3-sys = { features = ["bundled"], version = "0.35" } lru = { default-features = false, version = "0.16" } pretty_assertions = { version = "1.4" } -# breaking change `DecodeError::new` is not exposed anymore -# but is assumed public by some internal dependency -prost = { default-features = false, version = "=0.14.1" } -protox = { version = "=0.9.0" } +# prost and protox are from different authors and are _not_ released in +# lockstep, nor are they adhering to semver semantics. We keep this +# to avoid future breakage. +prost = { default-features = false, version = "=0.14.3" } +protox = { version = "=0.9.1" } rand = { version = "0.9" } rand_chacha = { version = "0.9" } rstest = { version = "0.26" } @@ -88,6 +99,7 @@ serde = { features = ["derive"], version = "1" } thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } +tokio-util = { version = "0.7" } toml = { version = "0.9" } tonic = { default-features = false, version = "0.14" } tonic-health = { version = "0.14" } @@ -105,6 +117,7 @@ url = { features = ["serde"], version = "2.5" } # Pedantic lints are set to a lower priority which allows lints in the group to be selectively enabled. pedantic = { level = "warn", priority = -1 } +allow_attributes = "deny" cast_possible_truncation = "allow" # Overly many instances especially regarding indices. collapsible-if = "allow" # Too new to enforce. 
from_iter_instead_of_collect = "allow" # at times `FromIter` is much more readable diff --git a/Makefile b/Makefile index 64aa55bf4..fd1408f70 100644 --- a/Makefile +++ b/Makefile @@ -9,6 +9,7 @@ help: WARNINGS=RUSTDOCFLAGS="-D warnings" BUILD_PROTO=BUILD_PROTO=1 CONTAINER_RUNTIME ?= docker +STRESS_TEST_DATA_DIR ?= stress-test-store-$(shell date +%Y%m%d-%H%M%S) # -- linting -------------------------------------------------------------------------------------- @@ -108,6 +109,15 @@ install-node: ## Installs node install-remote-prover: ## Install remote prover's CLI $(BUILD_PROTO) cargo install --path bin/remote-prover --bin miden-remote-prover --features concurrent --locked +.PHONY: stress-test-smoke +stress-test: ## Runs stress-test benchmarks + ${BUILD_PROTO} cargo build --release --locked -p miden-node-stress-test + @mkdir -p $(STRESS_TEST_DATA_DIR) + ./target/release/miden-node-stress-test seed-store --data-directory $(STRESS_TEST_DATA_DIR) --num-accounts 500 --public-accounts-percentage 50 + ./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-state + ./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-notes + ./target/release/miden-node-stress-test benchmark-store --data-directory $(STRESS_TEST_DATA_DIR) --iterations 10 --concurrency 1 sync-nullifiers --prefixes 10 + .PHONY: install-stress-test install-stress-test: ## Installs stress-test binary cargo install --path bin/stress-test --locked diff --git a/bin/network-monitor/Cargo.toml b/bin/network-monitor/Cargo.toml index 11c2b1905..6667a4ded 100644 --- a/bin/network-monitor/Cargo.toml +++ b/bin/network-monitor/Cargo.toml @@ -37,3 +37,6 @@ tonic = { features = ["codegen", "tls-native-roots", "transport"], wo tonic-health = { workspace = true } tracing = { workspace = true } url = { features = ["serde"], workspace = true } + +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } diff --git a/bin/network-monitor/build.rs b/bin/network-monitor/build.rs new file mode 100644 index 000000000..ed4038d06 --- /dev/null +++ b/bin/network-monitor/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index c04426733..c2b9d0835 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -22,10 +22,10 @@ use miden_protocol::note::{ NoteAssets, NoteAttachment, NoteExecutionHint, - NoteInputs, NoteMetadata, NoteRecipient, NoteScript, + NoteStorage, NoteTag, NoteType, }; @@ -751,7 +751,7 @@ fn load_counter_account(file_path: &Path) -> Result { } /// Create and submit a network note that targets the counter account. 
-#[allow(clippy::too_many_arguments)] +#[expect(clippy::too_many_arguments)] #[instrument( parent = None, target = COMPONENT, @@ -872,7 +872,7 @@ fn create_network_note( Felt::new(rng.random()), ]); - let recipient = NoteRecipient::new(serial_num, script, NoteInputs::new(vec![])?); + let recipient = NoteRecipient::new(serial_num, script, NoteStorage::new(vec![])?); let network_note = Note::new(NoteAssets::new(vec![])?, metadata, recipient.clone()); Ok((network_note, recipient)) diff --git a/bin/network-monitor/src/faucet.rs b/bin/network-monitor/src/faucet.rs index 370d7bb10..caeafe055 100644 --- a/bin/network-monitor/src/faucet.rs +++ b/bin/network-monitor/src/faucet.rs @@ -47,7 +47,7 @@ pub struct FaucetTestDetails { struct PowChallengeResponse { challenge: String, target: u64, - #[allow(dead_code)] // Timestamp is part of API response but not used + #[expect(dead_code)] // Timestamp is part of API response but not used timestamp: u64, } @@ -55,7 +55,7 @@ struct PowChallengeResponse { #[derive(Debug, Deserialize)] struct GetTokensResponse { tx_id: String, - #[allow(dead_code)] // Note ID is part of API response but not used in monitoring + #[expect(dead_code)] // Note ID is part of API response but not used in monitoring note_id: String, } diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index b6ade3b4d..2743f3e8d 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -33,6 +33,9 @@ miden-protocol = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } url = { workspace = true } +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } + [dev-dependencies] figment = { features = ["env", "test", "toml"], version = "0.10" } miden-node-utils = { features = ["tracing-forest"], workspace = true } diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index 832b0bb8d..9778daec8 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -1,39 +1,47 @@ -FROM rust:1.90-slim-bullseye AS builder - +FROM rust:1.90-slim-bullseye AS chef # Install build dependencies. RocksDB is compiled from source by librocksdb-sys. RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y llvm clang libclang-dev pkg-config libssl-dev libsqlite3-dev ca-certificates && \ + apt-get install -y \ + llvm \ + clang \ + libclang-dev \ + cmake \ + pkg-config \ + libssl-dev \ + libsqlite3-dev \ + ca-certificates && \ rm -rf /var/lib/apt/lists/* - +RUN cargo install cargo-chef WORKDIR /app -COPY ./Cargo.toml . -COPY ./Cargo.lock . -COPY ./bin ./bin -COPY ./crates ./crates -COPY ./proto ./proto -RUN cargo install --path bin/node --locked +FROM chef AS planner +COPY . . +RUN cargo chef prepare --recipe-path recipe.json -FROM debian:bullseye-slim +FROM chef AS builder +COPY --from=planner /app/recipe.json recipe.json +# Build dependencies - this is the caching Docker layer! +RUN cargo chef cook --release --recipe-path recipe.json +# Build application +COPY . . +RUN cargo build --release --locked --bin miden-node -# Update machine & install required packages -# The installation of sqlite3 is needed for correct function of the SQLite database +# Base line runtime image with runtime dependencies installed. 
+FROM debian:bullseye-slim AS runtime-base RUN apt-get update && \ apt-get -y upgrade && \ - apt-get install -y --no-install-recommends \ - sqlite3 \ + apt-get install -y --no-install-recommends sqlite3 \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/cargo/bin/miden-node /usr/local/bin/miden-node - +FROM runtime-base AS runtime +COPY --from=builder /app/target/release/miden-node /usr/local/bin/miden-node LABEL org.opencontainers.image.authors=devops@miden.team \ org.opencontainers.image.url=https://0xMiden.github.io/ \ org.opencontainers.image.documentation=https://github.com/0xMiden/miden-node \ org.opencontainers.image.source=https://github.com/0xMiden/miden-node \ org.opencontainers.image.vendor=Miden \ org.opencontainers.image.licenses=MIT - ARG CREATED ARG VERSION ARG COMMIT @@ -43,6 +51,5 @@ LABEL org.opencontainers.image.created=$CREATED \ # Expose RPC port EXPOSE 57291 - # Miden node does not spawn sub-processes, so it can be used as the PID1 CMD miden-node diff --git a/bin/node/build.rs b/bin/node/build.rs new file mode 100644 index 000000000..ed4038d06 --- /dev/null +++ b/bin/node/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/bin/node/src/commands/block_producer.rs b/bin/node/src/commands/block_producer.rs index 5cfbc78fc..5d416ea8e 100644 --- a/bin/node/src/commands/block_producer.rs +++ b/bin/node/src/commands/block_producer.rs @@ -86,7 +86,6 @@ impl BlockProducerCommand { store_url, validator_url, batch_prover_url: block_producer.batch_prover_url, - block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, block_interval: block_producer.block_interval, max_txs_per_batch: block_producer.max_txs_per_batch, @@ -125,7 +124,6 @@ mod tests { validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, - block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: 8, @@ -149,7 +147,6 @@ mod tests { validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, - block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: miden_protocol::MAX_ACCOUNTS_PER_BATCH + 1, /* Use protocol diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 22f1199a3..8bc38fd07 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -4,7 +4,6 @@ use std::time::Duration; use anyhow::Context; use miden_node_block_producer::BlockProducer; -use miden_node_ntx_builder::NetworkTransactionBuilder; use miden_node_rpc::Rpc; use miden_node_store::Store; use miden_node_utils::grpc::UrlExt; @@ -20,6 +19,7 @@ use super::{ENV_DATA_DIRECTORY, ENV_RPC_URL}; use crate::commands::{ BlockProducerConfig, DEFAULT_TIMEOUT, + ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, ENV_VALIDATOR_INSECURE_SECRET_KEY, @@ -68,6 +68,10 @@ pub enum BundledCommand { #[arg(long = "rpc.url", env = ENV_RPC_URL, value_name = "URL")] rpc_url: Url, + /// The remote block prover's gRPC url. If not provided, a local block prover will be used. + #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] + block_prover_url: Option, + /// Directory in which the Store component should store the database and raw block data. 
#[arg(long = "data-directory", env = ENV_DATA_DIRECTORY, value_name = "DIR")] data_directory: PathBuf, @@ -129,6 +133,7 @@ impl BundledCommand { }, BundledCommand::Start { rpc_url, + block_prover_url, data_directory, block_producer, ntx_builder, @@ -140,6 +145,7 @@ impl BundledCommand { let signer = SecretKey::read_from_bytes(&secret_key_bytes)?; Self::start( rpc_url, + block_prover_url, data_directory, ntx_builder, block_producer, @@ -151,9 +157,10 @@ impl BundledCommand { } } - #[allow(clippy::too_many_lines)] + #[expect(clippy::too_many_lines)] async fn start( rpc_url: Url, + block_prover_url: Option, data_directory: PathBuf, ntx_builder: NtxBuilderConfig, block_producer: BlockProducerConfig, @@ -212,6 +219,7 @@ impl BundledCommand { block_producer_listener: store_block_producer_listener, ntx_builder_listener: store_ntx_builder_listener, data_directory: data_directory_clone, + block_prover_url, grpc_timeout, } .serve() @@ -235,7 +243,6 @@ impl BundledCommand { store_url, validator_url, batch_prover_url: block_producer.batch_prover_url, - block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, block_interval: block_producer.block_interval, max_batches_per_block: block_producer.max_batches_per_block, @@ -296,27 +303,29 @@ impl BundledCommand { ]); // Start network transaction builder. The endpoint is available after loading completes. - let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) - .context("Failed to parse URL")?; - if should_start_ntx_builder { + let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) + .context("Failed to parse URL")?; let validator_url = Url::parse(&format!("http://{validator_address}")) .context("Failed to parse URL")?; + let block_producer_url = Url::parse(&format!("http://{block_producer_address}")) + .context("Failed to parse URL")?; + + let builder_config = ntx_builder.into_builder_config( + store_ntx_builder_url, + block_producer_url, + validator_url, + ); + let id = join_set .spawn(async move { - let block_producer_url = - Url::parse(&format!("http://{block_producer_address}")) - .context("Failed to parse URL")?; - NetworkTransactionBuilder::new( - store_ntx_builder_url, - block_producer_url, - validator_url, - ntx_builder.tx_prover_url, - ntx_builder.script_cache_size, - ) - .run() - .await - .context("failed while serving ntx builder component") + builder_config + .build() + .await + .context("failed to initialize ntx builder")? + .run() + .await + .context("failed while serving ntx builder component") }) .id(); component_ids.insert(id, "ntx-builder"); diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 7e8fa7e69..5b1e8e52a 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -68,6 +68,9 @@ pub struct NtxBuilderConfig { )] pub ticker_interval: Duration, + /// Number of note scripts to cache locally. + /// + /// Note scripts not in cache must first be retrieved from the store. #[arg( long = "ntx-builder.script-cache-size", env = ENV_NTX_SCRIPT_CACHE_SIZE, @@ -77,6 +80,20 @@ pub struct NtxBuilderConfig { pub script_cache_size: NonZeroUsize, } +impl NtxBuilderConfig { + /// Converts this CLI config into the ntx-builder's internal config. 
+ pub fn into_builder_config( + self, + store_url: Url, + block_producer_url: Url, + validator_url: Url, + ) -> miden_node_ntx_builder::NtxBuilderConfig { + miden_node_ntx_builder::NtxBuilderConfig::new(store_url, block_producer_url, validator_url) + .with_tx_prover_url(self.tx_prover_url) + .with_script_cache_size(self.script_cache_size) + } +} + /// Configuration for the Block Producer component #[derive(clap::Args)] pub struct BlockProducerConfig { @@ -103,11 +120,6 @@ pub struct BlockProducerConfig { #[arg(long = "batch-prover.url", env = ENV_BATCH_PROVER_URL, value_name = "URL")] pub batch_prover_url: Option, - /// The remote block prover's gRPC url. If unset, will default to running a prover - /// in-process which is expensive. - #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] - pub block_prover_url: Option, - /// The number of transactions per batch. #[arg( long = "max-txs-per-batch", diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index 9dd311368..bde1cf774 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -17,6 +17,7 @@ use super::{ }; use crate::commands::{ DEFAULT_TIMEOUT, + ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, ENV_VALIDATOR_INSECURE_SECRET_KEY, @@ -24,7 +25,7 @@ use crate::commands::{ duration_to_human_readable_string, }; -#[allow(clippy::large_enum_variant, reason = "single use enum")] +#[expect(clippy::large_enum_variant, reason = "single use enum")] #[derive(clap::Subcommand)] pub enum StoreCommand { /// Bootstraps the blockchain database with the genesis block. @@ -72,6 +73,10 @@ pub enum StoreCommand { #[arg(long = "block-producer.url", env = ENV_STORE_BLOCK_PRODUCER_URL, value_name = "URL")] block_producer_url: Url, + /// The remote block prover's gRPC url. If not provided, a local block prover will be used. + #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] + block_prover_url: Option, + /// Directory in which to store the database and raw block data. 
#[arg(long, env = ENV_DATA_DIRECTORY, value_name = "DIR")] data_directory: PathBuf, @@ -115,6 +120,7 @@ impl StoreCommand { rpc_url, ntx_builder_url, block_producer_url, + block_prover_url, data_directory, enable_otel: _, grpc_timeout, @@ -123,6 +129,7 @@ impl StoreCommand { rpc_url, ntx_builder_url, block_producer_url, + block_prover_url, data_directory, grpc_timeout, ) @@ -143,6 +150,7 @@ impl StoreCommand { rpc_url: Url, ntx_builder_url: Url, block_producer_url: Url, + block_prover_url: Option, data_directory: PathBuf, grpc_timeout: Duration, ) -> anyhow::Result<()> { @@ -169,6 +177,7 @@ impl StoreCommand { Store { rpc_listener, + block_prover_url, ntx_builder_listener, block_producer_listener, data_directory, diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index 85bc355f7..adb60f7a8 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -66,6 +66,7 @@ miden-testing = { workspace = true } miden-tx = { features = ["testing"], workspace = true } [build-dependencies] -miden-node-proto-build = { features = ["internal"], workspace = true } -miette = { features = ["fancy"], version = "7.5" } -tonic-prost-build = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miette = { features = ["fancy"], version = "7.5" } +tonic-prost-build = { workspace = true } diff --git a/bin/remote-prover/build.rs b/bin/remote-prover/build.rs index f9b2eaafb..262ab49af 100644 --- a/bin/remote-prover/build.rs +++ b/bin/remote-prover/build.rs @@ -12,7 +12,8 @@ const GENERATED_OUT_DIR: &str = "src/generated"; /// Generates Rust protobuf bindings. fn main() -> miette::Result<()> { - println!("cargo::rerun-if-env-changed=BUILD_PROTO"); + miden_node_rocksdb_cxx_linkage_fix::configure(); + println!("cargo:rerun-if-env-changed=BUILD_PROTO"); if !BUILD_GENERATED_FILES_IN_SRC { return Ok(()); } diff --git a/bin/remote-prover/src/api/prover.rs b/bin/remote-prover/src/api/prover.rs index 24a70f731..9af8f8eb3 100644 --- a/bin/remote-prover/src/api/prover.rs +++ b/bin/remote-prover/src/api/prover.rs @@ -88,7 +88,6 @@ impl ProverRpcApi { Self { prover } } - #[allow(clippy::result_large_err)] #[instrument( target = COMPONENT, name = "remote_prover.prove_tx", @@ -123,7 +122,6 @@ impl ProverRpcApi { Ok(Response::new(proto::remote_prover::Proof { payload: proof.to_bytes() })) } - #[allow(clippy::result_large_err)] #[instrument( target = COMPONENT, name = "remote_prover.prove_batch", @@ -154,7 +152,6 @@ impl ProverRpcApi { Ok(Response::new(proto::remote_prover::Proof { payload: proven_batch.to_bytes() })) } - #[allow(clippy::result_large_err)] #[instrument( target = COMPONENT, name = "remote_prover.prove_block", @@ -180,7 +177,7 @@ impl ProverRpcApi { let block_proof = prover .try_lock() .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? 
- .prove(tx_batches, block_header, block_inputs) + .prove(tx_batches, &block_header, block_inputs) .map_err(internal_error)?; Ok(Response::new(proto::remote_prover::Proof { payload: block_proof.to_bytes() })) diff --git a/bin/remote-prover/src/generated/mod.rs b/bin/remote-prover/src/generated/mod.rs index eb7d89309..830c3a508 100644 --- a/bin/remote-prover/src/generated/mod.rs +++ b/bin/remote-prover/src/generated/mod.rs @@ -1,4 +1,5 @@ #![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] +#![allow(clippy::allow_attributes, reason = "generated by build.rs and tonic")] #[rustfmt::skip] pub mod remote_prover; diff --git a/bin/remote-prover/src/proxy/mod.rs b/bin/remote-prover/src/proxy/mod.rs index 81290d73a..e543022ac 100644 --- a/bin/remote-prover/src/proxy/mod.rs +++ b/bin/remote-prover/src/proxy/mod.rs @@ -252,14 +252,12 @@ pub struct RequestQueue { impl RequestQueue { /// Create a new empty request queue - #[allow(clippy::new_without_default)] pub fn new() -> Self { QUEUE_SIZE.set(0); Self { queue: RwLock::new(VecDeque::new()) } } /// Get the length of the queue - #[allow(clippy::len_without_is_empty)] pub async fn len(&self) -> usize { self.queue.read().await.len() } diff --git a/bin/remote-prover/src/proxy/worker.rs b/bin/remote-prover/src/proxy/worker.rs index aa418e8cb..ffa8f708e 100644 --- a/bin/remote-prover/src/proxy/worker.rs +++ b/bin/remote-prover/src/proxy/worker.rs @@ -159,7 +159,6 @@ impl Worker { /// Returns `Ok(())` if the worker is healthy and compatible, or `Err(reason)` if there's an /// issue. The caller should use `update_status` to apply the result to the worker's health /// status. - #[allow(clippy::too_many_lines)] #[tracing::instrument(target = COMPONENT, name = "worker.check_status")] pub async fn check_status(&mut self, supported_proof_type: ProofType) -> Result<(), String> { if !self.should_do_health_check() { diff --git a/bin/stress-test/Cargo.toml b/bin/stress-test/Cargo.toml index b9df84d41..9c3fe9387 100644 --- a/bin/stress-test/Cargo.toml +++ b/bin/stress-test/Cargo.toml @@ -21,7 +21,6 @@ clap = { features = ["derive"], version = "4.5" } fs-err = { workspace = true } futures = { workspace = true } miden-air = { features = ["testing"], workspace = true } -miden-block-prover = { features = ["testing"], workspace = true } miden-node-block-producer = { workspace = true } miden-node-proto = { workspace = true } miden-node-store = { workspace = true } @@ -33,3 +32,6 @@ rayon = { version = "1.10" } tokio = { workspace = true } tonic = { default-features = true, workspace = true } url = { workspace = true } + +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } diff --git a/bin/stress-test/build.rs b/bin/stress-test/build.rs new file mode 100644 index 000000000..ed4038d06 --- /dev/null +++ b/bin/stress-test/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/bin/stress-test/src/seeding/metrics.rs b/bin/stress-test/src/seeding/metrics.rs index cdf32965a..56e89e4a9 100644 --- a/bin/stress-test/src/seeding/metrics.rs +++ b/bin/stress-test/src/seeding/metrics.rs @@ -76,7 +76,7 @@ impl SeedingMetrics { } /// Prints the block metrics table. 
- #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] fn print_block_metrics(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!(f, "\nBlock metrics:")?; writeln!(f, "Note: Each block contains 256 transactions (16 batches * 16 transactions).")?; @@ -189,7 +189,7 @@ impl SeedingMetrics { } impl Display for SeedingMetrics { - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!( f, diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index e0fe79338..3b80481bb 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -5,7 +5,6 @@ use std::time::{Duration, Instant}; use metrics::SeedingMetrics; use miden_air::ExecutionProof; -use miden_block_prover::LocalBlockProver; use miden_node_block_producer::store::StoreClient; use miden_node_proto::domain::batch::BatchInputs; use miden_node_proto::generated::store::rpc_client::RpcClient; @@ -30,6 +29,7 @@ use miden_protocol::block::{ FeeParameters, ProposedBlock, ProvenBlock, + SignedBlock, }; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey as EcdsaSecretKey; use miden_protocol::crypto::dsa::falcon512_rpo::{PublicKey, SecretKey}; @@ -145,7 +145,7 @@ async fn generate_blocks( let mut consume_notes_txs = vec![]; let consumes_per_block = TRANSACTIONS_PER_BATCH * BATCHES_PER_BLOCK - 1; - #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] + #[expect(clippy::cast_sign_loss, clippy::cast_precision_loss)] let num_public_accounts = (consumes_per_block as f64 * (f64::from(public_accounts_percentage) / 100.0)) .round() as usize; @@ -161,7 +161,7 @@ async fn generate_blocks( SecretKey::with_rng(&mut *rng) }; - let mut prev_block = genesis_block.clone(); + let mut prev_block_header = genesis_block.header().clone(); let mut current_anchor_header = genesis_block.header().clone(); for i in 0..total_blocks { @@ -193,7 +193,7 @@ async fn generate_blocks( note_nullifiers.extend(notes.iter().map(|n| n.nullifier().prefix())); // create the tx that creates the notes - let emit_note_tx = create_emit_note_tx(prev_block.header(), &mut faucet, notes.clone()); + let emit_note_tx = create_emit_note_tx(&prev_block_header, &mut faucet, notes.clone()); // collect all the txs block_txs.push(emit_note_tx); @@ -202,27 +202,23 @@ async fn generate_blocks( // create the batches with [TRANSACTIONS_PER_BATCH] txs each let batches: Vec = block_txs .par_chunks(TRANSACTIONS_PER_BATCH) - .map(|txs| create_batch(txs, prev_block.header())) + .map(|txs| create_batch(txs, &prev_block_header)) .collect(); // create the block and send it to the store let block_inputs = get_block_inputs(store_client, &batches, &mut metrics).await; // update blocks - prev_block = apply_block(batches, block_inputs, store_client, &mut metrics).await; - if current_anchor_header.block_epoch() != prev_block.header().block_epoch() { - current_anchor_header = prev_block.header().clone(); + prev_block_header = apply_block(batches, block_inputs, store_client, &mut metrics).await; + if current_anchor_header.block_epoch() != prev_block_header.block_epoch() { + current_anchor_header = prev_block_header.clone(); } // create the consume notes txs to be used in the next block let batch_inputs = - get_batch_inputs(store_client, prev_block.header(), ¬es, &mut metrics).await; - consume_notes_txs = create_consume_note_txs( - prev_block.header(), - accounts, - notes, - &batch_inputs.note_proofs, - ); 
+ get_batch_inputs(store_client, &prev_block_header, ¬es, &mut metrics).await; + consume_notes_txs = + create_consume_note_txs(&prev_block_header, accounts, notes, &batch_inputs.note_proofs); // track store size every 50 blocks if i % 50 == 0 { @@ -248,21 +244,21 @@ async fn apply_block( block_inputs: BlockInputs, store_client: &StoreClient, metrics: &mut SeedingMetrics, -) -> ProvenBlock { - let proposed_block = ProposedBlock::new(block_inputs.clone(), batches).unwrap(); +) -> BlockHeader { + let proposed_block = ProposedBlock::new(block_inputs, batches).unwrap(); let (header, body) = proposed_block.clone().into_header_and_body().unwrap(); - let block_proof = LocalBlockProver::new(0) - .prove_dummy(proposed_block.batches().clone(), header.clone(), block_inputs) - .unwrap(); + let block_size: usize = header.to_bytes().len() + body.to_bytes().len(); let signature = EcdsaSecretKey::new().sign(header.commitment()); - let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); - let block_size: usize = proven_block.to_bytes().len(); + // SAFETY: The header, body, and signature are known to correspond to each other. + let signed_block = SignedBlock::new_unchecked(header, body, signature); + let ordered_batches = proposed_block.batches().clone(); let start = Instant::now(); - store_client.apply_block(&proven_block).await.unwrap(); + store_client.apply_block(&ordered_batches, &signed_block).await.unwrap(); metrics.track_block_insertion(start.elapsed(), block_size); - proven_block + let (header, ..) = signed_block.into_parts(); + header } // HELPER FUNCTIONS @@ -366,7 +362,7 @@ fn create_batch(txs: &[ProvenTransaction], block_ref: &BlockHeader) -> ProvenBat account_updates, InputNotes::new(input_notes).unwrap(), output_notes, - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked(txs.iter().map(TransactionHeader::from).collect()), ) .unwrap() @@ -522,6 +518,8 @@ async fn get_block_inputs( /// Runs the store with the given data directory. Returns a tuple with: /// - a gRPC client to access the store /// - the URL of the store +/// +/// The store uses a local prover. pub async fn start_store( data_directory: PathBuf, ) -> (RpcClient>, Url) { @@ -543,6 +541,7 @@ pub async fn start_store( task::spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, diff --git a/bin/stress-test/src/store/metrics.rs b/bin/stress-test/src/store/metrics.rs index 95f8ce0ff..b56f36264 100644 --- a/bin/stress-test/src/store/metrics.rs +++ b/bin/stress-test/src/store/metrics.rs @@ -18,7 +18,7 @@ pub fn print_summary(timers_accumulator: &[Duration]) { } /// Computes a percentile from a list of durations. 
-#[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] +#[expect(clippy::cast_sign_loss, clippy::cast_precision_loss)] fn compute_percentile(times: &[Duration], percentile: f64) -> Duration { if times.is_empty() { return Duration::ZERO; diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index fa39303ae..7e83b0ae5 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -76,7 +76,7 @@ pub async fn bench_sync_state(data_directory: PathBuf, iterations: usize, concur print_summary(&timers_accumulator); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let average_notes_per_response = responses.iter().map(|r| r.notes.len()).sum::() as f64 / responses.len() as f64; println!("Average notes per response: {average_notes_per_response}"); @@ -270,7 +270,7 @@ pub async fn bench_sync_nullifiers( print_summary(&timers_accumulator); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let average_nullifiers_per_response = responses.iter().map(|r| r.nullifiers.len()).sum::() as f64 / responses.len() as f64; println!("Average nullifiers per response: {average_nullifiers_per_response}"); @@ -364,7 +364,7 @@ pub async fn bench_sync_transactions( print_summary(&timers_accumulator); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let average_transactions_per_response = if responses.is_empty() { 0.0 } else { @@ -376,13 +376,13 @@ pub async fn bench_sync_transactions( // Calculate pagination statistics let total_runs = results.len(); let paginated_runs = results.iter().filter(|r| r.pages > 1).count(); - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let pagination_rate = if total_runs > 0 { (paginated_runs as f64 / total_runs as f64) * 100.0 } else { 0.0 }; - #[allow(clippy::cast_precision_loss)] + #[expect(clippy::cast_precision_loss)] let avg_pages = if total_runs > 0 { results.iter().map(|r| r.pages as f64).sum::() / total_runs as f64 } else { diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index e5e5511ad..023a7a448 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -22,14 +22,12 @@ tracing-forest = ["miden-node-utils/tracing-forest"] anyhow = { workspace = true } futures = { workspace = true } itertools = { workspace = true } -miden-block-prover = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = ["batch-prover", "block-prover"], workspace = true } miden-standards = { workspace = true } -miden-tx = { default-features = true, workspace = true } miden-tx-batch-prover = { workspace = true } rand = { version = "0.9" } thiserror = { workspace = true } @@ -46,6 +44,7 @@ assert_matches = { workspace = true } miden-node-store = { workspace = true } miden-node-test-macro = { workspace = true } miden-node-utils = { features = ["testing"], workspace = true } +miden-node-validator = { workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } miden-standards = { features = ["testing"], workspace = true } miden-tx = { features = ["testing"], workspace = true } diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs 
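Two notes on the hunk above. First, the `#[allow]` to `#[expect]` swaps made throughout this patch rely on Rust 1.81+: `expect` silences the lint exactly like `allow`, but emits an `unfulfilled_lint_expectations` warning if the lint no longer fires, so stale suppressions surface on their own. Second, the body of `compute_percentile` is not part of this hunk; the following is only an illustrative nearest-rank style sketch (not the crate's actual implementation, and it assumes `percentile` is given on a 0 to 100 scale) showing where the `cast_sign_loss` / `cast_precision_loss` casts come in.

    use std::time::Duration;

    // Illustrative only: nearest-rank percentile over a sorted copy of the samples.
    #[expect(clippy::cast_sign_loss, clippy::cast_precision_loss)]
    fn compute_percentile(times: &[Duration], percentile: f64) -> Duration {
        if times.is_empty() {
            return Duration::ZERO;
        }
        let mut sorted = times.to_vec();
        sorted.sort();
        // f64 -> usize is the sign-losing cast; usize -> f64 is the precision-losing one.
        let rank = ((percentile / 100.0) * (sorted.len() as f64 - 1.0)).round() as usize;
        sorted[rank.min(sorted.len() - 1)]
    }

    fn main() {
        let samples = [Duration::from_millis(10), Duration::from_millis(20), Duration::from_millis(30)];
        println!("{:?}", compute_percentile(&samples, 95.0));
    }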
index e3cc714c2..34dab83a3 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -9,7 +9,7 @@ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::batch::{BatchId, ProposedBatch, ProvenBatch}; -use miden_remote_prover_client::remote_prover::batch_prover::RemoteBatchProver; +use miden_remote_prover_client::RemoteBatchProver; use miden_tx_batch_prover::LocalBatchProver; use rand::Rng; use tokio::task::JoinSet; diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index a3a36ec4f..56b5a3666 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -1,29 +1,15 @@ -use std::ops::{Deref, Range}; +use std::ops::Deref; use std::sync::Arc; use anyhow::Context; use futures::FutureExt; -use miden_block_prover::LocalBlockProver; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::batch::{OrderedBatches, ProvenBatch}; -use miden_protocol::block::{ - BlockBody, - BlockHeader, - BlockInputs, - BlockNumber, - BlockProof, - ProposedBlock, - ProvenBlock, -}; -use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::block::{BlockInputs, BlockNumber, ProposedBlock, ProvenBlock, SignedBlock}; use miden_protocol::note::NoteHeader; -use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionHeader}; -use miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; -use rand::Rng; +use miden_protocol::transaction::TransactionHeader; use tokio::time::Duration; -use tracing::{Span, info, instrument}; -use url::Url; +use tracing::{Span, instrument}; use crate::errors::BuildBlockError; use crate::mempool::SharedMempool; @@ -35,21 +21,19 @@ use crate::{COMPONENT, TelemetryInjectorExt}; // ================================================================================================= pub struct BlockBuilder { + /// The frequency at which blocks are produced. pub block_interval: Duration, - /// Used to simulate block proving by sleeping for a random duration selected from this range. - pub simulated_proof_time: Range, /// Simulated block failure rate as a percentage. /// /// Note: this _must_ be sign positive and less than 1.0. pub failure_rate: f64, + /// The store RPC client for committing blocks. pub store: StoreClient, + /// The validator RPC client for validating blocks. pub validator: BlockProducerValidatorClient, - - /// The prover used to prove a proposed block into a proven block. - pub block_prover: BlockProver, } impl BlockBuilder { @@ -59,20 +43,12 @@ impl BlockBuilder { pub fn new( store: StoreClient, validator: BlockProducerValidatorClient, - block_prover_url: Option, block_interval: Duration, ) -> Self { - let block_prover = match block_prover_url { - Some(url) => BlockProver::new_remote(url), - None => BlockProver::new_local(MIN_PROOF_SECURITY_LEVEL), - }; - Self { block_interval, // Note: The range cannot be empty. 
- simulated_proof_time: Duration::ZERO..Duration::from_millis(1), failure_rate: 0.0, - block_prover, store, validator, } @@ -136,16 +112,11 @@ impl BlockBuilder { self.get_block_inputs(selected) .inspect_ok(BlockBatchesAndInputs::inject_telemetry) .and_then(|inputs| self.propose_block(inputs)) - .inspect_ok(|(proposed_block, _)| { + .inspect_ok(|proposed_block| { ProposedBlock::inject_telemetry(proposed_block); }) - .and_then(|(proposed_block, inputs)| self.validate_block(proposed_block, inputs)) - .and_then(|(proposed_block, inputs, header, signature, body)| self.prove_block(proposed_block, inputs, header, signature, body)) - .inspect_ok(ProvenBlock::inject_telemetry) - // Failure must be injected before the final pipeline stage i.e. before commit is called. The system cannot - // handle errors after it considers the process complete (which makes sense). - .and_then(|proven_block| async { self.inject_failure(proven_block) }) - .and_then(|proven_block| self.commit_block(mempool, proven_block)) + .and_then(|proposed_block| self.build_and_validate_block(proposed_block)) + .and_then(|(ordered_batches, signed_block)| self.commit_block(mempool, ordered_batches, signed_block)) // Handle errors by propagating the error to the root span and rolling back the block. .inspect_err(|err| Span::current().set_error(err)) .or_else(|err| async { @@ -239,23 +210,21 @@ impl BlockBuilder { async fn propose_block( &self, batches_inputs: BlockBatchesAndInputs, - ) -> Result<(ProposedBlock, BlockInputs), BuildBlockError> { + ) -> Result { let BlockBatchesAndInputs { batches, inputs } = batches_inputs; let batches = batches.into_iter().map(Arc::unwrap_or_clone).collect(); - let proposed_block = ProposedBlock::new(inputs.clone(), batches) - .map_err(BuildBlockError::ProposeBlockFailed)?; + let proposed_block = + ProposedBlock::new(inputs, batches).map_err(BuildBlockError::ProposeBlockFailed)?; - Ok((proposed_block, inputs)) + Ok(proposed_block) } #[instrument(target = COMPONENT, name = "block_builder.validate_block", skip_all, err)] - async fn validate_block( + async fn build_and_validate_block( &self, proposed_block: ProposedBlock, - block_inputs: BlockInputs, - ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, Signature, BlockBody), BuildBlockError> - { + ) -> Result<(OrderedBatches, SignedBlock), BuildBlockError> { // Concurrently build the block and validate it via the validator. let build_result = tokio::task::spawn_blocking({ let proposed_block = proposed_block.clone(); @@ -278,53 +247,27 @@ impl BlockBuilder { } let (ordered_batches, ..) = proposed_block.into_parts(); - Ok((ordered_batches, block_inputs, header, signature, body)) - } - - #[instrument(target = COMPONENT, name = "block_builder.prove_block", skip_all, err)] - async fn prove_block( - &self, - ordered_batches: OrderedBatches, - block_inputs: BlockInputs, - header: BlockHeader, - signature: Signature, - body: BlockBody, - ) -> Result { - // Prove block using header and body from validator. - let block_proof = self - .block_prover - .prove(ordered_batches.clone(), header.clone(), block_inputs) - .await?; - self.simulate_proving().await; - - // SAFETY: The header and body are assumed valid and consistent with the proof. 
- let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); - if proven_block.proof_security_level() < MIN_PROOF_SECURITY_LEVEL { - return Err(BuildBlockError::SecurityLevelTooLow( - proven_block.proof_security_level(), - MIN_PROOF_SECURITY_LEVEL, - )); - } - // TODO(sergerad): Consider removing this validation. Once block proving is implemented, - // this would be replaced with verifying the proof returned from the prover against - // the block header. - validate_tx_headers(&proven_block, &ordered_batches.to_transactions())?; - - Ok(proven_block) + // SAFETY: The header, body, and signature are known to correspond to each other because the + // header and body are derived from the proposed block and the signature is verified + // against the corresponding commitment. + let signed_block = SignedBlock::new_unchecked(header, body, signature); + Ok((ordered_batches, signed_block)) } #[instrument(target = COMPONENT, name = "block_builder.commit_block", skip_all, err)] async fn commit_block( &self, mempool: &SharedMempool, - built_block: ProvenBlock, + ordered_batches: OrderedBatches, + signed_block: SignedBlock, ) -> Result<(), BuildBlockError> { self.store - .apply_block(&built_block) + .apply_block(&ordered_batches, &signed_block) .await .map_err(BuildBlockError::StoreApplyBlockFailed)?; - mempool.lock().await.commit_block(built_block.header().clone()); + let (header, ..) = signed_block.into_parts(); + mempool.lock().await.commit_block(header); Ok(()) } @@ -333,31 +276,6 @@ impl BlockBuilder { async fn rollback_block(&self, mempool: &SharedMempool, block: BlockNumber) { mempool.lock().await.rollback_block(block); } - - #[instrument(target = COMPONENT, name = "block_builder.simulate_proving", skip_all)] - async fn simulate_proving(&self) { - let proving_duration = rand::rng().random_range(self.simulated_proof_time.clone()); - - Span::current().set_attribute("range.min_s", self.simulated_proof_time.start); - Span::current().set_attribute("range.max_s", self.simulated_proof_time.end); - Span::current().set_attribute("dice_roll_s", proving_duration); - - tokio::time::sleep(proving_duration).await; - } - - #[instrument(target = COMPONENT, name = "block_builder.inject_failure", skip_all, err)] - fn inject_failure(&self, value: T) -> Result { - let roll = rand::rng().random::(); - - Span::current().set_attribute("failure_rate", self.failure_rate); - Span::current().set_attribute("dice_roll", roll); - - if roll < self.failure_rate { - Err(BuildBlockError::InjectedFailure) - } else { - Ok(value) - } - } } /// A wrapper around batches selected for inlucion in a block, primarily used to be able to inject @@ -454,76 +372,3 @@ impl TelemetryInjectorExt for ProvenBlock { span.set_attribute("block.commitments.transaction", header.tx_commitment()); } } - -// BLOCK PROVER -// ================================================================================================ - -pub enum BlockProver { - Local(LocalBlockProver), - Remote(RemoteBlockProver), -} - -impl BlockProver { - pub fn new_local(security_level: u32) -> Self { - info!(target: COMPONENT, "Using local block prover"); - Self::Local(LocalBlockProver::new(security_level)) - } - - pub fn new_remote(endpoint: impl Into) -> Self { - info!(target: COMPONENT, "Using remote block prover"); - Self::Remote(RemoteBlockProver::new(endpoint)) - } - - #[instrument(target = COMPONENT, skip_all, err)] - async fn prove( - &self, - tx_batches: OrderedBatches, - block_header: BlockHeader, - block_inputs: BlockInputs, - ) -> Result { - 
match self { - Self::Local(prover) => prover - .prove(tx_batches, block_header, block_inputs) - .map_err(BuildBlockError::ProveBlockFailed), - Self::Remote(prover) => prover - .prove(tx_batches, block_header, block_inputs) - .await - .map_err(BuildBlockError::RemoteProverClientError), - } - } -} - -/// Validates that the proven block's transaction headers are consistent with the transactions -/// passed in the proposed block. -/// -/// This expects that transactions from the proposed block and proven block are in the same -/// order, as defined by [`OrderedTransactionHeaders`]. -fn validate_tx_headers( - proven_block: &ProvenBlock, - proposed_txs: &OrderedTransactionHeaders, -) -> Result<(), BuildBlockError> { - if proposed_txs.as_slice().len() != proven_block.body().transactions().as_slice().len() { - return Err(BuildBlockError::other(format!( - "remote prover returned {} transaction headers but {} transactions were passed as part of the proposed block", - proven_block.body().transactions().as_slice().len(), - proposed_txs.as_slice().len() - ))); - } - - // Because we checked the length matches we can zip the iterators up. - // We expect the transaction headers to be in the same order. - for (proposed_header, proven_header) in proposed_txs - .as_slice() - .iter() - .zip(proven_block.body().transactions().as_slice()) - { - if proposed_header != proven_header { - return Err(BuildBlockError::other(format!( - "transaction header with id {} does not match header of the transaction in the proposed block", - proposed_header.id() - ))); - } - } - - Ok(()) -} diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs index 5b2ab30b3..f581ca95e 100644 --- a/crates/block-producer/src/domain/transaction.rs +++ b/crates/block-producer/src/domain/transaction.rs @@ -1,5 +1,3 @@ -#![allow(dead_code, reason = "WIP: mempoool refactor")] - use std::collections::HashSet; use std::sync::Arc; @@ -127,10 +125,6 @@ impl AuthenticatedTransaction { Arc::clone(&self.inner) } - pub fn raw_proven_transaction(&self) -> &ProvenTransaction { - &self.inner - } - pub fn expires_at(&self) -> BlockNumber { self.inner.expiration_block_num() } @@ -177,4 +171,8 @@ impl AuthenticatedTransaction { self.store_account_state = None; self } + + pub fn raw_proven_transaction(&self) -> &ProvenTransaction { + &self.inner + } } diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 40c74c99f..b610b0534 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -1,6 +1,5 @@ use core::error::Error as CoreError; -use miden_block_prover::BlockProverError; use miden_node_proto::errors::{ConversionError, GrpcError}; use miden_protocol::Word; use miden_protocol::account::AccountId; @@ -223,16 +222,10 @@ pub enum BuildBlockError { ValidateBlockFailed(#[source] Box), #[error("block signature is invalid")] InvalidSignature, - #[error("failed to prove block")] - ProveBlockFailed(#[source] BlockProverError), + /// We sometimes randomly inject errors into the batch building process to test our failure /// responses. - #[error("nothing actually went wrong, failure was injected on purpose")] - InjectedFailure, - #[error("failed to prove block with remote prover")] - RemoteProverClientError(#[source] RemoteProverClientError), - #[error("block proof security level is too low: {0} < {1}")] - SecurityLevelTooLow(u32, u32), + /// Custom error variant for errors not covered by the other variants. 
#[error("{error_msg}")] Other { diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs index 36ab9b53d..955aa2356 100644 --- a/crates/block-producer/src/lib.rs +++ b/crates/block-producer/src/lib.rs @@ -60,7 +60,7 @@ pub const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(1); /// /// The value is selected such that all transactions should approximately be processed within one /// minutes with a block time of 5s. -#[allow(clippy::cast_sign_loss, reason = "Both durations are positive")] +#[expect(clippy::cast_sign_loss, reason = "Both durations are positive")] pub const DEFAULT_MEMPOOL_TX_CAPACITY: NonZeroUsize = NonZeroUsize::new( DEFAULT_MAX_BATCHES_PER_BLOCK * DEFAULT_MAX_TXS_PER_BATCH diff --git a/crates/block-producer/src/mempool/nodes.rs b/crates/block-producer/src/mempool/nodes.rs index 461a836c2..c41e305fa 100644 --- a/crates/block-producer/src/mempool/nodes.rs +++ b/crates/block-producer/src/mempool/nodes.rs @@ -416,7 +416,7 @@ mod tests { BTreeMap::from([(account_update.account_id(), account_update)]), InputNotes::default(), Vec::default(), - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked(vec![tx_header]), ) .unwrap(); diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 8245c1ee6..d7ea49db0 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -40,6 +40,9 @@ use crate::store::StoreClient; use crate::validator::BlockProducerValidatorClient; use crate::{CACHED_MEMPOOL_STATS_UPDATE_INTERVAL, COMPONENT, SERVER_NUM_BATCH_BUILDERS}; +#[cfg(test)] +mod tests; + /// The block producer server. /// /// Specifies how to connect to the store, batch prover, and block prover components. @@ -55,8 +58,6 @@ pub struct BlockProducer { pub validator_url: Url, /// The address of the batch prover component. pub batch_prover_url: Option, - /// The address of the block prover component. - pub block_prover_url: Option, /// The interval at which to produce batches. pub batch_interval: Duration, /// The interval at which to produce blocks. @@ -82,7 +83,6 @@ impl BlockProducer { /// /// Executes in place (i.e. not spawned) and will run indefinitely until a fatal error is /// encountered. 
- #[allow(clippy::too_many_lines)] pub async fn serve(self) -> anyhow::Result<()> { info!(target: COMPONENT, endpoint=?self.block_producer_address, store=%self.store_url, "Initializing server"); let store = StoreClient::new(self.store_url.clone()); @@ -123,8 +123,7 @@ impl BlockProducer { info!(target: COMPONENT, "Server initialized"); - let block_builder = - BlockBuilder::new(store.clone(), validator, self.block_prover_url, self.block_interval); + let block_builder = BlockBuilder::new(store.clone(), validator, self.block_interval); let batch_builder = BatchBuilder::new( store.clone(), SERVER_NUM_BATCH_BUILDERS, diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index 453512597..c404a2ae9 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -1,27 +1,25 @@ +use std::num::NonZeroUsize; use std::time::Duration; -use miden_air::{ExecutionProof, HashFunction}; -use miden_node_proto::generated::{ - self as proto, block_producer::api_client as block_producer_client, -}; +use miden_node_proto::generated::block_producer::api_client as block_producer_client; use miden_node_store::{GenesisState, Store}; -use miden_protocol::{ - Digest, - account::{AccountId, AccountIdVersion, AccountStorageMode, AccountType}, - transaction::ProvenTransactionBuilder, -}; -use miden_tx::utils::Serializable; -use tokio::{net::TcpListener, runtime, task, time::sleep}; +use miden_node_utils::fee::test_fee_params; +use miden_node_validator::Validator; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::testing::random_signer::RandomBlockSigner as _; +use tokio::net::TcpListener; +use tokio::time::sleep; +use tokio::{runtime, task}; use tonic::transport::{Channel, Endpoint}; -use winterfell::Proof; +use url::Url; -use crate::{BlockProducer, SERVER_MAX_BATCHES_PER_BLOCK, SERVER_MAX_TXS_PER_BATCH}; +use crate::{BlockProducer, DEFAULT_MAX_BATCHES_PER_BLOCK, DEFAULT_MAX_TXS_PER_BATCH}; +/// Tests that the block producer starts up correctly even when the store is not initially +/// available. The block producer should retry with exponential backoff until the store becomes +/// available, then start serving requests. #[tokio::test] async fn block_producer_startup_is_robust_to_network_failures() { - // This test starts the block producer and tests that it starts serving only after the store - // is started. 
- // get the addresses for the store and block producer let store_addr = { let store_listener = @@ -36,113 +34,103 @@ async fn block_producer_startup_is_robust_to_network_failures() { .expect("Failed to get block-producer address") }; - let ntx_builder_addr = { - let ntx_builder_address = TcpListener::bind("127.0.0.1:0") - .await - .expect("failed to bind the ntx builder address"); - ntx_builder_address.local_addr().expect("failed to get ntx builder address") + let validator_addr = { + let validator_listener = + TcpListener::bind("127.0.0.1:0").await.expect("failed to bind validator"); + validator_listener.local_addr().expect("failed to get validator address") }; - // start the block producer + let grpc_timeout = Duration::from_secs(30); + + // start the validator + task::spawn(async move { + Validator { + address: validator_addr, + grpc_timeout, + signer: SecretKey::random(), + } + .serve() + .await + .unwrap(); + }); + + // start the block producer BEFORE the store is available + // this tests the exponential backoff behavior + let store_url = Url::parse(&format!("http://{store_addr}")).expect("Failed to parse store URL"); + let validator_url = + Url::parse(&format!("http://{validator_addr}")).expect("Failed to parse validator URL"); task::spawn(async move { BlockProducer { block_producer_address: block_producer_addr, - store_address: store_addr, - ntx_builder_address: Some(ntx_builder_addr), + store_url, + validator_url, batch_prover_url: None, - block_prover_url: None, batch_interval: Duration::from_millis(500), block_interval: Duration::from_millis(500), - max_txs_per_batch: SERVER_MAX_TXS_PER_BATCH, - max_batches_per_block: SERVER_MAX_BATCHES_PER_BLOCK, + max_txs_per_batch: DEFAULT_MAX_TXS_PER_BATCH, + max_batches_per_block: DEFAULT_MAX_BATCHES_PER_BLOCK, + grpc_timeout, + mempool_tx_capacity: NonZeroUsize::new(100).unwrap(), } .serve() .await .unwrap(); }); - // test: connecting to the block producer should fail until the store is started + // test: connecting to the block producer should fail because the store is not yet started + // (and therefore the block producer is not yet listening) let block_producer_endpoint = Endpoint::try_from(format!("http://{block_producer_addr}")).expect("valid url"); let block_producer_client = block_producer_client::ApiClient::connect(block_producer_endpoint.clone()).await; - assert!(block_producer_client.is_err()); + assert!( + block_producer_client.is_err(), + "Block producer should not be available before store is started" + ); // start the store let data_directory = tempfile::tempdir().expect("tempdir should be created"); - let store_runtime = { - let genesis_state = GenesisState::new(vec![], 1, 1); - Store::bootstrap(genesis_state.clone(), data_directory.path()) - .expect("store should bootstrap"); - let dir = data_directory.path().to_path_buf(); - let rpc_listener = - TcpListener::bind("127.0.0.1:0").await.expect("store should bind the RPC port"); - let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") - .await - .expect("Failed to bind store ntx-builder gRPC endpoint"); - let block_producer_listener = TcpListener::bind(store_addr) - .await - .expect("store should bind the block-producer port"); - // in order to later kill the store, we need to spawn a new runtime and run the store on - // it. That allows us to kill all the tasks spawned by the store when we - // kill the runtime. 
- let store_runtime = - runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); - store_runtime.spawn(async move { - Store { - rpc_listener, - ntx_builder_listener, - block_producer_listener, - data_directory: dir, - grpc_timeout: std::time::Duration::from_secs(30), + let store_runtime = start_store(store_addr, data_directory.path()).await; + + // wait for the block producer's exponential backoff to connect to the store + // use a retry loop since CI environments may be slower + let block_producer_client = { + let mut attempts = 0; + loop { + attempts += 1; + match block_producer_client::ApiClient::connect(block_producer_endpoint.clone()).await { + Ok(client) => break client, + Err(_) if attempts < 30 => { + sleep(Duration::from_millis(200)).await; + }, + Err(e) => panic!( + "block producer client should connect after store is started (after {attempts} attempts): {e}" + ), } - .serve() - .await - .expect("store should start serving"); - }); - store_runtime + } }; - // we need to wait for the exponential backoff of the block producer to connect to the store - sleep(Duration::from_secs(1)).await; + // test: status request against block-producer should succeed + let response = send_status_request(block_producer_client).await; + assert!(response.is_ok(), "Status request should succeed, got: {:?}", response.err()); - let block_producer_client = block_producer_client::ApiClient::connect(block_producer_endpoint) - .await - .expect("block producer client should connect"); + // verify the response contains expected data + let status = response.unwrap().into_inner(); + assert_eq!(status.status, "connected"); - // test: request against block-producer api should succeed - let response = send_request(block_producer_client.clone(), 0).await; - assert!(response.is_ok()); - - // kill the store - shutdown_store(store_runtime).await; - - // test: request against block-producer api should fail immediately - let response = send_request(block_producer_client.clone(), 1).await; - assert!(response.is_err()); - - // test: restart the store and request should succeed - let store_runtime = restart_store(store_addr, data_directory.path()).await; - let response = send_request(block_producer_client.clone(), 2).await; - assert!(response.is_ok()); - - // Shutdown the store before data_directory is dropped to allow RocksDB to flush properly + // Shutdown the store before data_directory is dropped to allow the database to flush properly shutdown_store(store_runtime).await; } -/// Shuts down the store runtime properly to allow RocksDB to flush before the temp directory is -/// deleted. -async fn shutdown_store(store_runtime: runtime::Runtime) { - task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) - .await - .expect("shutdown should complete"); -} - -/// Restarts a store using an existing data directory. Returns the runtime handle for shutdown. -async fn restart_store( +/// Starts the store with a fresh genesis state and returns the runtime handle. 
+async fn start_store( store_addr: std::net::SocketAddr, data_directory: &std::path::Path, ) -> runtime::Runtime { + let genesis_state = GenesisState::new(vec![], test_fee_params(), 1, 1, SecretKey::random()); + Store::bootstrap(genesis_state.clone(), data_directory).expect("store should bootstrap"); + + let dir = data_directory.to_path_buf(); let rpc_listener = TcpListener::bind("127.0.0.1:0").await.expect("store should bind the RPC port"); let ntx_builder_listener = TcpListener::bind("127.0.0.1:0") @@ -151,7 +139,8 @@ async fn restart_store( let block_producer_listener = TcpListener::bind(store_addr) .await .expect("store should bind the block-producer port"); - let dir = data_directory.to_path_buf(); + + // Use a separate runtime so we can kill all store tasks later let store_runtime = runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); store_runtime.spawn(async move { @@ -159,8 +148,9 @@ async fn restart_store( rpc_listener, ntx_builder_listener, block_producer_listener, + block_prover_url: None, data_directory: dir, - grpc_timeout: std::time::Duration::from_secs(30), + grpc_timeout: Duration::from_secs(30), } .serve() .await @@ -169,32 +159,17 @@ async fn restart_store( store_runtime } -/// Creates a dummy transaction and submits it to the block producer. -async fn send_request( +/// Shuts down the store runtime properly to allow the database to flush before the temp directory +/// is deleted. +async fn shutdown_store(store_runtime: runtime::Runtime) { + task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) + .await + .expect("shutdown should complete"); +} + +/// Sends a status request to the block producer to verify connectivity. +async fn send_status_request( mut client: block_producer_client::ApiClient, - i: u8, -) -> Result, tonic::Status> -{ - let tx = ProvenTransactionBuilder::new( - AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Private, - ), - Digest::default(), - [i; 32].try_into().unwrap(), - Digest::default(), - 0.into(), - Digest::default(), - u32::MAX.into(), - ExecutionProof::new(Proof::new_dummy(), HashFunction::default()), - ) - .build() - .unwrap(); - let request = proto::transaction::ProvenTransaction { - transaction: tx.to_bytes(), - transaction_replay: None, - }; - client.submit_proven_transaction(request).await +) -> Result, tonic::Status> { + client.status(()).await } diff --git a/crates/block-producer/src/store/mod.rs b/crates/block-producer/src/store/mod.rs index a82a60582..fb20bc160 100644 --- a/crates/block-producer/src/store/mod.rs +++ b/crates/block-producer/src/store/mod.rs @@ -10,7 +10,8 @@ use miden_node_proto::{AccountState, generated as proto}; use miden_node_utils::formatting::format_opt; use miden_protocol::Word; use miden_protocol::account::AccountId; -use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, ProvenBlock}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, SignedBlock}; use miden_protocol::note::Nullifier; use miden_protocol::transaction::ProvenTransaction; use miden_protocol::utils::Serializable; @@ -238,8 +239,15 @@ impl StoreClient { } #[instrument(target = COMPONENT, name = "store.client.apply_block", skip_all, err)] - pub async fn apply_block(&self, block: &ProvenBlock) -> Result<(), StoreError> { - let request = tonic::Request::new(proto::blockchain::Block { block: block.to_bytes() }); + pub async fn apply_block( + 
&self, + ordered_batches: &OrderedBatches, + signed_block: &SignedBlock, + ) -> Result<(), StoreError> { + let request = tonic::Request::new(proto::store::ApplyBlockRequest { + ordered_batches: ordered_batches.to_bytes(), + block: Some(signed_block.into()), + }); self.client.clone().apply_block(request).await.map(|_| ()).map_err(Into::into) } diff --git a/crates/block-producer/src/test_utils/batch.rs b/crates/block-producer/src/test_utils/batch.rs index ecbd21586..ca705e241 100644 --- a/crates/block-producer/src/test_utils/batch.rs +++ b/crates/block-producer/src/test_utils/batch.rs @@ -66,7 +66,7 @@ impl TransactionBatchConstructor for ProvenBatch { account_updates, InputNotes::new_unchecked(input_notes), output_notes, - BlockNumber::from(u32::MAX), + BlockNumber::MAX, OrderedTransactionHeaders::new_unchecked( txs.into_iter().map(TransactionHeader::from).collect(), ), diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index 06ed8eb3b..1d34db128 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -15,8 +15,14 @@ workspace = true [dependencies] anyhow = { workspace = true } +deadpool = { features = ["managed", "rt_tokio_1"], workspace = true } +deadpool-diesel = { features = ["sqlite"], workspace = true } +deadpool-sync = { features = ["tracing"], workspace = true } +diesel = { features = ["numeric", "sqlite"], workspace = true } +diesel_migrations = { features = ["sqlite"], workspace = true } futures = { workspace = true } indexmap = { workspace = true } +libsqlite3-sys = { workspace = true } miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } miden-protocol = { default-features = true, workspace = true } @@ -25,7 +31,7 @@ miden-tx = { default-features = true, workspace = true } thiserror = { workspace = true } tokio = { features = ["rt-multi-thread"], workspace = true } tokio-stream = { workspace = true } -tokio-util = { version = "0.7" } +tokio-util = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } url = { workspace = true } @@ -36,3 +42,6 @@ miden-node-utils = { features = ["testing"], workspace = true } miden-protocol = { default-features = true, features = ["testing"], workspace = true } miden-standards = { workspace = true } rstest = { workspace = true } + +[package.metadata.cargo-machete] +ignored = ["libsqlite3-sys"] diff --git a/crates/ntx-builder/build.rs b/crates/ntx-builder/build.rs new file mode 100644 index 000000000..881be3168 --- /dev/null +++ b/crates/ntx-builder/build.rs @@ -0,0 +1,11 @@ +// This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in +// `src/db/migrations.rs` to include the latest version of the migrations into the binary, see +// . + +fn main() { + println!("cargo:rerun-if-changed=./src/db/migrations"); + // If we do one re-write, the default rules are disabled, + // hence we need to trigger explicitly on `Cargo.toml`. 
+ // + println!("cargo:rerun-if-changed=Cargo.toml"); +} diff --git a/crates/ntx-builder/diesel.toml b/crates/ntx-builder/diesel.toml new file mode 100644 index 000000000..71215dbf7 --- /dev/null +++ b/crates/ntx-builder/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/db/schema.rs" diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs index 25020c8b2..b58cfd692 100644 --- a/crates/ntx-builder/src/actor/account_state.rs +++ b/crates/ntx-builder/src/actor/account_state.rs @@ -1,5 +1,6 @@ use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::num::NonZeroUsize; +use std::sync::Arc; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; @@ -13,7 +14,7 @@ use miden_protocol::transaction::{PartialBlockchain, TransactionId}; use tracing::instrument; use super::ActorShutdownReason; -use super::note_state::{NetworkAccountEffect, NetworkAccountNoteState}; +use super::note_state::{AccountDeltaTracker, NetworkAccountEffect, NotePool}; use crate::COMPONENT; use crate::actor::inflight_note::InflightNetworkNote; use crate::builder::ChainState; @@ -40,7 +41,9 @@ pub struct TransactionCandidate { pub chain_tip_header: BlockHeader, /// The chain MMR, which lags behind the tip by one block. - pub chain_mmr: PartialBlockchain, + /// + /// Wrapped in `Arc` to avoid expensive clones when reading the chain state. + pub chain_mmr: Arc, } // NETWORK ACCOUNT STATE @@ -49,32 +52,30 @@ pub struct TransactionCandidate { /// The current state of a network account. #[derive(Clone)] pub struct NetworkAccountState { - /// The network account ID corresponding to the network account this state represents. + /// The network account ID this state represents. account_id: NetworkAccountId, - /// Component of this state which Contains the committed and inflight account updates as well - /// as available and nullified notes. - account: NetworkAccountNoteState, + /// Tracks committed and inflight account state updates. + account: AccountDeltaTracker, + + /// Manages available and nullified notes. + notes: NotePool, /// Uncommitted transactions which have some impact on the network state. /// - /// This is tracked so we can commit or revert such transaction effects. Transactions _without_ - /// an impact are ignored. + /// This is tracked so we can commit or revert transaction effects. Transactions _without_ an + /// impact are ignored. inflight_txs: BTreeMap, /// Nullifiers of all network notes targeted at this account. /// /// Used to filter mempool events: when a `TransactionAdded` event reports consumed nullifiers, - /// only those present in this set are processed (moved from `available_notes` to - /// `nullified_notes`). Nullifiers are added when notes are loaded or created, and removed - /// when the consuming transaction is committed. + /// only those present in this set are processed. Nullifiers are added when notes are loaded + /// or created, and removed when the consuming transaction is committed. known_nullifiers: HashSet, } impl NetworkAccountState { - /// Maximum number of attempts to execute a network note. - const MAX_NOTE_ATTEMPTS: usize = 30; - /// Load's all available network notes from the store, along with the required account states. 
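The new ntx-builder build script above exists so that the `diesel_migrations::embed_migrations!` expansion in `src/db/migrations.rs` is refreshed whenever a migration file changes. That module is not part of this patch; as a hedged sketch, with diesel_migrations 2.x such a module usually looks roughly like this (the path and the helper function name are assumptions).

    use diesel::SqliteConnection;
    use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};

    // Compiled into the binary at build time; the rerun-if-changed rule in build.rs makes
    // sure edits under src/db/migrations re-trigger this macro expansion.
    pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations");

    // Hypothetical helper: applies any migrations the on-disk database is missing.
    pub fn run_migrations(
        conn: &mut SqliteConnection,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        conn.run_pending_migrations(MIGRATIONS)?;
        Ok(())
    }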
#[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)] pub async fn load( @@ -95,10 +96,15 @@ impl NetworkAccountState { let known_nullifiers: HashSet = notes.iter().map(SingleTargetNetworkNote::nullifier).collect(); - let account = NetworkAccountNoteState::new(account, notes); + let account_tracker = AccountDeltaTracker::new(account); + let mut note_pool = NotePool::default(); + for note in notes { + note_pool.add_note(note); + } let state = Self { - account, + account: account_tracker, + notes: note_pool, account_id, inflight_txs: BTreeMap::default(), known_nullifiers, @@ -110,24 +116,31 @@ impl NetworkAccountState { } /// Selects the next candidate network transaction. + /// + /// # Parameters + /// + /// - `limit`: Maximum number of notes to include in the transaction. + /// - `max_note_attempts`: Maximum number of execution attempts before a note is dropped. + /// - `chain_state`: Current chain state for the transaction. #[instrument(target = COMPONENT, name = "ntx.state.select_candidate", skip_all)] pub fn select_candidate( &mut self, limit: NonZeroUsize, + max_note_attempts: usize, chain_state: ChainState, ) -> Option { // Remove notes that have failed too many times. - self.account.drop_failing_notes(Self::MAX_NOTE_ATTEMPTS); + self.notes.drop_failing_notes(max_note_attempts); // Skip empty accounts, and prune them. // This is how we keep the number of accounts bounded. - if self.account.is_empty() { + if self.is_empty() { return None; } // Select notes from the account that can be consumed or are ready for a retry. let notes = self - .account + .notes .available_notes(&chain_state.chain_tip_header.block_num()) .take(limit.get()) .cloned() @@ -154,7 +167,7 @@ impl NetworkAccountState { #[instrument(target = COMPONENT, name = "ntx.state.notes_failed", skip_all)] pub fn notes_failed(&mut self, notes: &[Note], block_num: BlockNumber) { let nullifiers = notes.iter().map(Note::nullifier).collect::>(); - self.account.fail_notes(nullifiers.as_slice(), block_num); + self.notes.fail_notes(nullifiers.as_slice(), block_num); } /// Updates state with the mempool event. @@ -197,6 +210,11 @@ impl NetworkAccountState { None } + /// Returns `true` if there is no inflight state being tracked. + fn is_empty(&self) -> bool { + self.account.has_no_inflight() && self.notes.is_empty() + } + /// Handles a [`MempoolEvent::TransactionAdded`] event. fn add_transaction( &mut self, @@ -234,7 +252,7 @@ impl NetworkAccountState { ); tx_impact.notes.insert(note.nullifier()); self.known_nullifiers.insert(note.nullifier()); - self.account.add_note(note.clone()); + self.notes.add_note(note.clone()); } for nullifier in nullifiers { // Ignore nullifiers that aren't network note nullifiers. @@ -242,8 +260,7 @@ impl NetworkAccountState { continue; } tx_impact.nullifiers.insert(*nullifier); - // We don't use the entry wrapper here because the account must already exist. - let _ = self.account.add_nullifier(*nullifier); + let _ = self.notes.nullify(*nullifier); } if !tx_impact.is_empty() { @@ -268,7 +285,7 @@ impl NetworkAccountState { if self.known_nullifiers.remove(&nullifier) { // Its possible for the account to no longer exist if the transaction creating it // was reverted. - self.account.commit_nullifier(nullifier); + self.notes.commit_nullifier(nullifier); } } } @@ -292,7 +309,7 @@ impl NetworkAccountState { // Revert notes. 
for note_nullifier in impact.notes { if self.known_nullifiers.contains(¬e_nullifier) { - self.account.revert_note(note_nullifier); + self.notes.remove_note(note_nullifier); self.known_nullifiers.remove(¬e_nullifier); } } @@ -300,7 +317,7 @@ impl NetworkAccountState { // Revert nullifiers. for nullifier in impact.nullifiers { if self.known_nullifiers.contains(&nullifier) { - self.account.revert_nullifier(nullifier); + self.notes.revert_nullifier(nullifier); self.known_nullifiers.remove(&nullifier); } } @@ -471,10 +488,15 @@ mod tests { let known_nullifiers: HashSet = notes.iter().map(SingleTargetNetworkNote::nullifier).collect(); - let account = NetworkAccountNoteState::new(account, notes); + let account_tracker = AccountDeltaTracker::new(account); + let mut note_pool = NotePool::default(); + for note in notes { + note_pool.add_note(note); + } Self { - account, + account: account_tracker, + notes: note_pool, account_id, inflight_txs: BTreeMap::default(), known_nullifiers, @@ -534,7 +556,7 @@ mod tests { let mut state = NetworkAccountState::new_for_testing(account, network_account_id, vec![note1, note2]); - let available_count = state.account.available_notes(&BlockNumber::from(0)).count(); + let available_count = state.notes.available_notes(&BlockNumber::from(0)).count(); assert_eq!(available_count, 2, "both notes should be available initially"); let tx_id = mock_tx_id(1); @@ -549,7 +571,7 @@ mod tests { assert!(shutdown.is_none(), "mempool_update should not trigger shutdown"); let available_nullifiers: Vec<_> = state - .account + .notes .available_notes(&BlockNumber::from(0)) .map(|n| n.to_inner().nullifier()) .collect(); @@ -630,7 +652,7 @@ mod tests { state.mempool_update(&event); // Verify note is not available - let available_count = state.account.available_notes(&BlockNumber::from(0)).count(); + let available_count = state.notes.available_notes(&BlockNumber::from(0)).count(); assert_eq!(available_count, 0, "note should not be available after being consumed"); // Revert the transaction @@ -640,7 +662,7 @@ mod tests { // Verify note is available again let available_nullifiers: Vec<_> = state - .account + .notes .available_notes(&BlockNumber::from(0)) .map(|n| n.to_inner().nullifier()) .collect(); @@ -683,7 +705,7 @@ mod tests { // Verify the note is available let available_nullifiers: Vec<_> = state - .account + .notes .available_notes(&BlockNumber::from(0)) .map(|n| n.to_inner().nullifier()) .collect(); diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index edcf58c07..59e9cdb4f 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -31,7 +31,7 @@ use miden_protocol::transaction::{ TransactionInputs, }; use miden_protocol::vm::FutureMaybeSend; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use miden_remote_prover_client::RemoteTransactionProver; use miden_tx::auth::UnreachableAuth; use miden_tx::utils::Serializable; use miden_tx::{ @@ -83,7 +83,7 @@ type NtxResult = Result; /// Provides the context for execution [network transaction candidates](TransactionCandidate). #[derive(Clone)] pub struct NtxContext { - /// TODO(sergerad): Remove block producer client when block proving moved to store. + /// Client for submitting proven transactions to the Block Producer. block_producer: BlockProducerClient, /// Client for validating transactions via the Validator. 
@@ -327,7 +327,8 @@ impl NtxContext { struct NtxDataStore { account: Account, reference_block: BlockHeader, - chain_mmr: PartialBlockchain, + /// The chain MMR, wrapped in `Arc` to avoid expensive clones when reading the chain state. + chain_mmr: Arc, mast_store: TransactionMastStore, /// Store client for retrieving note scripts. store: StoreClient, @@ -362,7 +363,7 @@ impl NtxDataStore { fn new( account: Account, reference_block: BlockHeader, - chain_mmr: PartialBlockchain, + chain_mmr: Arc, store: StoreClient, script_cache: LruCache, ) -> Self { @@ -421,7 +422,7 @@ impl DataStore for NtxDataStore { .await; let partial_account = PartialAccount::from(&self.account); - Ok((partial_account, self.reference_block.clone(), self.chain_mmr.clone())) + Ok((partial_account, self.reference_block.clone(), (*self.chain_mmr).clone())) } } diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs index ae8f63629..c5ecc2ccd 100644 --- a/crates/ntx-builder/src/actor/mod.rs +++ b/crates/ntx-builder/src/actor/mod.rs @@ -3,6 +3,7 @@ mod execute; mod inflight_note; mod note_state; +use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; @@ -18,7 +19,7 @@ use miden_protocol::account::{Account, AccountDelta}; use miden_protocol::block::BlockNumber; use miden_protocol::note::NoteScript; use miden_protocol::transaction::TransactionId; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use miden_remote_prover_client::RemoteTransactionProver; use tokio::sync::{AcquireError, RwLock, Semaphore, mpsc}; use tokio_util::sync::CancellationToken; use url::Url; @@ -66,6 +67,10 @@ pub struct AccountActorContext { /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. /// This cache is shared across all account actors to maximize cache efficiency. pub script_cache: LruCache, + /// Maximum number of notes per transaction. + pub max_notes_per_tx: NonZeroUsize, + /// Maximum number of note execution attempts before dropping a note. + pub max_note_attempts: usize, } // ACCOUNT ORIGIN @@ -156,12 +161,15 @@ pub struct AccountActor { mode: ActorMode, event_rx: mpsc::Receiver>, cancel_token: CancellationToken, - // TODO(sergerad): Remove block producer when block proving moved to store. block_producer: BlockProducerClient, validator: ValidatorClient, prover: Option, chain_state: Arc>, script_cache: LruCache, + /// Maximum number of notes per transaction. + max_notes_per_tx: NonZeroUsize, + /// Maximum number of note execution attempts before dropping a note. + max_note_attempts: usize, } impl AccountActor { @@ -193,6 +201,8 @@ impl AccountActor { prover, chain_state: actor_context.chain_state.clone(), script_cache: actor_context.script_cache.clone(), + max_notes_per_tx: actor_context.max_notes_per_tx, + max_note_attempts: actor_context.max_note_attempts, } } @@ -259,7 +269,11 @@ impl AccountActor { // Read the chain state. let chain_state = self.chain_state.read().await.clone(); // Find a candidate transaction and execute it. - if let Some(tx_candidate) = state.select_candidate(crate::MAX_NOTES_PER_TX, chain_state) { + if let Some(tx_candidate) = state.select_candidate( + self.max_notes_per_tx, + self.max_note_attempts, + chain_state, + ) { self.execute_transactions(&mut state, tx_candidate).await; } else { // No transactions to execute, wait for events. @@ -334,7 +348,7 @@ impl AccountActor { /// - After 10 attempts, the backoff period is 12 blocks. /// - After 20 attempts, the backoff period is 148 blocks. /// - etc... 
-#[allow(clippy::cast_precision_loss, clippy::cast_sign_loss)] +#[expect(clippy::cast_precision_loss, clippy::cast_sign_loss)] fn has_backoff_passed( chain_tip: BlockNumber, last_attempt: Option, diff --git a/crates/ntx-builder/src/actor/note_state.rs b/crates/ntx-builder/src/actor/note_state.rs index 87b91fc21..610334c67 100644 --- a/crates/ntx-builder/src/actor/note_state.rs +++ b/crates/ntx-builder/src/actor/note_state.rs @@ -9,59 +9,28 @@ use miden_protocol::note::Nullifier; use crate::actor::inflight_note::InflightNetworkNote; -// ACCOUNT STATE +// ACCOUNT DELTA TRACKER // ================================================================================================ -/// Tracks the state of a network account and its notes. +/// Tracks committed and inflight account state updates. #[derive(Clone)] -pub struct NetworkAccountNoteState { +pub struct AccountDeltaTracker { /// The committed account state, if any. /// - /// Its possible this is `None` if the account creation transaction is still inflight. + /// This may be `None` if the account creation transaction is still inflight. committed: Option, /// Inflight account updates in chronological order. inflight: VecDeque, - - /// Unconsumed notes of this account. - available_notes: HashMap, - - /// Notes which have been consumed by transactions that are still inflight. - nullified_notes: HashMap, } -impl NetworkAccountNoteState { - /// Creates a new account state from the supplied account and notes. - pub fn new(account: Account, notes: Vec) -> Self { - let account_id = NetworkAccountId::try_from(account.id()) - .expect("only network accounts are used for account state"); - - let mut state = Self { +impl AccountDeltaTracker { + /// Creates a new tracker with the given committed account state. + pub fn new(account: Account) -> Self { + Self { committed: Some(account), inflight: VecDeque::default(), - available_notes: HashMap::default(), - nullified_notes: HashMap::default(), - }; - - for note in notes { - // Currently only support single target network notes in NTB. - assert!( - note.account_id() == account_id, - "Notes supplied into account state must match expected account ID" - ); - state.add_note(note); } - - state - } - - /// Returns an iterator over inflight notes that are not currently within their respective - /// backoff periods based on block number. - pub fn available_notes( - &self, - block_num: &BlockNumber, - ) -> impl Iterator { - self.available_notes.values().filter(|&note| note.is_available(*block_num)) } /// Appends a delta to the set of inflight account updates. @@ -85,43 +54,72 @@ impl NetworkAccountNoteState { /// Reverts the newest account state delta. /// - /// # Returns - /// - /// Returns `true` if this reverted the account creation delta. The caller _must_ remove this - /// account and associated notes as calls to `account` will panic. + /// Returns `true` if this reverted the account creation delta. The caller _must_ handle + /// cleanup as calls to `latest_account` will panic afterwards. /// /// # Panics /// /// Panics if there are no deltas to revert. - #[must_use = "must remove this account and its notes"] + #[must_use = "must handle account removal if this returns true"] pub fn revert_delta(&mut self) -> bool { self.inflight.pop_back().expect("must have a delta to revert"); self.committed.is_none() && self.inflight.is_empty() } + /// Returns the latest inflight account state.
+ pub fn latest_account(&self) -> Account { + self.inflight + .back() + .or(self.committed.as_ref()) + .expect("account must have either a committed or inflight state") + .clone() + } + + /// Returns `true` if there are no inflight deltas. + pub fn has_no_inflight(&self) -> bool { + self.inflight.is_empty() + } +} + +// NOTE POOL +// ================================================================================================ + +/// Manages available and nullified notes for a network account. +#[derive(Clone, Default)] +pub struct NotePool { + /// Unconsumed notes available for consumption. + available: HashMap, + + /// Notes consumed by inflight transactions (not yet committed). + nullified: HashMap, +} + +impl NotePool { + /// Returns an iterator over notes that are available and not in backoff. + pub fn available_notes( + &self, + block_num: &BlockNumber, + ) -> impl Iterator { + self.available.values().filter(|&note| note.is_available(*block_num)) + } + /// Adds a new network note making it available for consumption. pub fn add_note(&mut self, note: SingleTargetNetworkNote) { - self.available_notes.insert(note.nullifier(), InflightNetworkNote::new(note)); + self.available.insert(note.nullifier(), InflightNetworkNote::new(note)); } - /// Removes the note completely. - pub fn revert_note(&mut self, note: Nullifier) { - // Transactions can be reverted out of order. - // - // This means the tx which nullified the note might not have been reverted yet, and the note - // might still be in the nullified - self.available_notes.remove(&note); - self.nullified_notes.remove(&note); + /// Removes the note completely (used when reverting note creation). + pub fn remove_note(&mut self, nullifier: Nullifier) { + self.available.remove(&nullifier); + self.nullified.remove(&nullifier); } - /// Marks a note as being consumed. - /// - /// The note data is retained until the nullifier is committed. + /// Marks a note as being consumed by moving it to the nullified set. /// /// Returns `Err(())` if the note does not exist or was already nullified. - pub fn add_nullifier(&mut self, nullifier: Nullifier) -> Result<(), ()> { - if let Some(note) = self.available_notes.remove(&nullifier) { - self.nullified_notes.insert(nullifier, note); + pub fn nullify(&mut self, nullifier: Nullifier) -> Result<(), ()> { + if let Some(note) = self.available.remove(&nullifier) { + self.nullified.insert(nullifier, note); Ok(()) } else { tracing::warn!(%nullifier, "note must be available to nullify"); @@ -129,68 +127,47 @@ impl NetworkAccountNoteState { } } - /// Marks a nullifier as being committed, removing the associated note data entirely. + /// Commits a nullifier, removing the associated note entirely. /// - /// Silently ignores the request if the nullifier is not present, which can happen - /// if the note's transaction wasn't available when the nullifier was added. + /// Silently ignores if the nullifier is not present. pub fn commit_nullifier(&mut self, nullifier: Nullifier) { - // we might not have this if we didn't add it with `add_nullifier` - // in case it's transaction wasn't available in the first place. - // It shouldn't happen practically, since we skip them if the - // relevant account cannot be retrieved via `fetch`. - - let _ = self.nullified_notes.remove(&nullifier); + let _ = self.nullified.remove(&nullifier); } - /// Reverts a nullifier, marking the associated note as available again. + /// Reverts a nullifier, making the note available again.
pub fn revert_nullifier(&mut self, nullifier: Nullifier) { // Transactions can be reverted out of order. - // - // The note may already have been fully removed by `revert_note` if the transaction creating - // the note was reverted before the transaction that consumed it. - if let Some(note) = self.nullified_notes.remove(&nullifier) { - self.available_notes.insert(nullifier, note); + if let Some(note) = self.nullified.remove(&nullifier) { + self.available.insert(nullifier, note); } } - /// Drops all notes that have failed to be consumed after a certain number of attempts. + /// Drops all notes that have exceeded the maximum attempt count. pub fn drop_failing_notes(&mut self, max_attempts: usize) { - self.available_notes.retain(|_, note| note.attempt_count() < max_attempts); - } - - /// Returns the latest inflight account state. - pub fn latest_account(&self) -> Account { - self.inflight - .back() - .or(self.committed.as_ref()) - .expect("account must have either a committed or inflight state") - .clone() - } - - /// Returns `true` if there is no inflight state being tracked. - /// - /// This implies this state is safe to remove without losing uncommitted data. - pub fn is_empty(&self) -> bool { - self.inflight.is_empty() - && self.available_notes.is_empty() - && self.nullified_notes.is_empty() + self.available.retain(|_, note| note.attempt_count() < max_attempts); } /// Marks the specified notes as failed. pub fn fail_notes(&mut self, nullifiers: &[Nullifier], block_num: BlockNumber) { for nullifier in nullifiers { - if let Some(note) = self.available_notes.get_mut(nullifier) { + if let Some(note) = self.available.get_mut(nullifier) { note.fail(block_num); } else { tracing::warn!(%nullifier, "failed note is not in account's state"); } } } + + /// Returns `true` if there are no notes being tracked. + pub fn is_empty(&self) -> bool { + self.available.is_empty() && self.nullified.is_empty() + } } -// NETWORK ACCOUNT UPDATE +// NETWORK ACCOUNT EFFECT // ================================================================================================ +/// Represents the effect of a transaction on a network account. 
#[derive(Clone)] pub enum NetworkAccountEffect { Created(Account), @@ -232,16 +209,16 @@ mod tests { #[rstest::rstest] #[test] - #[case::all_zero(Some(BlockNumber::from(0)), BlockNumber::from(0), 0, true)] - #[case::no_attempts(None, BlockNumber::from(0), 0, true)] - #[case::one_attempt(Some(BlockNumber::from(0)), BlockNumber::from(2), 1, true)] - #[case::three_attempts(Some(BlockNumber::from(0)), BlockNumber::from(3), 3, true)] - #[case::ten_attempts(Some(BlockNumber::from(0)), BlockNumber::from(13), 10, true)] - #[case::twenty_attempts(Some(BlockNumber::from(0)), BlockNumber::from(149), 20, true)] - #[case::one_attempt_false(Some(BlockNumber::from(0)), BlockNumber::from(1), 1, false)] - #[case::three_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(2), 3, false)] - #[case::ten_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(12), 10, false)] - #[case::twenty_attempts_false(Some(BlockNumber::from(0)), BlockNumber::from(148), 20, false)] + #[case::all_zero(Some(BlockNumber::GENESIS), BlockNumber::GENESIS, 0, true)] + #[case::no_attempts(None, BlockNumber::GENESIS, 0, true)] + #[case::one_attempt(Some(BlockNumber::GENESIS), BlockNumber::from(2), 1, true)] + #[case::three_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(3), 3, true)] + #[case::ten_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(13), 10, true)] + #[case::twenty_attempts(Some(BlockNumber::GENESIS), BlockNumber::from(149), 20, true)] + #[case::one_attempt_false(Some(BlockNumber::GENESIS), BlockNumber::from(1), 1, false)] + #[case::three_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(2), 3, false)] + #[case::ten_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(12), 10, false)] + #[case::twenty_attempts_false(Some(BlockNumber::GENESIS), BlockNumber::from(148), 20, false)] fn backoff_has_passed( #[case] last_attempt_block_num: Option, #[case] current_block_num: BlockNumber, diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index ce4d7b9c6..53925bdcf 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ b/crates/ntx-builder/src/block_producer.rs @@ -62,7 +62,7 @@ impl BlockProducerClient { pub async fn subscribe_to_mempool_with_retry( &self, chain_tip: BlockNumber, - ) -> Result, Status> { + ) -> Result + Send + 'static, Status> { let mut retry_counter = 0; loop { match self.subscribe_to_mempool(chain_tip).await { @@ -90,7 +90,7 @@ impl BlockProducerClient { async fn subscribe_to_mempool( &self, chain_tip: BlockNumber, - ) -> Result, Status> { + ) -> Result + Send + 'static, Status> { let request = proto::block_producer::MempoolSubscriptionRequest { chain_tip: chain_tip.as_u32() }; let stream = self.client.clone().mempool_subscription(request).await?; diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 8b789779f..14be4ef31 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -1,56 +1,63 @@ -use std::num::NonZeroUsize; +use std::pin::Pin; use std::sync::Arc; use anyhow::Context; -use futures::TryStreamExt; +use futures::Stream; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_utils::lru_cache::LruCache; -use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::block::BlockHeader; use miden_protocol::crypto::merkle::mmr::PartialMmr; -use miden_protocol::note::NoteScript; use miden_protocol::transaction::PartialBlockchain; use 
tokio::sync::{RwLock, mpsc}; -use url::Url; +use tokio_stream::StreamExt; +use tonic::Status; -use crate::MAX_IN_PROGRESS_TXS; +use crate::NtxBuilderConfig; use crate::actor::{AccountActorContext, AccountOrigin}; -use crate::block_producer::BlockProducerClient; use crate::coordinator::Coordinator; use crate::store::StoreClient; -// CONSTANTS -// ================================================================================================= - -/// The maximum number of blocks to keep in memory while tracking the chain tip. -const MAX_BLOCK_COUNT: usize = 4; - // CHAIN STATE // ================================================================================================ /// Contains information about the chain that is relevant to the [`NetworkTransactionBuilder`] and -/// all account actors managed by the [`Coordinator`] +/// all account actors managed by the [`Coordinator`]. +/// +/// The chain MMR stored here contains: +/// - The MMR peaks. +/// - Block headers and authentication paths for the last [`NtxBuilderConfig::max_block_count`] +/// blocks. +/// +/// Authentication paths for older blocks are pruned because the NTX builder executes all notes as +/// "unauthenticated" (see [`InputNotes::from_unauthenticated_notes`]) and therefore does not need +/// to prove that input notes were created in specific past blocks. #[derive(Debug, Clone)] pub struct ChainState { /// The current tip of the chain. pub chain_tip_header: BlockHeader, - /// A partial representation of the latest state of the chain. - pub chain_mmr: PartialBlockchain, + /// A partial representation of the chain MMR. + /// + /// Contains block headers and authentication paths for the last + /// [`NtxBuilderConfig::max_block_count`] blocks only, since all notes are executed as + /// unauthenticated. + pub chain_mmr: Arc, } impl ChainState { /// Constructs a new instance of [`ChainState`]. - fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { + pub(crate) fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { let chain_mmr = PartialBlockchain::new(chain_mmr, []) .expect("partial blockchain should build from partial mmr"); - Self { chain_tip_header, chain_mmr } + Self { + chain_tip_header, + chain_mmr: Arc::new(chain_mmr), + } } /// Consumes the chain state and returns the chain tip header and the partial blockchain as a /// tuple. - pub fn into_parts(self) -> (BlockHeader, PartialBlockchain) { + pub fn into_parts(self) -> (BlockHeader, Arc) { (self.chain_tip_header, self.chain_mmr) } } @@ -58,103 +65,75 @@ impl ChainState { // NETWORK TRANSACTION BUILDER // ================================================================================================ +/// A boxed, pinned stream of mempool events with a `'static` lifetime. +/// +/// Boxing gives the stream a `'static` lifetime by ensuring it owns all its data, avoiding +/// complex lifetime annotations that would otherwise be required when storing `impl TryStream`. +pub(crate) type MempoolEventStream = + Pin> + Send>>; + /// Network transaction builder component. /// -/// The network transaction builder is in in charge of building transactions that consume notes +/// The network transaction builder is in charge of building transactions that consume notes /// against network accounts. These notes are identified and communicated by the block producer. /// The service maintains a list of unconsumed notes and periodically executes and proves /// transactions that consume them (reaching out to the store to retrieve state as necessary). 
/// /// The builder manages the tasks for every network account on the chain through the coordinator. +/// +/// Create an instance using [`NtxBuilderConfig::build()`]. pub struct NetworkTransactionBuilder { - /// Address of the store gRPC server. - store_url: Url, - /// Address of the block producer gRPC server. - block_producer_url: Url, - /// Address of the Validator server. - validator_url: Url, - /// Address of the remote prover. If `None`, transactions will be proven locally, which is - /// undesirable due to the performance impact. - tx_prover_url: Option, - /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. - /// This cache is shared across all account actors. - script_cache: LruCache, + /// Configuration for the builder. + config: NtxBuilderConfig, /// Coordinator for managing actor tasks. coordinator: Coordinator, + /// Client for the store gRPC API. + store: StoreClient, + /// Shared chain state updated by the event loop and read by actors. + chain_state: Arc>, + /// Context shared with all account actors. + actor_context: AccountActorContext, + /// Stream of mempool events from the block producer. + mempool_events: MempoolEventStream, } impl NetworkTransactionBuilder { - /// Channel capacity for account loading. - const ACCOUNT_CHANNEL_CAPACITY: usize = 1_000; - - /// Creates a new instance of the network transaction builder. - pub fn new( - store_url: Url, - block_producer_url: Url, - validator_url: Url, - tx_prover_url: Option, - script_cache_size: NonZeroUsize, + pub(crate) fn new( + config: NtxBuilderConfig, + coordinator: Coordinator, + store: StoreClient, + chain_state: Arc>, + actor_context: AccountActorContext, + mempool_events: MempoolEventStream, ) -> Self { - let script_cache = LruCache::new(script_cache_size); - let coordinator = Coordinator::new(MAX_IN_PROGRESS_TXS); Self { - store_url, - block_producer_url, - validator_url, - tx_prover_url, - script_cache, + config, coordinator, + store, + chain_state, + actor_context, + mempool_events, } } - /// Runs the network transaction builder until a fatal error occurs. + /// Runs the network transaction builder event loop until a fatal error occurs. + /// + /// This method: + /// 1. Spawns a background task to load existing network accounts from the store + /// 2. Runs the main event loop, processing mempool events and managing actors + /// + /// # Errors + /// + /// Returns an error if: + /// - The mempool event stream ends unexpectedly + /// - An actor encounters a fatal error + /// - The account loader task fails pub async fn run(mut self) -> anyhow::Result<()> { - let store = StoreClient::new(self.store_url.clone()); - let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); - - // Loop until we successfully subscribe. - // - // The mempool rejects our subscription if we don't have the same view of the chain aka - // if our chain tip does not match the mempools. This can occur if a new block is committed - // _after_ we fetch the chain tip from the store but _before_ our subscription request is - // handled. - // - // This is a hack-around for https://github.com/0xMiden/miden-node/issues/1566. - let (chain_tip_header, chain_mmr, mut mempool_events) = loop { - let (chain_tip_header, chain_mmr) = store - .get_latest_blockchain_data_with_retry() - .await? 
- .expect("store should contain a latest block"); - - match block_producer - .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) - .await - { - Ok(subscription) => break (chain_tip_header, chain_mmr, subscription), - Err(status) if status.code() == tonic::Code::InvalidArgument => { - tracing::error!(err=%status, "mempool subscription failed due to desync, trying again"); - }, - Err(err) => return Err(err).context("failed to subscribe to mempool events"), - } - }; - - // Create chain state that will be updated by the coordinator and read by actors. - let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); - - let actor_context = AccountActorContext { - block_producer_url: self.block_producer_url.clone(), - validator_url: self.validator_url.clone(), - tx_prover_url: self.tx_prover_url.clone(), - chain_state: chain_state.clone(), - store: store.clone(), - script_cache: self.script_cache.clone(), - }; - // Spawn a background task to load network accounts from the store. - // Accounts are sent through a channel in batches and processed in the main event loop. + // Accounts are sent through a channel and processed in the main event loop. let (account_tx, mut account_rx) = - mpsc::channel::(Self::ACCOUNT_CHANNEL_CAPACITY); - let account_loader_store = store.clone(); + mpsc::channel::(self.config.account_channel_capacity); + let account_loader_store = self.store.clone(); let mut account_loader_handle = tokio::spawn(async move { account_loader_store .stream_network_account_ids(account_tx) @@ -162,7 +141,7 @@ impl NetworkTransactionBuilder { .context("failed to load network accounts from store") }); - // Main loop which manages actors and passes mempool events to them. + // Main event loop. loop { tokio::select! { // Handle actor result. @@ -170,22 +149,18 @@ impl NetworkTransactionBuilder { result?; }, // Handle mempool events. - event = mempool_events.try_next() => { + event = self.mempool_events.next() => { let event = event .context("mempool event stream ended")? .context("mempool event stream failed")?; - self.handle_mempool_event( - event.into(), - &actor_context, - chain_state.clone(), - ).await?; + self.handle_mempool_event(event.into()).await?; }, // Handle account batches loaded from the store. // Once all accounts are loaded, the channel closes and this branch // becomes inactive (recv returns None and we stop matching). Some(account_id) = account_rx.recv() => { - self.handle_loaded_account(account_id, &actor_context).await?; + self.handle_loaded_account(account_id).await?; }, // Handle account loader task completion/failure. // If the task fails, we abort since the builder would be in a degraded state @@ -202,33 +177,23 @@ impl NetworkTransactionBuilder { } } - /// Handles a batch of account IDs loaded from the store by spawning actors for them. - #[tracing::instrument( - name = "ntx.builder.handle_loaded_accounts", - skip(self, account_id, actor_context) - )] + /// Handles account IDs loaded from the store by spawning actors for them. 
+ #[tracing::instrument(name = "ntx.builder.handle_loaded_account", skip(self, account_id))] async fn handle_loaded_account( &mut self, account_id: NetworkAccountId, - actor_context: &AccountActorContext, ) -> Result<(), anyhow::Error> { self.coordinator - .spawn_actor(AccountOrigin::store(account_id), actor_context) + .spawn_actor(AccountOrigin::store(account_id), &self.actor_context) .await?; Ok(()) } - /// Handles mempool events by sending them to actors via the coordinator and/or spawning new - /// actors as required. - #[tracing::instrument( - name = "ntx.builder.handle_mempool_event", - skip(self, event, actor_context, chain_state) - )] + /// Handles mempool events by routing them to actors and spawning new actors as needed. + #[tracing::instrument(name = "ntx.builder.handle_mempool_event", skip(self, event))] async fn handle_mempool_event( &mut self, event: Arc, - actor_context: &AccountActorContext, - chain_state: Arc>, ) -> Result<(), anyhow::Error> { match event.as_ref() { MempoolEvent::TransactionAdded { account_delta, .. } => { @@ -236,10 +201,12 @@ impl NetworkTransactionBuilder { if let Some(AccountUpdateDetails::Delta(delta)) = account_delta { // Handle account deltas for network accounts only. if let Some(network_account) = AccountOrigin::transaction(delta) { - // Spawn new actors if a transaction creates a new network account + // Spawn new actors if a transaction creates a new network account. let is_creating_account = delta.is_full_state(); if is_creating_account { - self.coordinator.spawn_actor(network_account, actor_context).await?; + self.coordinator + .spawn_actor(network_account, &self.actor_context) + .await?; } } } @@ -248,11 +215,11 @@ impl NetworkTransactionBuilder { }, // Update chain state and broadcast. MempoolEvent::BlockCommitted { header, txs } => { - self.update_chain_tip(header.as_ref().clone(), chain_state).await; + self.update_chain_tip(header.as_ref().clone()).await; self.coordinator.broadcast(event.clone()).await; - // All transactions pertaining to predating events should now be available through - // the store. So we can now drain them. + // All transactions pertaining to predating events should now be available + // through the store. So we can now drain them. for tx_id in txs { self.coordinator.drain_predating_events(tx_id); } @@ -271,24 +238,23 @@ impl NetworkTransactionBuilder { } } - /// Updates the chain tip and MMR block count. - /// - /// Blocks in the MMR are pruned if the block count exceeds the maximum. - async fn update_chain_tip(&mut self, tip: BlockHeader, chain_state: Arc>) { - // Lock the chain state. - let mut chain_state = chain_state.write().await; + /// Updates the chain tip and prunes old blocks from the MMR. + async fn update_chain_tip(&mut self, tip: BlockHeader) { + let mut chain_state = self.chain_state.write().await; // Update MMR which lags by one block. let mmr_tip = chain_state.chain_tip_header.clone(); - chain_state.chain_mmr.add_block(&mmr_tip, true); + Arc::make_mut(&mut chain_state.chain_mmr).add_block(&mmr_tip, true); // Set the new tip. chain_state.chain_tip_header = tip; // Keep MMR pruned. 
- let pruned_block_height = - (chain_state.chain_mmr.chain_length().as_usize().saturating_sub(MAX_BLOCK_COUNT)) - as u32; - chain_state.chain_mmr.prune_to(..pruned_block_height.into()); + let pruned_block_height = (chain_state + .chain_mmr + .chain_length() + .as_usize() + .saturating_sub(self.config.max_block_count)) as u32; + Arc::make_mut(&mut chain_state.chain_mmr).prune_to(..pruned_block_height.into()); } } diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs index 285cee47a..673c40106 100644 --- a/crates/ntx-builder/src/coordinator.rs +++ b/crates/ntx-builder/src/coordinator.rs @@ -90,20 +90,21 @@ pub struct Coordinator { /// Cache of events received from the mempool that predate corresponding network accounts. /// Grouped by network account ID to allow targeted event delivery to actors upon creation. predating_events: HashMap>>, + + /// Channel size for each actor's event channel. + actor_channel_size: usize, } impl Coordinator { - /// Maximum number of messages of the message channel for each actor. - const ACTOR_CHANNEL_SIZE: usize = 100; - /// Creates a new coordinator with the specified maximum number of inflight transactions - /// and shared script cache. - pub fn new(max_inflight_transactions: usize) -> Self { + /// and actor channel size. + pub fn new(max_inflight_transactions: usize, actor_channel_size: usize) -> Self { Self { actor_registry: HashMap::new(), actor_join_set: JoinSet::new(), semaphore: Arc::new(Semaphore::new(max_inflight_transactions)), predating_events: HashMap::new(), + actor_channel_size, } } @@ -122,11 +123,14 @@ impl Coordinator { // If an actor already exists for this account ID, something has gone wrong. if let Some(handle) = self.actor_registry.remove(&account_id) { - tracing::error!("account actor already exists for account: {}", account_id); + tracing::error!( + account_id = %account_id, + "Account actor already exists" + ); handle.cancel_token.cancel(); } - let (event_tx, event_rx) = mpsc::channel(Self::ACTOR_CHANNEL_SIZE); + let (event_tx, event_rx) = mpsc::channel(self.actor_channel_size); let cancel_token = tokio_util::sync::CancellationToken::new(); let actor = AccountActor::new(origin, actor_context, event_rx, cancel_token.clone()); let handle = ActorHandle::new(event_tx, cancel_token); @@ -143,7 +147,7 @@ impl Coordinator { } self.actor_registry.insert(account_id, handle); - tracing::info!("created actor for account: {}", account_id); + tracing::info!(account_id = %account_id, "Created actor for account prefix"); Ok(()) } @@ -154,18 +158,21 @@ impl Coordinator { /// message channel and can process it accordingly. /// /// If an actor fails to receive the event, it will be canceled. + #[tracing::instrument(name = "ntx.coordinator.broadcast", skip_all, fields( + actor.count = self.actor_registry.len(), + event.kind = %event.kind() + ))] pub async fn broadcast(&mut self, event: Arc) { - tracing::debug!( - actor_count = self.actor_registry.len(), - "broadcasting event to all actors" - ); - let mut failed_actors = Vec::new(); // Send event to all actors. 
for (account_id, handle) in &self.actor_registry { if let Err(err) = Self::send(handle, event.clone()).await { - tracing::error!("failed to send event to actor {}: {}", account_id, err); + tracing::error!( + account_id = %account_id, + error = %err, + "Failed to send event to actor" + ); failed_actors.push(*account_id); } } @@ -192,11 +199,11 @@ impl Coordinator { ActorShutdownReason::Cancelled(account_id) => { // Do not remove the actor from the registry, as it may be re-spawned. // The coordinator should always remove actors immediately after cancellation. - tracing::info!("account actor cancelled: {}", account_id); + tracing::info!(account_id = %account_id, "Account actor cancelled"); Ok(()) }, ActorShutdownReason::AccountReverted(account_id) => { - tracing::info!("account reverted: {}", account_id); + tracing::info!(account_id = %account_id, "Account reverted"); self.actor_registry.remove(&account_id); Ok(()) }, diff --git a/crates/ntx-builder/src/db/errors.rs b/crates/ntx-builder/src/db/errors.rs new file mode 100644 index 000000000..1ea43e382 --- /dev/null +++ b/crates/ntx-builder/src/db/errors.rs @@ -0,0 +1,69 @@ +use deadpool_sync::InteractError; + +use crate::db::manager::ConnectionManagerError; + +// DATABASE ERRORS +// ================================================================================================ + +#[derive(Debug, thiserror::Error)] +pub enum DatabaseError { + #[error("setup deadpool connection pool failed")] + ConnectionPoolObtainError(#[from] Box), + #[error(transparent)] + Diesel(#[from] diesel::result::Error), + #[error("SQLite pool interaction failed: {0}")] + InteractError(String), + #[error("schema verification failed")] + SchemaVerification(#[from] SchemaVerificationError), + #[error("connection manager error")] + ConnectionManager(#[source] ConnectionManagerError), +} + +impl DatabaseError { + /// Converts from `InteractError`. + /// + /// Required since `InteractError` has at least one enum variant that is _not_ `Send + + /// Sync` and hence prevents the `Sync` auto implementation. This does an internal + /// conversion to string while maintaining convenience. + pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { + let msg = msg.to_string(); + Self::InteractError(format!("{msg} failed: {e:?}")) + } +} + +// DATABASE SETUP ERRORS +// ================================================================================================ + +#[derive(Debug, thiserror::Error)] +pub enum DatabaseSetupError { + #[error("I/O error")] + Io(#[from] std::io::Error), + #[error("database error")] + Database(#[from] DatabaseError), + #[error("pool build error")] + PoolBuild(#[source] deadpool::managed::BuildError), +} + +// SCHEMA VERIFICATION ERRORS +// ================================================================================================ + +/// Errors that can occur during schema verification. 
+#[derive(Debug, thiserror::Error)] +pub enum SchemaVerificationError { + #[error("failed to create in-memory reference database")] + InMemoryDbCreation(#[source] diesel::ConnectionError), + #[error("failed to apply migrations to reference database")] + MigrationApplication(#[source] Box), + #[error("failed to extract schema from database")] + SchemaExtraction(#[source] diesel::result::Error), + #[error( + "schema mismatch: expected {expected_count} objects, found {actual_count} \ + ({missing_count} missing, {extra_count} unexpected)" + )] + Mismatch { + expected_count: usize, + actual_count: usize, + missing_count: usize, + extra_count: usize, + }, +} diff --git a/crates/ntx-builder/src/db/manager.rs b/crates/ntx-builder/src/db/manager.rs new file mode 100644 index 000000000..4234e09dd --- /dev/null +++ b/crates/ntx-builder/src/db/manager.rs @@ -0,0 +1,86 @@ +//! A minimal connection manager wrapper. +//! +//! Only required to set up connection parameters, specifically `WAL`. + +use deadpool_sync::InteractError; +use diesel::{RunQueryDsl, SqliteConnection}; + +#[derive(thiserror::Error, Debug)] +pub enum ConnectionManagerError { + #[error("failed to apply connection parameter")] + ConnectionParamSetup(#[source] diesel::result::Error), + #[error("SQLite pool interaction failed: {0}")] + InteractError(String), + #[error("failed to create a new connection")] + ConnectionCreate(#[source] deadpool_diesel::Error), + #[error("failed to recycle connection")] + PoolRecycle(#[source] deadpool::managed::RecycleError), +} + +impl ConnectionManagerError { + /// Converts from `InteractError`. + /// + /// Required since `InteractError` has at least one enum variant that is _not_ `Send + + /// Sync` and hence prevents the `Sync` auto implementation. + pub fn interact(msg: &(impl ToString + ?Sized), e: &InteractError) -> Self { + let msg = msg.to_string(); + Self::InteractError(format!("{msg} failed: {e:?}")) + } +} + +/// Create a connection manager with per-connection setup. +/// +/// In particular, `foreign_keys` checks are enabled and a write-ahead log (WAL) is used for journaling. +pub(crate) struct ConnectionManager { + pub(crate) manager: deadpool_diesel::sqlite::Manager, +} + +impl ConnectionManager { + pub(crate) fn new(database_path: &str) -> Self { + let manager = deadpool_diesel::sqlite::Manager::new( + database_path.to_owned(), + deadpool_diesel::sqlite::Runtime::Tokio1, + ); + Self { manager } + } +} + +impl deadpool::managed::Manager for ConnectionManager { + type Type = deadpool_sync::SyncWrapper; + type Error = ConnectionManagerError; + + async fn create(&self) -> Result { + let conn = self.manager.create().await.map_err(ConnectionManagerError::ConnectionCreate)?; + + conn.interact(configure_connection_on_creation) + .await + .map_err(|e| ConnectionManagerError::interact("Connection setup", &e))??; + Ok(conn) + } + + async fn recycle( + &self, + conn: &mut Self::Type, + metrics: &deadpool_diesel::Metrics, + ) -> deadpool::managed::RecycleResult { + self.manager.recycle(conn, metrics).await.map_err(|err| { + deadpool::managed::RecycleError::Backend(ConnectionManagerError::PoolRecycle(err)) + })?; + Ok(()) + } +} + +pub(crate) fn configure_connection_on_creation( + conn: &mut SqliteConnection, +) -> Result<(), ConnectionManagerError> { + // Enable the WAL mode. This allows concurrent reads while a write is in progress. + diesel::sql_query("PRAGMA journal_mode=WAL") + .execute(conn) + .map_err(ConnectionManagerError::ConnectionParamSetup)?; + + // Enable foreign key checks.
+ diesel::sql_query("PRAGMA foreign_keys=ON") + .execute(conn) + .map_err(ConnectionManagerError::ConnectionParamSetup)?; + Ok(()) +} diff --git a/crates/ntx-builder/src/db/migrations.rs b/crates/ntx-builder/src/db/migrations.rs new file mode 100644 index 000000000..069bdd411 --- /dev/null +++ b/crates/ntx-builder/src/db/migrations.rs @@ -0,0 +1,29 @@ +use diesel::SqliteConnection; +use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::errors::DatabaseError; +use crate::db::schema_hash::verify_schema; + +// The rebuild is automatically triggered by `build.rs` as described in +// . +pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations"); + +#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +pub fn apply_migrations(conn: &mut SqliteConnection) -> Result<(), DatabaseError> { + let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); + tracing::info!(target: COMPONENT, migrations = migrations.len(), "Applying pending migrations"); + + let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { + // Migrations applied successfully, verify schema hash. + verify_schema(conn)?; + return Ok(()); + }; + tracing::warn!(target: COMPONENT, "Failed to apply migration: {e:?}"); + // Something went wrong; revert the last migration. + conn.revert_last_migration(MIGRATIONS) + .expect("Duality is maintained by the developer"); + + Ok(()) +} diff --git a/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql b/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/crates/ntx-builder/src/db/migrations/2026020900000_setup/down.sql @@ -0,0 +1 @@ + diff --git a/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql new file mode 100644 index 000000000..2588a85bd --- /dev/null +++ b/crates/ntx-builder/src/db/migrations/2026020900000_setup/up.sql @@ -0,0 +1,59 @@ +-- Singleton row storing the chain tip header. +-- The chain MMR is reconstructed on startup from the store and maintained in memory. +CREATE TABLE chain_state ( + -- Singleton constraint: only one row allowed. + id INTEGER PRIMARY KEY CHECK (id = 0), + -- Block number of the chain tip. + block_num INTEGER NOT NULL, + -- Serialized BlockHeader. + block_header BLOB NOT NULL, + + CONSTRAINT chain_state_block_num_is_u32 CHECK (block_num BETWEEN 0 AND 0xFFFFFFFF) +); + +-- Account states: both committed and inflight. +-- Committed rows have transaction_id = NULL. Inflight rows have transaction_id set. +-- The auto-incrementing order_id preserves insertion order (VecDeque semantics). +CREATE TABLE accounts ( + -- Auto-incrementing ID preserves insertion order. + order_id INTEGER PRIMARY KEY AUTOINCREMENT, + -- AccountId serialized bytes (8 bytes). + account_id BLOB NOT NULL, + -- Serialized Account state. + account_data BLOB NOT NULL, + -- NULL if this is the committed state; transaction ID if inflight. + transaction_id BLOB +); + +-- At most one committed row per account. +CREATE UNIQUE INDEX idx_accounts_committed ON accounts(account_id) WHERE transaction_id IS NULL; +CREATE INDEX idx_accounts_account ON accounts(account_id); +CREATE INDEX idx_accounts_tx ON accounts(transaction_id) WHERE transaction_id IS NOT NULL; + +-- Notes: committed, inflight, and nullified — all in one table. 
+-- created_by = NULL means committed note; non-NULL means created by inflight tx. +-- consumed_by = NULL means unconsumed; non-NULL means consumed by inflight tx. +-- Row is deleted once consumption is committed. +CREATE TABLE notes ( + -- Nullifier bytes (32 bytes). Primary key. + nullifier BLOB PRIMARY KEY, + -- Target account ID. + account_id BLOB NOT NULL, + -- Serialized SingleTargetNetworkNote. + note_data BLOB NOT NULL, + -- Backoff tracking: number of failed execution attempts. + attempt_count INTEGER NOT NULL DEFAULT 0, + -- Backoff tracking: block number of the last failed attempt. NULL if never attempted. + last_attempt INTEGER, + -- NULL if the note came from a committed block; transaction ID if created by inflight tx. + created_by BLOB, + -- NULL if unconsumed; transaction ID of the consuming inflight tx. + consumed_by BLOB, + + CONSTRAINT notes_attempt_count_non_negative CHECK (attempt_count >= 0), + CONSTRAINT notes_last_attempt_is_u32 CHECK (last_attempt BETWEEN 0 AND 0xFFFFFFFF) +) WITHOUT ROWID; + +CREATE INDEX idx_notes_account ON notes(account_id); +CREATE INDEX idx_notes_created_by ON notes(created_by) WHERE created_by IS NOT NULL; +CREATE INDEX idx_notes_consumed_by ON notes(consumed_by) WHERE consumed_by IS NOT NULL; diff --git a/crates/ntx-builder/src/db/mod.rs b/crates/ntx-builder/src/db/mod.rs new file mode 100644 index 000000000..488673b91 --- /dev/null +++ b/crates/ntx-builder/src/db/mod.rs @@ -0,0 +1,121 @@ +use std::path::PathBuf; + +use anyhow::Context; +use diesel::{Connection, SqliteConnection}; +use tracing::{info, instrument}; + +use crate::COMPONENT; +use crate::db::errors::{DatabaseError, DatabaseSetupError}; +use crate::db::manager::{ConnectionManager, configure_connection_on_creation}; +use crate::db::migrations::apply_migrations; + +pub mod errors; +pub(crate) mod manager; + +mod migrations; +mod schema_hash; + +/// [diesel](https://diesel.rs) generated schema. +pub(crate) mod schema; + +pub type Result = std::result::Result; + +pub struct Db { + pool: deadpool_diesel::Pool>, +} + +impl Db { + /// Creates a new database file, configures it, and applies migrations. + /// + /// This is a synchronous one-shot setup used during node initialization. + /// For runtime access with a connection pool, use [`Db::load`]. + #[instrument( + target = COMPONENT, + name = "ntx_builder.database.bootstrap", + skip_all, + fields(path=%database_filepath.display()), + err, + )] + pub fn bootstrap(database_filepath: PathBuf) -> anyhow::Result<()> { + let mut conn: SqliteConnection = diesel::sqlite::SqliteConnection::establish( + database_filepath.to_str().context("database filepath is invalid")?, + ) + .context("failed to open a database connection")?; + + configure_connection_on_creation(&mut conn)?; + + // Run migrations. + apply_migrations(&mut conn).context("failed to apply database migrations")?; + + Ok(()) + } + + /// Create and commit a transaction with the queries added in the provided closure. 
+ #[expect(dead_code)] + pub(crate) async fn transact(&self, msg: M, query: Q) -> std::result::Result + where + Q: Send + + for<'a, 't> FnOnce(&'a mut SqliteConnection) -> std::result::Result + + 'static, + R: Send + 'static, + M: Send + ToString, + E: From, + E: From, + E: std::error::Error + Send + Sync + 'static, + { + let conn = self + .pool + .get() + .await + .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; + + conn.interact(|conn| <_ as diesel::Connection>::transaction::(conn, query)) + .await + .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? + } + + /// Run the query _without_ a transaction. + pub(crate) async fn query(&self, msg: M, query: Q) -> std::result::Result + where + Q: Send + FnOnce(&mut SqliteConnection) -> std::result::Result + 'static, + R: Send + 'static, + M: Send + ToString, + E: From, + E: std::error::Error + Send + Sync + 'static, + { + let conn = self + .pool + .get() + .await + .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; + + conn.interact(move |conn| { + let r = query(conn)?; + Ok(r) + }) + .await + .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? + } + + /// Opens a connection pool to an existing database and re-applies pending migrations. + /// + /// Use [`Db::bootstrap`] first to create and initialize the database file. + #[instrument(target = COMPONENT, skip_all)] + pub async fn load(database_filepath: PathBuf) -> Result { + let manager = ConnectionManager::new(database_filepath.to_str().unwrap()); + let pool = deadpool_diesel::Pool::builder(manager) + .max_size(16) + .build() + .map_err(DatabaseSetupError::PoolBuild)?; + + info!( + target: COMPONENT, + sqlite = %database_filepath.display(), + "Connected to the database" + ); + + let me = Db { pool }; + me.query("migrations", apply_migrations).await?; + Ok(me) + } +} diff --git a/crates/ntx-builder/src/db/schema.rs b/crates/ntx-builder/src/db/schema.rs new file mode 100644 index 000000000..74ee8d462 --- /dev/null +++ b/crates/ntx-builder/src/db/schema.rs @@ -0,0 +1,32 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + accounts (order_id) { + order_id -> Nullable, + account_id -> Binary, + account_data -> Binary, + transaction_id -> Nullable, + } +} + +diesel::table! { + chain_state (id) { + id -> Nullable, + block_num -> Integer, + block_header -> Binary, + } +} + +diesel::table! { + notes (nullifier) { + nullifier -> Binary, + account_id -> Binary, + note_data -> Binary, + attempt_count -> Integer, + last_attempt -> Nullable, + created_by -> Nullable, + consumed_by -> Nullable, + } +} + +diesel::allow_tables_to_appear_in_same_query!(accounts, chain_state, notes,); diff --git a/crates/ntx-builder/src/db/schema_hash.rs b/crates/ntx-builder/src/db/schema_hash.rs new file mode 100644 index 000000000..21ebb0c7b --- /dev/null +++ b/crates/ntx-builder/src/db/schema_hash.rs @@ -0,0 +1,190 @@ +//! Schema verification to detect database schema changes. +//! +//! Detects: +//! +//! - Direct modifications to the database schema outside of migrations +//! - Running a node against a database created with different set of migrations +//! - Forgetting to reset the database after schema changes i.e. for a specific migration +//! +//! The verification works by creating an in-memory reference database, applying all +//! migrations to it, and comparing its schema against the actual database schema. 
+ +use diesel::{Connection, RunQueryDsl, SqliteConnection}; +use diesel_migrations::MigrationHarness; +use tracing::instrument; + +use crate::COMPONENT; +use crate::db::errors::SchemaVerificationError; +use crate::db::migrations::MIGRATIONS; + +/// Represents a schema object for comparison. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +struct SchemaObject { + object_type: String, + name: String, + sql: String, +} + +/// Represents a row from the `sqlite_schema` table. +#[derive(diesel::QueryableByName, Debug)] +struct SqliteSchemaRow { + #[diesel(sql_type = diesel::sql_types::Text)] + schema_type: String, + #[diesel(sql_type = diesel::sql_types::Text)] + name: String, + #[diesel(sql_type = diesel::sql_types::Nullable)] + sql: Option, +} + +/// Extracts all schema objects from a database connection. +fn extract_schema( + conn: &mut SqliteConnection, +) -> Result, SchemaVerificationError> { + let rows: Vec = diesel::sql_query( + "SELECT type as schema_type, name, sql FROM sqlite_schema \ + WHERE type IN ('table', 'index') \ + AND name NOT LIKE 'sqlite_%' \ + AND name NOT LIKE '__diesel_%' \ + ORDER BY type, name", + ) + .load(conn) + .map_err(SchemaVerificationError::SchemaExtraction)?; + + let mut objects: Vec = rows + .into_iter() + .filter_map(|row| { + row.sql.map(|sql| SchemaObject { + object_type: row.schema_type, + name: row.name, + sql, + }) + }) + .collect(); + + objects.sort(); + Ok(objects) +} + +/// Computes the expected schema by applying migrations to an in-memory database. +fn compute_expected_schema() -> Result, SchemaVerificationError> { + let mut conn = SqliteConnection::establish(":memory:") + .map_err(SchemaVerificationError::InMemoryDbCreation)?; + + conn.run_pending_migrations(MIGRATIONS) + .map_err(SchemaVerificationError::MigrationApplication)?; + + extract_schema(&mut conn) +} + +/// Verifies that the database schema matches the expected schema. +/// +/// Creates an in-memory database, applies all migrations, and compares schemas. +/// +/// # Errors +/// +/// Returns `SchemaVerificationError::Mismatch` if schemas differ. +#[instrument(level = "info", target = COMPONENT, skip_all, err)] +pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificationError> { + let expected = compute_expected_schema()?; + let actual = extract_schema(conn)?; + + if actual != expected { + let expected_names: Vec<_> = expected.iter().map(|o| &o.name).collect(); + let actual_names: Vec<_> = actual.iter().map(|o| &o.name).collect(); + + // Find differences for better error messages. + let missing: Vec<_> = expected.iter().filter(|e| !actual.contains(e)).collect(); + let extra: Vec<_> = actual.iter().filter(|a| !expected.contains(a)).collect(); + + tracing::error!( + target: COMPONENT, + ?expected_names, + ?actual_names, + missing_count = missing.len(), + extra_count = extra.len(), + "Database schema mismatch detected" + ); + + // Log specific differences at debug level. 
+ for obj in &missing { + tracing::debug!(target: COMPONENT, name = %obj.name, "Missing or modified: {}", obj.sql); + } + for obj in &extra { + tracing::debug!(target: COMPONENT, name = %obj.name, "Extra or modified: {}", obj.sql); + } + + return Err(SchemaVerificationError::Mismatch { + expected_count: expected.len(), + actual_count: actual.len(), + missing_count: missing.len(), + extra_count: extra.len(), + }); + } + + tracing::info!( + target: COMPONENT, + objects = expected.len(), + "Database schema verification passed" + ); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::db::errors::DatabaseError; + use crate::db::migrations::apply_migrations; + + #[test] + fn verify_schema_passes_for_correct_schema() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + verify_schema(&mut conn).expect("Should pass for correct schema"); + } + + #[test] + fn verify_schema_fails_for_added_object() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("CREATE TABLE rogue_table (id INTEGER PRIMARY KEY)") + .execute(&mut conn) + .unwrap(); + + assert!(matches!( + verify_schema(&mut conn), + Err(SchemaVerificationError::Mismatch { .. }) + )); + } + + #[test] + fn verify_schema_fails_for_removed_object() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("DROP TABLE notes").execute(&mut conn).unwrap(); + + assert!(matches!( + verify_schema(&mut conn), + Err(SchemaVerificationError::Mismatch { .. }) + )); + } + + #[test] + fn apply_migrations_succeeds_on_fresh_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + apply_migrations(&mut conn).expect("Should succeed on fresh database"); + } + + #[test] + fn apply_migrations_fails_on_tampered_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("CREATE TABLE tampered (id INTEGER)") + .execute(&mut conn) + .unwrap(); + + assert!(matches!(apply_migrations(&mut conn), Err(DatabaseError::SchemaVerification(_)))); + } +} diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index 62088ce6c..d77a8dd7d 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -1,9 +1,23 @@ use std::num::NonZeroUsize; +use std::sync::Arc; + +use actor::AccountActorContext; +use anyhow::Context; +use block_producer::BlockProducerClient; +use builder::{ChainState, MempoolEventStream}; +use coordinator::Coordinator; +use futures::TryStreamExt; +use miden_node_utils::lru_cache::LruCache; +use store::StoreClient; +use tokio::sync::RwLock; +use url::Url; mod actor; mod block_producer; mod builder; mod coordinator; +#[expect(dead_code, reason = "will be used as part of follow-up work")] +pub(crate) mod db; mod store; pub use builder::NetworkTransactionBuilder; @@ -13,12 +27,224 @@ pub use builder::NetworkTransactionBuilder; const COMPONENT: &str = "miden-ntx-builder"; -/// Maximum number of network notes a network transaction is allowed to consume. -const MAX_NOTES_PER_TX: NonZeroUsize = NonZeroUsize::new(20).unwrap(); -const _: () = assert!(MAX_NOTES_PER_TX.get() <= miden_tx::MAX_NUM_CHECKER_NOTES); +/// Default maximum number of network notes a network transaction is allowed to consume. 
+const DEFAULT_MAX_NOTES_PER_TX: NonZeroUsize = NonZeroUsize::new(20).expect("literal is non-zero"); +const _: () = assert!(DEFAULT_MAX_NOTES_PER_TX.get() <= miden_tx::MAX_NUM_CHECKER_NOTES); -/// Maximum number of network transactions which should be in progress concurrently. +/// Default maximum number of network transactions which should be in progress concurrently. /// /// This only counts transactions which are being computed locally and does not include /// uncommitted transactions in the mempool. -const MAX_IN_PROGRESS_TXS: usize = 4; +const DEFAULT_MAX_CONCURRENT_TXS: usize = 4; + +/// Default maximum number of blocks to keep in the chain MMR. +const DEFAULT_MAX_BLOCK_COUNT: usize = 4; + +/// Default channel capacity for account loading from the store. +const DEFAULT_ACCOUNT_CHANNEL_CAPACITY: usize = 1_000; + +/// Default channel size for actor event channels. +const DEFAULT_ACTOR_CHANNEL_SIZE: usize = 100; + +/// Default maximum number of attempts to execute a failing note before dropping it. +const DEFAULT_MAX_NOTE_ATTEMPTS: usize = 30; + +/// Default script cache size. +const DEFAULT_SCRIPT_CACHE_SIZE: NonZeroUsize = + NonZeroUsize::new(1_000).expect("literal is non-zero"); + +// CONFIGURATION +// ================================================================================================= + +/// Configuration for the Network Transaction Builder. +/// +/// This struct contains all the settings needed to create and run a `NetworkTransactionBuilder`. +#[derive(Debug, Clone)] +pub struct NtxBuilderConfig { + /// Address of the store gRPC server (ntx-builder API). + pub store_url: Url, + + /// Address of the block producer gRPC server. + pub block_producer_url: Url, + + /// Address of the validator gRPC server. + pub validator_url: Url, + + /// Address of the remote transaction prover. If `None`, transactions will be proven locally. + pub tx_prover_url: Option, + + /// Size of the LRU cache for note scripts. Scripts are fetched from the store and cached + /// to avoid repeated gRPC calls. + pub script_cache_size: NonZeroUsize, + + /// Maximum number of network transactions which should be in progress concurrently across + /// all account actors. + pub max_concurrent_txs: usize, + + /// Maximum number of network notes a single transaction is allowed to consume. + pub max_notes_per_tx: NonZeroUsize, + + /// Maximum number of attempts to execute a failing note before dropping it. + /// Notes use exponential backoff between attempts. + pub max_note_attempts: usize, + + /// Maximum number of blocks to keep in the chain MMR. Older blocks are pruned. + pub max_block_count: usize, + + /// Channel capacity for loading accounts from the store during startup. + pub account_channel_capacity: usize, + + /// Channel size for each actor's event channel. + pub actor_channel_size: usize, +} + +impl NtxBuilderConfig { + pub fn new(store_url: Url, block_producer_url: Url, validator_url: Url) -> Self { + Self { + store_url, + block_producer_url, + validator_url, + tx_prover_url: None, + script_cache_size: DEFAULT_SCRIPT_CACHE_SIZE, + max_concurrent_txs: DEFAULT_MAX_CONCURRENT_TXS, + max_notes_per_tx: DEFAULT_MAX_NOTES_PER_TX, + max_note_attempts: DEFAULT_MAX_NOTE_ATTEMPTS, + max_block_count: DEFAULT_MAX_BLOCK_COUNT, + account_channel_capacity: DEFAULT_ACCOUNT_CHANNEL_CAPACITY, + actor_channel_size: DEFAULT_ACTOR_CHANNEL_SIZE, + } + } + + /// Sets the remote transaction prover URL. + /// + /// If not set, transactions will be proven locally. 
+ #[must_use] + pub fn with_tx_prover_url(mut self, url: Option) -> Self { + self.tx_prover_url = url; + self + } + + /// Sets the script cache size. + #[must_use] + pub fn with_script_cache_size(mut self, size: NonZeroUsize) -> Self { + self.script_cache_size = size; + self + } + + /// Sets the maximum number of concurrent transactions. + #[must_use] + pub fn with_max_concurrent_txs(mut self, max: usize) -> Self { + self.max_concurrent_txs = max; + self + } + + /// Sets the maximum number of notes per transaction. + /// + /// # Panics + /// + /// Panics if `max` exceeds `miden_tx::MAX_NUM_CHECKER_NOTES`. + #[must_use] + pub fn with_max_notes_per_tx(mut self, max: NonZeroUsize) -> Self { + assert!( + max.get() <= miden_tx::MAX_NUM_CHECKER_NOTES, + "max_notes_per_tx ({}) exceeds MAX_NUM_CHECKER_NOTES ({})", + max, + miden_tx::MAX_NUM_CHECKER_NOTES + ); + self.max_notes_per_tx = max; + self + } + + /// Sets the maximum number of note execution attempts. + #[must_use] + pub fn with_max_note_attempts(mut self, max: usize) -> Self { + self.max_note_attempts = max; + self + } + + /// Sets the maximum number of blocks to keep in the chain MMR. + #[must_use] + pub fn with_max_block_count(mut self, max: usize) -> Self { + self.max_block_count = max; + self + } + + /// Sets the account channel capacity for startup loading. + #[must_use] + pub fn with_account_channel_capacity(mut self, capacity: usize) -> Self { + self.account_channel_capacity = capacity; + self + } + + /// Sets the actor event channel size. + #[must_use] + pub fn with_actor_channel_size(mut self, size: usize) -> Self { + self.actor_channel_size = size; + self + } + + /// Builds and initializes the network transaction builder. + /// + /// This method connects to the store and block producer services, fetches the current + /// chain tip, and subscribes to mempool events. + /// + /// # Errors + /// + /// Returns an error if: + /// - The store connection fails + /// - The mempool subscription fails (after retries) + /// - The store contains no blocks (not bootstrapped) + pub async fn build(self) -> anyhow::Result { + let script_cache = LruCache::new(self.script_cache_size); + let coordinator = Coordinator::new(self.max_concurrent_txs, self.actor_channel_size); + + let store = StoreClient::new(self.store_url.clone()); + let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); + + let (chain_tip_header, chain_mmr, mempool_events) = loop { + let (chain_tip_header, chain_mmr) = store + .get_latest_blockchain_data_with_retry() + .await? 
+ .context("store should contain a latest block")?; + + match block_producer + .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) + .await + { + Ok(subscription) => { + let stream: MempoolEventStream = Box::pin(subscription.into_stream()); + break (chain_tip_header, chain_mmr, stream); + }, + Err(status) if status.code() == tonic::Code::InvalidArgument => { + tracing::warn!( + err = %status, + "mempool subscription failed due to chain tip desync, retrying" + ); + }, + Err(err) => return Err(err).context("failed to subscribe to mempool events"), + } + }; + + let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); + + let actor_context = AccountActorContext { + block_producer_url: self.block_producer_url.clone(), + validator_url: self.validator_url.clone(), + tx_prover_url: self.tx_prover_url.clone(), + chain_state: chain_state.clone(), + store: store.clone(), + script_cache, + max_notes_per_tx: self.max_notes_per_tx, + max_note_attempts: self.max_note_attempts, + }; + + Ok(NetworkTransactionBuilder::new( + self, + coordinator, + store, + chain_state, + actor_context, + mempool_events, + )) + } +} diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 42a418cc2..ac5f4c863 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -236,10 +236,10 @@ impl StoreClient { &self, sender: tokio::sync::mpsc::Sender, ) -> Result<(), StoreError> { - let mut block_range = BlockNumber::from(0)..=BlockNumber::from(u32::MAX); + let mut block_range = BlockNumber::GENESIS..=BlockNumber::MAX; while let Some(next_start) = self.load_accounts_page(block_range, &sender).await? { - block_range = next_start..=BlockNumber::from(u32::MAX); + block_range = next_start..=BlockNumber::MAX; } Ok(()) diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml index 6d3589ca3..2e9767f88 100644 --- a/crates/proto/Cargo.toml +++ b/crates/proto/Cargo.toml @@ -33,7 +33,8 @@ assert_matches = { workspace = true } proptest = { version = "1.7" } [build-dependencies] -fs-err = { workspace = true } -miden-node-proto-build = { features = ["internal"], workspace = true } -miette = { version = "7.6" } -tonic-prost-build = { workspace = true } +fs-err = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } +miette = { version = "7.6" } +tonic-prost-build = { workspace = true } diff --git a/crates/proto/build.rs b/crates/proto/build.rs index b0ac773a7..4f64f4e9d 100644 --- a/crates/proto/build.rs +++ b/crates/proto/build.rs @@ -1,4 +1,5 @@ use std::env; +use std::fmt::Write; use std::path::{Path, PathBuf}; use fs_err as fs; @@ -22,6 +23,8 @@ fn main() -> miette::Result<()> { println!("cargo::rerun-if-changed=../../proto/proto"); println!("cargo::rerun-if-env-changed=BUILD_PROTO"); + miden_node_rocksdb_cxx_linkage_fix::configure(); + // Skip this build script in BUILD_PROTO environment variable is not set to `1`. 
if env::var("BUILD_PROTO").unwrap_or("0".to_string()) == "0" { return Ok(()); @@ -90,16 +93,17 @@ fn generate_mod_rs(directory: impl AsRef) -> std::io::Result<()> { submodules.sort(); - let contents = submodules.iter().map(|f| format!("pub mod {f};\n")); - let contents = std::iter::once( - "#![allow(clippy::pedantic, reason = \"generated by build.rs and tonic\")]\n".to_string(), - ) - .chain(std::iter::once( - "#![allow(clippy::large_enum_variant, reason = \"generated by build.rs and tonic\")]\n\n" - .to_string(), - )) - .chain(contents) - .collect::(); + // Lints we need to allow for the generated code. + let lints = ["pedantic", "large_enum_variant", "allow_attributes"]; + let lints = lints.into_iter().fold(String::new(), |mut s, lint| { + writeln!(s, " clippy::{lint},").unwrap(); + s + }); + let lints = + format!("#![expect(\n{lints} reason = \"generated by build.rs and tonic\"\n)]\n\n"); + + let modules = submodules.iter().map(|f| format!("pub mod {f};\n")); + let contents = std::iter::once(lints).chain(modules).collect::(); fs::write(mod_filepath, contents) } diff --git a/crates/proto/src/domain/block.rs b/crates/proto/src/domain/block.rs index aa94f306d..112f84e50 100644 --- a/crates/proto/src/domain/block.rs +++ b/crates/proto/src/domain/block.rs @@ -3,7 +3,14 @@ use std::ops::RangeInclusive; use miden_protocol::account::AccountId; use miden_protocol::block::nullifier_tree::NullifierWitness; -use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, FeeParameters}; +use miden_protocol::block::{ + BlockBody, + BlockHeader, + BlockInputs, + BlockNumber, + FeeParameters, + SignedBlock, +}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature}; use miden_protocol::note::{NoteId, NoteInclusionProof}; use miden_protocol::transaction::PartialBlockchain; @@ -115,6 +122,84 @@ impl TryFrom for BlockHeader { } } +// BLOCK BODY +// ================================================================================================ + +impl From<&BlockBody> for proto::blockchain::BlockBody { + fn from(body: &BlockBody) -> Self { + Self { block_body: body.to_bytes() } + } +} + +impl From for proto::blockchain::BlockBody { + fn from(body: BlockBody) -> Self { + (&body).into() + } +} + +impl TryFrom<&proto::blockchain::BlockBody> for BlockBody { + type Error = ConversionError; + + fn try_from(value: &proto::blockchain::BlockBody) -> Result { + value.try_into() + } +} + +impl TryFrom for BlockBody { + type Error = ConversionError; + fn try_from(value: proto::blockchain::BlockBody) -> Result { + BlockBody::read_from_bytes(&value.block_body) + .map_err(|source| ConversionError::deserialization_error("BlockBody", source)) + } +} + +// SIGNED BLOCK +// ================================================================================================ + +impl From<&SignedBlock> for proto::blockchain::SignedBlock { + fn from(block: &SignedBlock) -> Self { + Self { + header: Some(block.header().into()), + body: Some(block.body().into()), + signature: Some(block.signature().into()), + } + } +} + +impl From for proto::blockchain::SignedBlock { + fn from(block: SignedBlock) -> Self { + (&block).into() + } +} + +impl TryFrom<&proto::blockchain::SignedBlock> for SignedBlock { + type Error = ConversionError; + + fn try_from(value: &proto::blockchain::SignedBlock) -> Result { + value.try_into() + } +} + +impl TryFrom for SignedBlock { + type Error = ConversionError; + fn try_from(value: proto::blockchain::SignedBlock) -> Result { + let header = value + .header + 
.ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(header)))? + .try_into()?; + let body = value + .body + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(body)))? + .try_into()?; + let signature = value + .signature + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(signature)))? + .try_into()?; + + Ok(SignedBlock::new_unchecked(header, body, signature)) + } +} + // BLOCK INPUTS // ================================================================================================ diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs index 94fea5beb..1f7c9cb0d 100644 --- a/crates/proto/src/domain/note.rs +++ b/crates/proto/src/domain/note.rs @@ -23,6 +23,33 @@ use super::account::NetworkAccountId; use crate::errors::{ConversionError, MissingFieldHelper}; use crate::generated as proto; +// NOTE TYPE +// ================================================================================================ + +impl From for proto::note::NoteType { + fn from(note_type: NoteType) -> Self { + match note_type { + NoteType::Public => proto::note::NoteType::Public, + NoteType::Private => proto::note::NoteType::Private, + } + } +} + +impl TryFrom for NoteType { + type Error = ConversionError; + + fn try_from(note_type: proto::note::NoteType) -> Result { + match note_type { + proto::note::NoteType::Public => Ok(NoteType::Public), + proto::note::NoteType::Private => Ok(NoteType::Private), + proto::note::NoteType::Unspecified => Err(ConversionError::EnumDiscriminantOutOfRange), + } + } +} + +// NOTE METADATA +// ================================================================================================ + impl TryFrom for NoteMetadata { type Error = ConversionError; @@ -31,7 +58,9 @@ impl TryFrom for NoteMetadata { .sender .ok_or_else(|| proto::note::NoteMetadata::missing_field(stringify!(sender)))? .try_into()?; - let note_type = NoteType::try_from(u64::from(value.note_type))?; + let note_type = proto::note::NoteType::try_from(value.note_type) + .map_err(|_| ConversionError::EnumDiscriminantOutOfRange)? + .try_into()?; let tag = NoteTag::new(value.tag); // Deserialize attachment if present @@ -77,7 +106,7 @@ impl From for proto::note::NetworkNote { impl From for proto::note::NoteMetadata { fn from(val: NoteMetadata) -> Self { let sender = Some(val.sender().into()); - let note_type = val.note_type() as u32; + let note_type = proto::note::NoteType::from(val.note_type()) as i32; let tag = val.tag().as_u32(); let attachment = val.attachment().to_bytes(); diff --git a/crates/proto/src/generated/blockchain.rs b/crates/proto/src/generated/blockchain.rs index 69bbe2e28..135d763e1 100644 --- a/crates/proto/src/generated/blockchain.rs +++ b/crates/proto/src/generated/blockchain.rs @@ -1,11 +1,13 @@ // This file is @generated by prost-build. -/// Represents a block. +/// Represents a signed block. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Block { - /// Block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_protocol::block::Block\]. - #[prost(bytes = "vec", tag = "1")] - pub block: ::prost::alloc::vec::Vec, +pub struct SignedBlock { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub body: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub signature: ::core::option::Option, } /// Represents a proposed block. 
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] diff --git a/crates/proto/src/generated/mod.rs b/crates/proto/src/generated/mod.rs index 61e3a5379..4ec0ae408 100644 --- a/crates/proto/src/generated/mod.rs +++ b/crates/proto/src/generated/mod.rs @@ -1,5 +1,9 @@ -#![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] -#![allow(clippy::large_enum_variant, reason = "generated by build.rs and tonic")] +#![expect( + clippy::pedantic, + clippy::large_enum_variant, + clippy::allow_attributes, + reason = "generated by build.rs and tonic" +)] pub mod account; pub mod block_producer; diff --git a/crates/proto/src/generated/note.rs b/crates/proto/src/generated/note.rs index 83d56aeb6..8bff5858c 100644 --- a/crates/proto/src/generated/note.rs +++ b/crates/proto/src/generated/note.rs @@ -19,9 +19,9 @@ pub struct NoteMetadata { /// The account which sent the note. #[prost(message, optional, tag = "1")] pub sender: ::core::option::Option, - /// The type of the note (0b01 = public, 0b10 = private, 0b11 = encrypted). - #[prost(uint32, tag = "2")] - pub note_type: u32, + /// The type of the note. + #[prost(enumeration = "NoteType", tag = "2")] + pub note_type: i32, /// A value which can be used by the recipient(s) to identify notes intended for them. /// /// See `miden_protocol::note::note_tag` for more info. @@ -128,3 +128,36 @@ pub struct NoteScript { #[prost(bytes = "vec", tag = "2")] pub mast: ::prost::alloc::vec::Vec, } +/// The type of a note. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum NoteType { + /// Unspecified note type (default value, should not be used). + Unspecified = 0, + /// Public note - details are visible on-chain. + Public = 1, + /// Private note - details are not visible on-chain. + Private = 2, +} +impl NoteType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Unspecified => "NOTE_TYPE_UNSPECIFIED", + Self::Public => "NOTE_TYPE_PUBLIC", + Self::Private => "NOTE_TYPE_PRIVATE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NOTE_TYPE_UNSPECIFIED" => Some(Self::Unspecified), + "NOTE_TYPE_PUBLIC" => Some(Self::Public), + "NOTE_TYPE_PRIVATE" => Some(Self::Private), + _ => None, + } + } +} diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index 798a1d18e..0f436386a 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -713,6 +713,29 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "Status")); self.inner.unary(req, path, codec).await } + /// Returns the query parameter limits configured for RPC methods. + /// + /// These define the maximum number of each parameter a method will accept. + /// Exceeding the limit will result in the request being rejected and you should instead send + /// multiple smaller requests. 
+ pub async fn get_limits( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetLimits"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetLimits")); + self.inner.unary(req, path, codec).await + } /// Returns a Sparse Merkle Tree opening proof for each requested nullifier /// /// Each proof demonstrates either: @@ -928,14 +951,12 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SubmitProvenBatch")); self.inner.unary(req, path, codec).await } - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. - pub async fn sync_nullifiers( + /// Returns transactions records for specific accounts within a block range. + pub async fn sync_transactions( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -947,17 +968,27 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNullifiers"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncTransactions"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNullifiers")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncTransactions")); self.inner.unary(req, path, codec).await } - /// Returns account vault updates for specified account within a block range. - pub async fn sync_account_vault( + /// Returns info which can be used by the client to sync up to the tip of chain for the notes + /// they are interested in. + /// + /// Client specifies the `note_tags` they are interested in, and the block height from which to + /// search for new for matching notes for. The request will then return the next block containing + /// any note matching the provided tags. + /// + /// The response includes each note's metadata and inclusion proof. + /// + /// A basic note sync can be implemented by repeatedly requesting the previous response's block + /// until reaching the tip of the chain. + pub async fn sync_notes( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -969,25 +1000,19 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncAccountVault"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNotes"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncAccountVault")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNotes")); self.inner.unary(req, path, codec).await } - /// Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. - /// - /// Client specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. 
The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. + /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - pub async fn sync_notes( + /// Note that only 16-bit prefixes are supported at this time. + pub async fn sync_nullifiers( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -999,31 +1024,17 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNotes"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncNullifiers"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNotes")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncNullifiers")); self.inner.unary(req, path, codec).await } - /// Returns info which can be used by the client to sync up to the latest state of the chain - /// for the objects (accounts and notes) the client is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. Client is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the client is fully synchronized with the chain. - /// - /// Each update response also contains info about new notes, accounts etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags contain only high - /// part of hashes. Thus, returned data contains excessive notes, client can make - /// additional filtering of that data on its side. - pub async fn sync_state( + /// Returns account vault updates for specified account within a block range. + pub async fn sync_account_vault( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -1035,9 +1046,9 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncState"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncAccountVault"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncState")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncAccountVault")); self.inner.unary(req, path, codec).await } /// Returns storage map updates for specified account and storage slots within a block range. @@ -1065,12 +1076,26 @@ pub mod api_client { .insert(GrpcMethod::new("rpc.Api", "SyncAccountStorageMaps")); self.inner.unary(req, path, codec).await } - /// Returns transactions records for specific accounts within a block range. - pub async fn sync_transactions( + /// Returns info which can be used by the client to sync up to the latest state of the chain + /// for the objects (accounts and notes) the client is interested in. 
+ /// + /// This request returns the next block containing requested data. It also returns `chain_tip` + /// which is the latest block number in the chain. Client is expected to repeat these requests + /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point + /// the client is fully synchronized with the chain. + /// + /// Each update response also contains info about new notes, accounts etc. created. It also + /// returns Chain MMR delta that can be used to update the state of Chain MMR. This includes + /// both chain MMR peaks and chain MMR nodes. + /// + /// For preserving some degree of privacy, note tags contain only high + /// part of hashes. Thus, returned data contains excessive notes, client can make + /// additional filtering of that data on its side. + pub async fn sync_state( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -1082,32 +1107,9 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncTransactions"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncTransactions")); - self.inner.unary(req, path, codec).await - } - /// Returns the query parameter limits configured for RPC methods. - /// - /// These define the maximum number of each parameter a method will accept. - /// Exceeding the limit will result in the request being rejected and you should instead send - /// multiple smaller requests. - pub async fn get_limits( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetLimits"); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncState"); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetLimits")); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncState")); self.inner.unary(req, path, codec).await } } @@ -1130,6 +1132,15 @@ pub mod api_server { &self, request: tonic::Request<()>, ) -> std::result::Result, tonic::Status>; + /// Returns the query parameter limits configured for RPC methods. + /// + /// These define the maximum number of each parameter a method will accept. + /// Exceeding the limit will result in the request being rejected and you should instead send + /// multiple smaller requests. + async fn get_limits( + &self, + request: tonic::Request<()>, + ) -> std::result::Result, tonic::Status>; /// Returns a Sparse Merkle Tree opening proof for each requested nullifier /// /// Each proof demonstrates either: @@ -1212,6 +1223,32 @@ pub mod api_server { tonic::Response, tonic::Status, >; + /// Returns transactions records for specific accounts within a block range. + async fn sync_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns info which can be used by the client to sync up to the tip of chain for the notes + /// they are interested in. + /// + /// Client specifies the `note_tags` they are interested in, and the block height from which to + /// search for new for matching notes for. 
The request will then return the next block containing + /// any note matching the provided tags. + /// + /// The response includes each note's metadata and inclusion proof. + /// + /// A basic note sync can be implemented by repeatedly requesting the previous response's block + /// until reaching the tip of the chain. + async fn sync_notes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. /// /// Note that only 16-bit prefixes are supported at this time. @@ -1230,20 +1267,12 @@ pub mod api_server { tonic::Response, tonic::Status, >; - /// Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. - /// - /// Client specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - async fn sync_notes( + /// Returns storage map updates for specified account and storage slots within a block range. + async fn sync_account_storage_maps( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns info which can be used by the client to sync up to the latest state of the chain @@ -1254,9 +1283,9 @@ pub mod api_server { /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point /// the client is fully synchronized with the chain. /// - /// Each update response also contains info about new notes, accounts etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. + /// Each update response also contains info about new notes, accounts etc. created. It also + /// returns Chain MMR delta that can be used to update the state of Chain MMR. This includes + /// both chain MMR peaks and chain MMR nodes. /// /// For preserving some degree of privacy, note tags contain only high /// part of hashes. Thus, returned data contains excessive notes, client can make @@ -1268,31 +1297,6 @@ pub mod api_server { tonic::Response, tonic::Status, >; - /// Returns storage map updates for specified account and storage slots within a block range. - async fn sync_account_storage_maps( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns transactions records for specific accounts within a block range. - async fn sync_transactions( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the query parameter limits configured for RPC methods. - /// - /// These define the maximum number of each parameter a method will accept. - /// Exceeding the limit will result in the request being rejected and you should instead send - /// multiple smaller requests. 
- async fn get_limits( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; } /// RPC API for the RPC component #[derive(Debug)] @@ -1410,6 +1414,45 @@ pub mod api_server { }; Box::pin(fut) } + "/rpc.Api/GetLimits" => { + #[allow(non_camel_case_types)] + struct GetLimitsSvc(pub Arc); + impl tonic::server::UnaryService<()> for GetLimitsSvc { + type Response = super::RpcLimits; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call(&mut self, request: tonic::Request<()>) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_limits(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetLimitsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/rpc.Api/CheckNullifiers" => { #[allow(non_camel_case_types)] struct CheckNullifiersSvc(pub Arc); @@ -1775,25 +1818,25 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncNullifiers" => { + "/rpc.Api/SyncTransactions" => { #[allow(non_camel_case_types)] - struct SyncNullifiersSvc(pub Arc); + struct SyncTransactionsSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService - for SyncNullifiersSvc { - type Response = super::SyncNullifiersResponse; + > tonic::server::UnaryService + for SyncTransactionsSvc { + type Response = super::SyncTransactionsResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_nullifiers(&inner, request).await + ::sync_transactions(&inner, request).await }; Box::pin(fut) } @@ -1804,7 +1847,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncNullifiersSvc(inner); + let method = SyncTransactionsSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1820,25 +1863,23 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncAccountVault" => { + "/rpc.Api/SyncNotes" => { #[allow(non_camel_case_types)] - struct SyncAccountVaultSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncAccountVaultSvc { - type Response = super::SyncAccountVaultResponse; + struct SyncNotesSvc(pub Arc); + impl tonic::server::UnaryService + for SyncNotesSvc { + type Response = super::SyncNotesResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_account_vault(&inner, request).await + ::sync_notes(&inner, request).await }; Box::pin(fut) } @@ -1849,7 +1890,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = 
self.inner.clone(); let fut = async move { - let method = SyncAccountVaultSvc(inner); + let method = SyncNotesSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1865,23 +1906,25 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncNotes" => { + "/rpc.Api/SyncNullifiers" => { #[allow(non_camel_case_types)] - struct SyncNotesSvc(pub Arc); - impl tonic::server::UnaryService - for SyncNotesSvc { - type Response = super::SyncNotesResponse; + struct SyncNullifiersSvc(pub Arc); + impl< + T: Api, + > tonic::server::UnaryService + for SyncNullifiersSvc { + type Response = super::SyncNullifiersResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_notes(&inner, request).await + ::sync_nullifiers(&inner, request).await }; Box::pin(fut) } @@ -1892,7 +1935,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncNotesSvc(inner); + let method = SyncNullifiersSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1908,23 +1951,25 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncState" => { + "/rpc.Api/SyncAccountVault" => { #[allow(non_camel_case_types)] - struct SyncStateSvc(pub Arc); - impl tonic::server::UnaryService - for SyncStateSvc { - type Response = super::SyncStateResponse; + struct SyncAccountVaultSvc(pub Arc); + impl< + T: Api, + > tonic::server::UnaryService + for SyncAccountVaultSvc { + type Response = super::SyncAccountVaultResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_state(&inner, request).await + ::sync_account_vault(&inner, request).await }; Box::pin(fut) } @@ -1935,7 +1980,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncStateSvc(inner); + let method = SyncAccountVaultSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -1996,64 +2041,23 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncTransactions" => { + "/rpc.Api/SyncState" => { #[allow(non_camel_case_types)] - struct SyncTransactionsSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService - for SyncTransactionsSvc { - type Response = super::SyncTransactionsResponse; + struct SyncStateSvc(pub Arc); + impl tonic::server::UnaryService + for SyncStateSvc { + type Response = super::SyncStateResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_transactions(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let 
fut = async move { - let method = SyncTransactionsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc.Api/GetLimits" => { - #[allow(non_camel_case_types)] - struct GetLimitsSvc(pub Arc); - impl tonic::server::UnaryService<()> for GetLimitsSvc { - type Response = super::RpcLimits; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_limits(&inner, request).await + ::sync_state(&inner, request).await }; Box::pin(fut) } @@ -2064,7 +2068,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = GetLimitsSvc(inner); + let method = SyncStateSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index be9d1d646..5fad016e1 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -1,4 +1,15 @@ // This file is @generated by prost-build. +/// Applies a block to the state. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct ApplyBlockRequest { + /// Ordered batches encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_objects::batch::OrderedBatches\]. + #[prost(bytes = "vec", tag = "1")] + pub ordered_batches: ::prost::alloc::vec::Vec, + /// Block signed by the Validator. + #[prost(message, optional, tag = "2")] + pub block: ::core::option::Option, +} /// Returns data required to prove the next block. #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockInputsRequest { @@ -1707,7 +1718,7 @@ pub mod block_producer_client { /// Applies changes of a new block to the DB and in-memory data structures. pub async fn apply_block( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { self.inner .ready() @@ -1843,7 +1854,7 @@ pub mod block_producer_server { /// Applies changes of a new block to the DB and in-memory data structures. async fn apply_block( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result, tonic::Status>; /// Retrieves block header by given block number. Optionally, it also returns the MMR path /// and current chain length to authenticate the block's inclusion. 
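For reference, `apply_block` now takes a structured request instead of a single serialized block. A sketch, assuming the generated types are reachable as `proto::store` / `proto::blockchain` and that `OrderedBatches` implements `Serializable` as the generated comment states, of how a caller might assemble the new request using the `SignedBlock` conversion added in `crates/proto/src/domain/block.rs`:

// Hypothetical helper: the ordered batches stay a serialized blob, while the
// signed block is sent as the structured blockchain::SignedBlock message.
fn make_apply_block_request(
    batches: &OrderedBatches,
    signed_block: &SignedBlock,
) -> proto::store::ApplyBlockRequest {
    use miden_protocol::utils::Serializable;

    proto::store::ApplyBlockRequest {
        ordered_batches: batches.to_bytes(),
        block: Some(signed_block.into()),
    }
}
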
@@ -1955,7 +1966,7 @@ pub mod block_producer_server { struct ApplyBlockSvc(pub Arc); impl< T: BlockProducer, - > tonic::server::UnaryService + > tonic::server::UnaryService for ApplyBlockSvc { type Response = (); type Future = BoxFuture< @@ -1964,7 +1975,7 @@ pub mod block_producer_server { >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { diff --git a/crates/remote-prover-client/src/lib.rs b/crates/remote-prover-client/src/lib.rs index d2e0d0182..a319793d9 100644 --- a/crates/remote-prover-client/src/lib.rs +++ b/crates/remote-prover-client/src/lib.rs @@ -2,7 +2,7 @@ // We allow unused imports here in order because this `macro_use` only makes sense for code // generated by prost under certain circumstances (when `tx-prover` is enabled and the environment // is not wasm) -#![allow(unused_imports)] +#![expect(unused_imports)] #[macro_use] extern crate alloc; @@ -15,7 +15,14 @@ extern crate std; use thiserror::Error; -pub mod remote_prover; +mod remote_prover; + +#[cfg(feature = "batch-prover")] +pub use remote_prover::batch_prover::RemoteBatchProver; +#[cfg(feature = "block-prover")] +pub use remote_prover::block_prover::RemoteBlockProver; +#[cfg(feature = "tx-prover")] +pub use remote_prover::tx_prover::RemoteTransactionProver; /// ERRORS /// =============================================================================================== diff --git a/crates/remote-prover-client/src/remote_prover/block_prover.rs b/crates/remote-prover-client/src/remote_prover/block_prover.rs index d1fa43548..c1562e597 100644 --- a/crates/remote-prover-client/src/remote_prover/block_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/block_prover.rs @@ -105,7 +105,7 @@ impl RemoteBlockProver { pub async fn prove( &self, tx_batches: OrderedBatches, - block_header: BlockHeader, + block_header: &BlockHeader, block_inputs: BlockInputs, ) -> Result { use miden_protocol::utils::Serializable; diff --git a/crates/remote-prover-client/src/remote_prover/generated/mod.rs b/crates/remote-prover-client/src/remote_prover/generated/mod.rs index 806afe903..2cd709029 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/mod.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/mod.rs @@ -1,4 +1,5 @@ #![allow(clippy::pedantic, reason = "generated by build.rs and tonic")] +#![allow(clippy::allow_attributes, reason = "generated by build.rs and tonic")] #[cfg(all(feature = "std", target_arch = "wasm32"))] compile_error!("The `std` feature cannot be used when targeting `wasm32`."); diff --git a/crates/rocksdb-cxx-linkage-fix/Cargo.toml b/crates/rocksdb-cxx-linkage-fix/Cargo.toml new file mode 100644 index 000000000..9e0eb23f7 --- /dev/null +++ b/crates/rocksdb-cxx-linkage-fix/Cargo.toml @@ -0,0 +1,19 @@ +[package] +authors.workspace = true +description = "Miden C++ stdlib link helper" +edition.workspace = true +homepage.workspace = true +license.workspace = true +name = "miden-node-rocksdb-cxx-linkage-fix" +readme.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[lib] +path = "src/lib.rs" + +[lints] +workspace = true + +[dependencies] diff --git a/crates/rocksdb-cxx-linkage-fix/src/lib.rs b/crates/rocksdb-cxx-linkage-fix/src/lib.rs new file mode 100644 index 000000000..35bc05d00 --- /dev/null +++ b/crates/rocksdb-cxx-linkage-fix/src/lib.rs @@ -0,0 +1,51 @@ +//! 
A temporary solution to missing c++ std library linkage when using a precompile static library +//! +//! For more information see: + +use std::env; + +pub fn configure() { + println!("cargo:rerun-if-env-changed=ROCKSDB_COMPILE"); + println!("cargo:rerun-if-env-changed=ROCKSDB_LIB_DIR"); + println!("cargo:rerun-if-env-changed=ROCKSDB_STATIC"); + println!("cargo:rerun-if-env-changed=CXXSTDLIB"); + let target = env::var("TARGET").unwrap_or_default(); + if should_link_cpp_stdlib() { + link_cpp_stdlib(&target); + } +} + +fn should_compile() -> bool { + // in sync with + if let Ok(v) = env::var("ROCKSDB_COMPILE") { + if v.to_lowercase() == "true" || v == "1" { + return true; + } + } + false +} + +fn should_link_cpp_stdlib() -> bool { + if should_compile() { + return false; + } + // the value doesn't matter + // + env::var("ROCKSDB_STATIC").is_ok() + // `ROCKSDB_LIB_DIR` is not really discriminative, it only adds extra lookup dirs for the linker +} + +fn link_cpp_stdlib(target: &str) { + // aligned with + // + if let Ok(stdlib) = env::var("CXXSTDLIB") { + println!("cargo:rustc-link-lib=dylib={stdlib}"); + } else if target.contains("apple") || target.contains("freebsd") || target.contains("openbsd") { + println!("cargo:rustc-link-lib=dylib=c++"); + } else if target.contains("linux") { + println!("cargo:rustc-link-lib=dylib=stdc++"); + } else if target.contains("aix") { + println!("cargo:rustc-link-lib=dylib=c++"); + println!("cargo:rustc-link-lib=dylib=c++abi"); + } +} diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 13d26962e..f5e3c2b82 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -294,7 +294,7 @@ impl api_server::Api for RpcService { Arc::make_mut(&mut mast).strip_decorators(); let script = NoteScript::from_parts(mast, note.script().entrypoint()); let recipient = - NoteRecipient::new(note.serial_num(), script, note.inputs().clone()); + NoteRecipient::new(note.serial_num(), script, note.storage().clone()); let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); OutputNote::Full(new_note) }, @@ -356,7 +356,7 @@ impl api_server::Api for RpcService { Arc::make_mut(&mut mast).strip_decorators(); let script = NoteScript::from_parts(mast, note.script().entrypoint()); let recipient = - NoteRecipient::new(note.serial_num(), script, note.inputs().clone()); + NoteRecipient::new(note.serial_num(), script, note.storage().clone()); let new_note = Note::new(note.assets().clone(), note.metadata().clone(), recipient); OutputNote::Full(new_note) @@ -407,8 +407,7 @@ impl api_server::Api for RpcService { request: Request, ) -> Result, Status> { use proto::rpc::account_request::account_detail_request::storage_map_detail_request::{ - SlotData::MapKeys as ProtoMapKeys, - SlotData::AllEntries as ProtoMapAllEntries + SlotData::AllEntries as ProtoMapAllEntries, SlotData::MapKeys as ProtoMapKeys, }; let request = request.into_inner(); @@ -505,7 +504,6 @@ fn out_of_range_error(err: E) -> Status { } /// Check, but don't repeat ourselves mapping the error -#[allow(clippy::result_large_err)] fn check(n: usize) -> Result<(), Status> { ::check(n).map_err(out_of_range_error) } diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index b35fe8b6d..a0b7854e5 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -19,6 +19,7 @@ use miden_node_utils::limiter::{ use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ + Account, AccountBuilder, AccountDelta, 
AccountId, @@ -28,7 +29,7 @@ use miden_protocol::account::{ }; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::testing::noop_auth_component::NoopAuthComponent; -use miden_protocol::transaction::ProvenTransactionBuilder; +use miden_protocol::transaction::{ProvenTransaction, ProvenTransactionBuilder}; use miden_protocol::utils::Serializable; use miden_protocol::vm::ExecutionProof; use miden_standards::account::wallets::BasicWallet; @@ -40,6 +41,53 @@ use url::Url; use crate::Rpc; +/// Byte offset of the account delta commitment in serialized `ProvenTransaction`. +/// Layout: `AccountId` (15) + `initial_commitment` (32) + `final_commitment` (32) = 79 +const DELTA_COMMITMENT_BYTE_OFFSET: usize = 15 + 32 + 32; + +/// Creates a minimal account and its delta for testing proven transaction building. +fn build_test_account(seed: [u8; 32]) -> (Account, AccountDelta) { + let account = AccountBuilder::new(seed) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_assets(vec![]) + .with_component(BasicWallet) + .with_auth_component(NoopAuthComponent) + .build_existing() + .unwrap(); + + let delta: AccountDelta = account.clone().try_into().unwrap(); + (account, delta) +} + +/// Creates a minimal proven transaction for testing. +/// +/// This uses `ExecutionProof::new_dummy()` and is intended for tests that +/// need to test validation logic. +fn build_test_proven_tx(account: &Account, delta: &AccountDelta) -> ProvenTransaction { + let account_id = AccountId::dummy( + [0; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + ProvenTransactionBuilder::new( + account_id, + [8; 32].try_into().unwrap(), + account.commitment(), + delta.to_commitment(), + 0.into(), + Word::default(), + test_fee(), + u32::MAX.into(), + ExecutionProof::new_dummy(), + ) + .account_update_details(AccountUpdateDetails::Delta(delta.clone())) + .build() + .unwrap() +} + #[tokio::test] async fn rpc_server_accepts_requests_without_accept_header() { // Start the RPC. @@ -199,6 +247,9 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { let (_, rpc_addr, store_addr) = start_rpc().await; let (store_runtime, _data_directory, genesis) = start_store(store_addr).await; + // Wait for the store to be ready before sending requests. + tokio::time::sleep(Duration::from_millis(100)).await; + // Override the client so that the ACCEPT header is not set. 
let mut rpc_client = miden_node_proto::clients::Builder::new(Url::parse(&format!("http://{rpc_addr}")).unwrap()) @@ -209,54 +260,19 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { .without_otel_context_injection() .connect_lazy::(); - let account_id = AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Public, - ); - - let account = AccountBuilder::new([0; 32]) - .account_type(AccountType::RegularAccountImmutableCode) - .storage_mode(AccountStorageMode::Public) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - - let other_account = AccountBuilder::new([1; 32]) - .account_type(AccountType::RegularAccountUpdatableCode) - .storage_mode(AccountStorageMode::Private) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - let incorrect_commitment_delta: AccountDelta = other_account.try_into().unwrap(); - let incorrect_commitment_delta_bytes = incorrect_commitment_delta.to_commitment().as_bytes(); + // Build a valid proven transaction + let (account, account_delta) = build_test_account([0; 32]); + let tx = build_test_proven_tx(&account, &account_delta); - let account_delta: AccountDelta = account.clone().try_into().unwrap(); - - // Send any request to the RPC. - let tx = ProvenTransactionBuilder::new( - account_id, - [8; 32].try_into().unwrap(), - account.commitment(), - account_delta.clone().to_commitment(), // delta commitment - 0.into(), - Word::default(), - test_fee(), - u32::MAX.into(), - ExecutionProof::new_dummy(), - ) - .account_update_details(AccountUpdateDetails::Delta(account_delta)) - .build() - .unwrap(); + // Create an incorrect delta commitment from a different account + let (other_account, _) = build_test_account([1; 32]); + let incorrect_delta: AccountDelta = other_account.try_into().unwrap(); + let incorrect_commitment_bytes = incorrect_delta.to_commitment().as_bytes(); + // Corrupt the transaction bytes with the incorrect delta commitment let mut tx_bytes = tx.to_bytes(); - let offset = 15 + 32 + 32; - tx_bytes[offset..offset + 32].copy_from_slice(&incorrect_commitment_delta_bytes); + tx_bytes[DELTA_COMMITMENT_BYTE_OFFSET..DELTA_COMMITMENT_BYTE_OFFSET + 32] + .copy_from_slice(&incorrect_commitment_bytes); let request = proto::transaction::ProvenTransaction { transaction: tx_bytes, @@ -295,39 +311,8 @@ async fn rpc_server_rejects_tx_submissions_without_genesis() { .without_otel_context_injection() .connect_lazy::(); - let account_id = AccountId::dummy( - [0; 15], - AccountIdVersion::Version0, - AccountType::RegularAccountImmutableCode, - AccountStorageMode::Public, - ); - - let account = AccountBuilder::new([0; 32]) - .account_type(AccountType::RegularAccountImmutableCode) - .storage_mode(AccountStorageMode::Public) - .with_assets(vec![]) - .with_component(BasicWallet) - .with_auth_component(NoopAuthComponent) - .build_existing() - .unwrap(); - - let account_delta: AccountDelta = account.clone().try_into().unwrap(); - - // Send any request to the RPC. 
- let tx = ProvenTransactionBuilder::new( - account_id, - [8; 32].try_into().unwrap(), - account.commitment(), - account_delta.clone().to_commitment(), // delta commitment - 0.into(), - Word::default(), - test_fee(), - u32::MAX.into(), - ExecutionProof::new_dummy(), - ) - .account_update_details(AccountUpdateDetails::Delta(account_delta)) - .build() - .unwrap(); + let (account, account_delta) = build_test_account([0; 32]); + let tx = build_test_proven_tx(&account, &account_delta); let request = proto::transaction::ProvenTransaction { transaction: tx.to_bytes(), @@ -439,6 +424,7 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { store_runtime.spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, @@ -479,6 +465,7 @@ async fn restart_store(store_addr: SocketAddr, data_directory: &std::path::Path) store_runtime.spawn(async move { Store { rpc_listener, + block_prover_url: None, ntx_builder_listener, block_producer_listener, data_directory: dir, diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index dd06567ea..d2a7b3e69 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -15,20 +15,25 @@ version.workspace = true workspace = true [dependencies] -anyhow = { workspace = true } -deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } -deadpool-diesel = { features = ["sqlite"], version = "0.6" } -deadpool-sync = { default-features = false, features = ["tracing"], version = "0.1" } -diesel = { features = ["numeric", "sqlite"], version = "2.3" } -diesel_migrations = { features = ["sqlite"], version = "2.3" } -fs-err = { workspace = true } -hex = { version = "0.4" } -indexmap = { workspace = true } -miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } -miden-node-proto = { workspace = true } -miden-node-proto-build = { features = ["internal"], workspace = true } -miden-node-utils = { workspace = true } -miden-standards = { workspace = true } +anyhow = { workspace = true } +deadpool = { features = ["managed", "rt_tokio_1"], workspace = true } +deadpool-diesel = { features = ["sqlite"], workspace = true } +deadpool-sync = { features = ["tracing"], workspace = true } +diesel = { features = ["numeric", "sqlite"], workspace = true } +diesel_migrations = { features = ["sqlite"], workspace = true } +fs-err = { workspace = true } +futures = { workspace = true } +hex = { version = "0.4" } +indexmap = { workspace = true } +libsqlite3-sys = { workspace = true } +miden-block-prover = { workspace = true } +miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } +miden-node-proto = { workspace = true } +miden-node-proto-build = { features = ["internal"], workspace = true } +miden-node-rocksdb-cxx-linkage-fix = { optional = true, workspace = true } +miden-node-utils = { workspace = true } +miden-remote-prover-client = { features = ["block-prover"], workspace = true } +miden-standards = { workspace = true } # TODO remove `testing` from `miden-protocol`, required for `BlockProof::new_dummy` miden-protocol = { features = ["std", "testing"], workspace = true } pretty_assertions = { workspace = true } @@ -43,6 +48,10 @@ tonic = { default-features = true, workspace = true } tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } +url = { workspace = true } + +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } 
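The build dependency added here drives the decision logic shown in `crates/rocksdb-cxx-linkage-fix/src/lib.rs` above. A rough, purely illustrative restatement of that decision as a pure function (the real crate reads `ROCKSDB_COMPILE` and `ROCKSDB_STATIC` from the build environment rather than taking parameters):

// Illustrative only: mirrors should_compile()/should_link_cpp_stdlib().
fn needs_cpp_stdlib(rocksdb_compile: Option<&str>, rocksdb_static_set: bool) -> bool {
    // ROCKSDB_COMPILE=true/1 means RocksDB is built from source, in which case
    // the C++ build already takes care of the standard library.
    let compiles_from_source = matches!(
        rocksdb_compile.map(str::to_ascii_lowercase).as_deref(),
        Some("true" | "1")
    );
    // Only a prebuilt static librocksdb needs the C++ runtime linked explicitly;
    // ROCKSDB_STATIC's value does not matter, only whether it is set.
    !compiles_from_source && rocksdb_static_set
}

#[test]
fn linkage_decision_matrix() {
    assert!(needs_cpp_stdlib(None, true));
    assert!(!needs_cpp_stdlib(Some("1"), true));
    assert!(!needs_cpp_stdlib(Some("TRUE"), true));
    assert!(!needs_cpp_stdlib(None, false));
}
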
[dev-dependencies] assert_matches = { workspace = true } @@ -58,7 +67,7 @@ termtree = { version = "0.5" } [features] default = ["rocksdb"] -rocksdb = ["miden-crypto/rocksdb"] +rocksdb = ["miden-crypto/rocksdb", "miden-node-rocksdb-cxx-linkage-fix"] [[bench]] harness = false @@ -66,7 +75,7 @@ name = "account_tree" required-features = ["rocksdb"] [package.metadata.cargo-machete] -# This is an indirect dependency for which we need to enable optimisations +# This is an indirect dependency for which we need to enable optimisations/features # via feature flags. Because we don't use it directly in code, machete # identifies it as unused. -ignored = ["miden-crypto"] +ignored = ["libsqlite3-sys", "miden-crypto", "miden-node-rocksdb-cxx-linkage-fix"] diff --git a/crates/store/build.rs b/crates/store/build.rs index d08f3fd0e..a911bea19 100644 --- a/crates/store/build.rs +++ b/crates/store/build.rs @@ -1,9 +1,12 @@ // This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in // `store/src/db/migrations.rs` to include the latest version of the migrations into the binary, see . + fn main() { println!("cargo:rerun-if-changed=./src/db/migrations"); // If we do one re-write, the default rules are disabled, // hence we need to trigger explicitly on `Cargo.toml`. // println!("cargo:rerun-if-changed=Cargo.toml"); + + miden_node_rocksdb_cxx_linkage_fix::configure(); } diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index d015408ad..2508c9d2d 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -37,7 +37,7 @@ pub type PersistentAccountTree = AccountTree std::result::Result<(), crate::errors::DatabaseError> { let migrations = conn.pending_migrations(MIGRATIONS).expect("In memory migrations never fail"); - tracing::info!(target = COMPONENT, "Applying {} migration(s)", migrations.len()); + tracing::info!(target = COMPONENT, migrations = migrations.len(), "Applying migrations"); let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { // Migrations applied successfully, verify schema hash verify_schema(conn)?; return Ok(()); }; - tracing::warn!(target = COMPONENT, "Failed to apply migration: {e:?}"); + tracing::warn!(target = COMPONENT, error = ?e, "Failed to apply migration"); // something went wrong, MIGRATIONS contains conn.revert_last_migration(MIGRATIONS) .expect("Duality is maintained by the developer"); diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 0858e71d1..40491d4d5 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -1,6 +1,7 @@ CREATE TABLE block_headers ( block_num INTEGER NOT NULL, block_header BLOB NOT NULL, + signature BLOB NOT NULL, PRIMARY KEY (block_num), CONSTRAINT block_header_block_num_is_u32 CHECK (block_num BETWEEN 0 AND 0xFFFFFFFF) @@ -59,7 +60,7 @@ CREATE TABLE notes ( consumed_at INTEGER, -- Block number when the note was consumed nullifier BLOB, -- Only known for public notes, null for private notes assets BLOB, - inputs BLOB, + storage BLOB, script_root BLOB, serial_num BLOB, diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7fc4a5cab..a9b77eb9b 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -10,7 +10,7 @@ use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; use miden_protocol::account::{AccountHeader, AccountId, 
AccountStorageHeader}; use miden_protocol::asset::{Asset, AssetVaultKey}; -use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, ProvenBlock}; +use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, SignedBlock}; use miden_protocol::crypto::merkle::SparseMerklePath; use miden_protocol::note::{ NoteDetails, @@ -30,6 +30,11 @@ use crate::db::manager::{ConnectionManager, configure_connection_on_creation}; use crate::db::migrations::apply_migrations; use crate::db::models::conv::SqlTypeConvert; use crate::db::models::queries::StorageMapValuesPage; +pub use crate::db::models::queries::{ + AccountCommitmentsPage, + NullifiersPage, + PublicAccountIdsPage, +}; use crate::db::models::{Page, queries}; use crate::errors::{DatabaseError, DatabaseSetupError, NoteSyncError, StateSyncError}; use crate::genesis::GenesisBlock; @@ -249,6 +254,7 @@ impl Db { models::queries::apply_block( conn, genesis.header(), + genesis.signature(), &[], &[], genesis.body().updated_accounts(), @@ -324,18 +330,28 @@ impl Db { Ok(me) } - /// Loads all the nullifiers from the DB. + /// Returns a page of nullifiers for tree rebuilding. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub(crate) async fn select_all_nullifiers(&self) -> Result> { - self.transact("all nullifiers", move |conn| { - let nullifiers = queries::select_all_nullifiers(conn)?; - Ok(nullifiers) + pub async fn select_nullifiers_paged( + &self, + page_size: std::num::NonZeroUsize, + after_nullifier: Option, + ) -> Result { + self.transact("read nullifiers paged", move |conn| { + queries::select_nullifiers_paged(conn, page_size, after_nullifier) }) .await } /// Loads the nullifiers that match the prefixes from the DB. - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + #[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(prefix_len, prefixes = nullifier_prefixes.len()), + ret(level = "debug"), + err + )] pub async fn select_nullifiers_by_prefix( &self, prefix_len: u32, @@ -395,20 +411,28 @@ impl Db { .await } - /// TODO marked for removal, replace with paged version + /// Returns a page of account commitments for tree rebuilding. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_all_account_commitments(&self) -> Result> { - self.transact("read all account commitments", move |conn| { - queries::select_all_account_commitments(conn) + pub async fn select_account_commitments_paged( + &self, + page_size: std::num::NonZeroUsize, + after_account_id: Option, + ) -> Result { + self.transact("read account commitments paged", move |conn| { + queries::select_account_commitments_paged(conn, page_size, after_account_id) }) .await } - /// Returns all account IDs that have public state. + /// Returns a page of public account IDs for forest rebuilding. 
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_all_public_account_ids(&self) -> Result> { - self.transact("read all public account IDs", move |conn| { - queries::select_all_public_account_ids(conn) + pub async fn select_public_account_ids_paged( + &self, + page_size: std::num::NonZeroUsize, + after_account_id: Option, + ) -> Result { + self.transact("read public account IDs paged", move |conn| { + queries::select_public_account_ids_paged(conn, page_size, after_account_id) }) .await } @@ -566,17 +590,18 @@ impl Db { &self, allow_acquire: oneshot::Sender<()>, acquire_done: oneshot::Receiver<()>, - block: ProvenBlock, + signed_block: SignedBlock, notes: Vec<(NoteRecord, Option)>, ) -> Result<()> { self.transact("apply block", move |conn| -> Result<()> { models::queries::apply_block( conn, - block.header(), + signed_block.header(), + signed_block.signature(), ¬es, - block.body().created_nullifiers(), - block.body().updated_accounts(), - block.body().transactions(), + signed_block.body().created_nullifiers(), + signed_block.body().updated_accounts(), + signed_block.body().transactions(), )?; // XXX FIXME TODO free floating mutex MUST NOT exist diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index 2e6313bf6..de6f7e950 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -14,19 +14,19 @@ //! Notice: Changing any of these are _backwards-incompatible_ changes that are not caught/covered //! by migrations! -#![allow( +#![expect( clippy::inline_always, reason = "Just unification helpers of 1-2 lines of casting types" )] -#![allow( +#![expect( dead_code, reason = "Not all converters are used bidirectionally, however, keeping them is a good thing" )] -#![allow( +#![expect( clippy::cast_sign_loss, reason = "This is the one file where we map the signed database types to the working types" )] -#![allow( +#![expect( clippy::cast_possible_wrap, reason = "We will not approach the item count where i64 and usize casting will cause issues on relevant platforms" @@ -107,7 +107,7 @@ impl SqlTypeConvert for NoteTag { #[inline(always)] fn from_raw_sql(raw: Self::Raw) -> Result { - #[allow(clippy::cast_sign_loss)] + #[expect(clippy::cast_sign_loss)] Ok(NoteTag::new(raw as u32)) } @@ -189,7 +189,7 @@ pub(crate) fn fungible_delta_to_raw_sql(delta: i64) -> i64 { } #[inline(always)] -#[allow(clippy::cast_sign_loss)] +#[expect(clippy::cast_sign_loss)] pub(crate) fn raw_sql_to_note_type(raw: i32) -> u8 { raw as u8 } diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 1f4f67533..0a252b550 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -1,4 +1,5 @@ use std::collections::BTreeMap; +use std::num::NonZeroUsize; use std::ops::RangeInclusive; use diesel::prelude::{Queryable, QueryableByName}; @@ -254,11 +255,19 @@ pub(crate) fn select_network_account_by_id( } } -/// Select all account commitments from the DB using the given [`SqliteConnection`]. -/// -/// # Returns +/// Page of account commitments returned by [`select_account_commitments_paged`]. +#[derive(Debug)] +pub struct AccountCommitmentsPage { + /// The account commitments in this page. + pub commitments: Vec<(AccountId, Word)>, + /// If `Some`, there are more results. Use this as the `after_account_id` for the next page. + pub next_cursor: Option, +} + +/// Selects account commitments with pagination. 
/// -/// The vector with the account id and corresponding commitment, or an error. +/// Returns up to `page_size` account commitments, starting after `after_account_id` if provided. +/// Results are ordered by `account_id` for stable pagination. /// /// # Raw SQL /// @@ -270,31 +279,71 @@ pub(crate) fn select_network_account_by_id( /// accounts /// WHERE /// is_latest = 1 +/// AND (account_id > :after_account_id OR :after_account_id IS NULL) /// ORDER BY -/// block_num ASC +/// account_id ASC +/// LIMIT :page_size + 1 /// ``` -pub(crate) fn select_all_account_commitments( +pub(crate) fn select_account_commitments_paged( conn: &mut SqliteConnection, -) -> Result, DatabaseError> { - let raw = SelectDsl::select( + page_size: NonZeroUsize, + after_account_id: Option, +) -> Result { + use miden_protocol::utils::Serializable; + + // Fetch one extra to determine if there are more results + #[expect(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = SelectDsl::select( schema::accounts::table, (schema::accounts::account_id, schema::accounts::account_commitment), ) .filter(schema::accounts::is_latest.eq(true)) - .order_by(schema::accounts::block_num.asc()) - .load::<(Vec, Vec)>(conn)?; + .order_by(schema::accounts::account_id.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_account_id { + query = query.filter(schema::accounts::account_id.gt(cursor.to_bytes())); + } - Result::, DatabaseError>::from_iter(raw.into_iter().map( + let raw = query.load::<(Vec, Vec)>(conn)?; + + let mut commitments = Result::, DatabaseError>::from_iter(raw.into_iter().map( |(ref account, ref commitment)| { Ok((AccountId::read_from_bytes(account)?, Word::read_from_bytes(commitment)?)) }, - )) + ))?; + + // If we got more than page_size, there are more results + let next_cursor = if commitments.len() > page_size.get() { + commitments.pop(); // Remove the extra element + commitments.last().map(|(id, _)| *id) + } else { + None + }; + + Ok(AccountCommitmentsPage { commitments, next_cursor }) +} + +/// Page of public account IDs returned by [`select_public_account_ids_paged`]. +#[derive(Debug)] +pub struct PublicAccountIdsPage { + /// The public account IDs in this page. + pub account_ids: Vec, + /// If `Some`, there are more results. Use this as the `after_account_id` for the next page. + pub next_cursor: Option, } -/// Select all account IDs that have public state. +/// Selects public account IDs with pagination. /// -/// This filters accounts in-memory after loading only the account IDs (not commitments), -/// which is more efficient than loading full commitments when only IDs are needed. +/// Returns up to `page_size` public account IDs, starting after `after_account_id` if provided. +/// Results are ordered by `account_id` for stable pagination. +/// +/// Public accounts are those with `AccountStorageMode::Public` or `AccountStorageMode::Network`. +/// We identify them by checking `code_commitment IS NOT NULL` - public accounts store their full +/// state (including `code_commitment`), while private accounts only store the `account_commitment`. 
/// /// # Raw SQL /// @@ -305,31 +354,48 @@ pub(crate) fn select_all_account_commitments( /// accounts /// WHERE /// is_latest = 1 +/// AND code_commitment IS NOT NULL +/// AND (account_id > :after_account_id OR :after_account_id IS NULL) /// ORDER BY -/// block_num ASC +/// account_id ASC +/// LIMIT :page_size + 1 /// ``` -pub(crate) fn select_all_public_account_ids( +pub(crate) fn select_public_account_ids_paged( conn: &mut SqliteConnection, -) -> Result, DatabaseError> { - // We could technically use a `LIKE` constraint for both postgres and sqlite backends, - // but diesel doesn't expose that. - let raw: Vec> = - SelectDsl::select(schema::accounts::table, schema::accounts::account_id) - .filter(schema::accounts::is_latest.eq(true)) - .order_by(schema::accounts::block_num.asc()) - .load::>(conn)?; + page_size: NonZeroUsize, + after_account_id: Option, +) -> Result { + use miden_protocol::utils::Serializable; - Result::from_iter( - raw.into_iter() - .map(|bytes| { - AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) - }) - .filter_map(|result| match result { - Ok(id) if id.has_public_state() => Some(Ok(id)), - Ok(_) => None, - Err(e) => Some(Err(e)), - }), - ) + #[expect(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = SelectDsl::select(schema::accounts::table, schema::accounts::account_id) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::code_commitment.is_not_null()) + .order_by(schema::accounts::account_id.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_account_id { + query = query.filter(schema::accounts::account_id.gt(cursor.to_bytes())); + } + + let raw = query.load::>(conn)?; + + let mut account_ids: Vec = Result::from_iter(raw.into_iter().map(|bytes| { + AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) + }))?; + + // If we got more than page_size, there are more results + let next_cursor = if account_ids.len() > page_size.get() { + account_ids.pop(); // Remove the extra element + account_ids.last().copied() + } else { + None + }; + + Ok(PublicAccountIdsPage { account_ids, next_cursor }) } /// Select account vault assets within a block range (inclusive). @@ -919,7 +985,7 @@ pub(crate) fn insert_account_storage_map_value( } /// Attention: Assumes the account details are NOT null! The schema explicitly allows this though! 
-#[allow(clippy::too_many_lines)] +#[expect(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index 9206311a1..fa1e77e85 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -161,6 +161,7 @@ fn create_test_account_with_storage() -> (Account, AccountId) { fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { use crate::db::schema::block_headers; + let secret_key = SecretKey::new(); let block_header = BlockHeader::new( 1_u8.into(), Word::default(), @@ -171,15 +172,17 @@ fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { Word::default(), Word::default(), Word::default(), - SecretKey::new().public_key(), + secret_key.public_key(), test_fee_params(), 0_u8.into(), ); + let signature = secret_key.sign(block_header.commitment()); diesel::insert_into(block_headers::table) .values(( block_headers::block_num.eq(i64::from(block_num.as_u32())), block_headers::block_header.eq(block_header.to_bytes()), + block_headers::signature.eq(signature.to_bytes()), )) .execute(conn) .expect("Failed to insert block header"); @@ -649,7 +652,11 @@ fn test_select_account_vault_at_block_historical_with_updates() { account.commitment(), AccountUpdateDetails::Delta(delta), ); - upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed"); + + for block in [block_1, block_2, block_3] { + upsert_accounts(&mut conn, std::slice::from_ref(&account_update), block) + .expect("upsert_accounts failed"); + } // Insert vault asset at block 1: vault_key_1 = 1000 tokens let vault_key_1 = AssetVaultKey::new_unchecked(Word::from([ @@ -751,7 +758,11 @@ fn test_select_account_vault_at_block_with_deletion() { account.commitment(), AccountUpdateDetails::Delta(delta), ); - upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed"); + + for block in [block_1, block_2, block_3] { + upsert_accounts(&mut conn, std::slice::from_ref(&account_update), block) + .expect("upsert_accounts failed"); + } // Insert vault asset at block 1 let vault_key = AssetVaultKey::new_unchecked(Word::from([ diff --git a/crates/store/src/db/models/queries/block_headers.rs b/crates/store/src/db/models/queries/block_headers.rs index 3c295c72b..553430ddb 100644 --- a/crates/store/src/db/models/queries/block_headers.rs +++ b/crates/store/src/db/models/queries/block_headers.rs @@ -11,6 +11,7 @@ use diesel::{ SelectableHelper, SqliteConnection, }; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; use miden_node_utils::limiter::{QueryParamBlockLimit, QueryParamLimiter}; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::utils::{Deserializable, Serializable}; @@ -128,9 +129,10 @@ pub fn select_all_block_headers( #[diesel(table_name = schema::block_headers)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] pub struct BlockHeaderRawRow { - #[allow(dead_code)] + #[expect(dead_code)] pub block_num: i64, pub block_header: Vec, + pub signature: Vec, } impl TryInto for BlockHeaderRawRow { type Error = DatabaseError; @@ -140,18 +142,29 @@ impl TryInto for BlockHeaderRawRow { } } +impl TryInto<(BlockHeader, Signature)> for BlockHeaderRawRow { + type Error = DatabaseError; + fn try_into(self) -> Result<(BlockHeader, Signature), Self::Error> { + let block_header = BlockHeader::read_from_bytes(&self.block_header[..])?; + let 
signature = Signature::read_from_bytes(&self.signature[..])?; + Ok((block_header, signature)) + } +} + #[derive(Debug, Clone, Insertable)] #[diesel(table_name = schema::block_headers)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] pub struct BlockHeaderInsert { pub block_num: i64, pub block_header: Vec, + pub signature: Vec, } -impl From<&BlockHeader> for BlockHeaderInsert { - fn from(block_header: &BlockHeader) -> Self { +impl From<(&BlockHeader, &Signature)> for BlockHeaderInsert { + fn from(from: (&BlockHeader, &Signature)) -> Self { Self { - block_num: block_header.block_num().to_raw_sql(), - block_header: block_header.to_bytes(), + block_num: from.0.block_num().to_raw_sql(), + block_header: from.0.to_bytes(), + signature: from.1.to_bytes(), } } } @@ -174,8 +187,9 @@ impl From<&BlockHeader> for BlockHeaderInsert { pub(crate) fn insert_block_header( conn: &mut SqliteConnection, block_header: &BlockHeader, + signature: &Signature, ) -> Result { - let block_header = BlockHeaderInsert::from(block_header); + let block_header = BlockHeaderInsert::from((block_header, signature)); let count = diesel::insert_into(schema::block_headers::table) .values(&[block_header]) .execute(conn)?; diff --git a/crates/store/src/db/models/queries/mod.rs b/crates/store/src/db/models/queries/mod.rs index 0f29b0015..2cec3523e 100644 --- a/crates/store/src/db/models/queries/mod.rs +++ b/crates/store/src/db/models/queries/mod.rs @@ -25,12 +25,13 @@ //! transaction, any nesting of further `transaction(conn, || {})` has no effect and should be //! considered unnecessary boilerplate by default. -#![allow( +#![expect( clippy::needless_pass_by_value, reason = "The parent scope does own it, passing by value avoids additional boilerplate" )] use diesel::SqliteConnection; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; use miden_protocol::account::AccountId; use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; use miden_protocol::note::Nullifier; @@ -47,6 +48,7 @@ pub use block_headers::*; mod accounts; pub use accounts::*; mod nullifiers; +pub use nullifiers::NullifiersPage; pub(crate) use nullifiers::*; mod notes; pub(crate) use notes::*; @@ -59,6 +61,7 @@ pub(crate) use notes::*; pub(crate) fn apply_block( conn: &mut SqliteConnection, block_header: &BlockHeader, + signature: &Signature, notes: &[(NoteRecord, Option)], nullifiers: &[Nullifier], accounts: &[BlockAccountUpdate], @@ -66,7 +69,7 @@ pub(crate) fn apply_block( ) -> Result { let mut count = 0; // Note: ordering here is important as the relevant tables have FK dependencies. 
- count += insert_block_header(conn, block_header)?; + count += insert_block_header(conn, block_header, signature)?; count += upsert_accounts(conn, accounts, block_header.block_num())?; count += insert_scripts(conn, notes.iter().map(|(note, _)| note))?; count += insert_notes(conn, notes)?; diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index a2ab7b1bb..083cb15aa 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -1,4 +1,4 @@ -#![allow( +#![expect( clippy::cast_possible_wrap, reason = "We will not approach the item count where i64 and usize cause issues" )] @@ -41,10 +41,10 @@ use miden_protocol::note::{ NoteDetails, NoteId, NoteInclusionProof, - NoteInputs, NoteMetadata, NoteRecipient, NoteScript, + NoteStorage, NoteTag, NoteType, Nullifier, @@ -203,7 +203,7 @@ pub(crate) fn select_notes_since_block_by_tag_and_sender( /// notes.tag, /// notes.attachment, /// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script @@ -283,7 +283,7 @@ pub(crate) fn select_existing_note_commitments( /// notes.tag, /// notes.attachment, /// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script @@ -427,7 +427,7 @@ pub(crate) fn select_note_script_by_root( /// notes.tag, /// notes.attachment, /// notes.assets, -/// notes.inputs, +/// notes.storage, /// notes.serial_num, /// notes.inclusion_path, /// note_scripts.script, @@ -441,14 +441,7 @@ pub(crate) fn select_note_script_by_root( /// ORDER BY notes.rowid ASC /// LIMIT ?4 /// ``` -#[allow( - clippy::cast_sign_loss, - reason = "We need custom SQL statements which has given types that we need to convert" -)] -#[allow( - clippy::too_many_lines, - reason = "Lines will be reduced when schema is updated to simplify logic" -)] +#[expect(clippy::cast_sign_loss, reason = "row_id is a positive integer")] pub(crate) fn select_unconsumed_network_notes_by_account_id( conn: &mut SqliteConnection, account_id: AccountId, @@ -460,7 +453,7 @@ pub(crate) fn select_unconsumed_network_notes_by_account_id( diesel::dsl::sql::("notes.rowid >= ") .bind::(page.token.unwrap_or_default() as i64); - #[allow( + #[expect( clippy::items_after_statements, reason = "It's only relevant for a single call function" )] @@ -470,7 +463,7 @@ pub(crate) fn select_unconsumed_network_notes_by_account_id( i64, // rowid (from sql::("notes.rowid")) ); - #[allow( + #[expect( clippy::items_after_statements, reason = "It's only relevant for a single call function" )] @@ -550,7 +543,6 @@ pub struct NoteSyncRecordRawRow { pub inclusion_path: Vec, // SparseMerklePath } -#[allow(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] impl TryInto for NoteSyncRecordRawRow { type Error = DatabaseError; fn try_into(self) -> Result { @@ -575,7 +567,7 @@ impl TryInto for NoteSyncRecordRawRow { #[diesel(check_for_backend(Sqlite))] pub struct NoteDetailsRawRow { pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, } @@ -601,7 +593,7 @@ pub struct NoteRecordWithScriptRawJoined { // #[diesel(embed)] // pub metadata: NoteMetadataRaw, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, // #[diesel(embed)] @@ -623,7 +615,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined tag, attachment, assets, - inputs, + storage, serial_num, inclusion_path, } = 
note; @@ -638,7 +630,7 @@ impl From<(NoteRecordRawRow, Option>)> for NoteRecordWithScriptRawJoined tag, attachment, assets, - inputs, + storage, serial_num, inclusion_path, script, @@ -666,7 +658,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { attachment, // metadata ^^^, assets, - inputs, + storage, serial_num, //details ^^^, inclusion_path, @@ -675,7 +667,7 @@ impl TryInto for NoteRecordWithScriptRawJoined { } = raw; let index = BlockNoteIndexRawRow { batch_index, note_index }; let metadata = NoteMetadataRawRow { note_type, sender, tag, attachment }; - let details = NoteDetailsRawRow { assets, inputs, serial_num }; + let details = NoteDetailsRawRow { assets, storage, serial_num }; let metadata = metadata.try_into()?; let committed_at = BlockNumber::from_raw_sql(committed_at)?; @@ -684,16 +676,16 @@ impl TryInto for NoteRecordWithScriptRawJoined { let script = script.map(|script| NoteScript::read_from_bytes(&script[..])).transpose()?; let details = if let NoteDetailsRawRow { assets: Some(assets), - inputs: Some(inputs), + storage: Some(storage), serial_num: Some(serial_num), } = details { - let inputs = NoteInputs::read_from_bytes(&inputs[..])?; + let storage = NoteStorage::read_from_bytes(&storage[..])?; let serial_num = Word::read_from_bytes(&serial_num[..])?; let script = script.ok_or_else(|| { DatabaseError::conversiont_from_sql::(None) })?; - let recipient = NoteRecipient::new(serial_num, script, inputs); + let recipient = NoteRecipient::new(serial_num, script, storage); let assets = NoteAssets::read_from_bytes(&assets[..])?; Some(NoteDetails::new(assets, recipient)) } else { @@ -730,7 +722,7 @@ pub struct NoteRecordRawRow { pub attachment: Vec, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub serial_num: Option>, pub inclusion_path: Vec, @@ -746,7 +738,7 @@ pub struct NoteMetadataRawRow { attachment: Vec, } -#[allow(clippy::cast_sign_loss)] +#[expect(clippy::cast_sign_loss)] impl TryInto for NoteMetadataRawRow { type Error = DatabaseError; fn try_into(self) -> Result { @@ -767,7 +759,7 @@ pub struct BlockNoteIndexRawRow { pub note_index: i32, // index within batch } -#[allow(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] +#[expect(clippy::cast_sign_loss, reason = "Indices are cast to usize for ease of use")] impl TryInto for BlockNoteIndexRawRow { type Error = DatabaseError; fn try_into(self) -> Result { @@ -791,7 +783,6 @@ impl TryInto for BlockNoteIndexRawRow { /// /// The [`SqliteConnection`] object is not consumed. It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -822,7 +813,6 @@ pub(crate) fn insert_notes( /// /// The [`SqliteConnection`] object is not consumed. It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -868,7 +858,7 @@ pub struct NoteInsertRow { pub consumed_at: Option, pub nullifier: Option>, pub assets: Option>, - pub inputs: Option>, + pub storage: Option>, pub script_root: Option>, pub serial_num: Option>, } @@ -902,7 +892,7 @@ impl From<(NoteRecord, Option)> for NoteInsertRow { consumed_at: None::, // New notes are always unconsumed. 
nullifier: nullifier.as_ref().map(Nullifier::to_bytes), assets: note.details.as_ref().map(|d| d.assets().to_bytes()), - inputs: note.details.as_ref().map(|d| d.inputs().to_bytes()), + storage: note.details.as_ref().map(|d| d.storage().to_bytes()), script_root: note.details.as_ref().map(|d| d.script().root().to_bytes()), serial_num: note.details.as_ref().map(|d| d.serial_num().to_bytes()), } diff --git a/crates/store/src/db/models/queries/nullifiers.rs b/crates/store/src/db/models/queries/nullifiers.rs index 5ab578537..84e89ebad 100644 --- a/crates/store/src/db/models/queries/nullifiers.rs +++ b/crates/store/src/db/models/queries/nullifiers.rs @@ -1,3 +1,4 @@ +use std::num::NonZeroUsize; use std::ops::RangeInclusive; use diesel::query_dsl::methods::SelectDsl; @@ -128,6 +129,7 @@ pub(crate) fn select_nullifiers_by_prefix( /// ORDER BY /// block_num ASC /// ``` +#[cfg(test)] pub(crate) fn select_all_nullifiers( conn: &mut SqliteConnection, ) -> Result, DatabaseError> { @@ -137,6 +139,67 @@ pub(crate) fn select_all_nullifiers( vec_raw_try_into(nullifiers_raw) } +/// Page of nullifiers returned by [`select_nullifiers_paged`]. +#[derive(Debug)] +pub struct NullifiersPage { + /// The nullifiers in this page. + pub nullifiers: Vec, + /// If `Some`, there are more results. Use this as the `after_nullifier` for the next page. + pub next_cursor: Option, +} + +/// Selects nullifiers with pagination. +/// +/// Returns up to `page_size` nullifiers, starting after `after_nullifier` if provided. +/// Results are ordered by nullifier bytes for stable pagination. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// nullifier, +/// block_num +/// FROM +/// nullifiers +/// WHERE +/// (nullifier > :after_nullifier OR :after_nullifier IS NULL) +/// ORDER BY +/// nullifier ASC +/// LIMIT :page_size + 1 +/// ``` +pub(crate) fn select_nullifiers_paged( + conn: &mut SqliteConnection, + page_size: NonZeroUsize, + after_nullifier: Option, +) -> Result { + // Fetch one extra to determine if there are more results + #[expect(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = + SelectDsl::select(schema::nullifiers::table, NullifierWithoutPrefixRawRow::as_select()) + .order_by(schema::nullifiers::nullifier.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_nullifier { + query = query.filter(schema::nullifiers::nullifier.gt(cursor.to_bytes())); + } + + let nullifiers_raw = query.load::(conn)?; + let mut nullifiers: Vec = vec_raw_try_into(nullifiers_raw)?; + + // If we got more than page_size, there are more results + let next_cursor = if nullifiers.len() > page_size.get() { + nullifiers.pop(); // Remove the extra element + nullifiers.last().map(|info| info.nullifier) + } else { + None + }; + + Ok(NullifiersPage { nullifiers, next_cursor }) +} + /// Insert nullifiers for a block into the database. /// /// # Parameters @@ -163,7 +226,6 @@ pub(crate) fn select_all_nullifiers( /// INSERT INTO nullifiers (nullifier, nullifier_prefix, block_num) /// VALUES (?1, ?2, ?3) /// ``` -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index be132e1a5..1331d7ea5 100644 --- a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -150,7 +150,6 @@ impl TryInto for TransactionRecordRaw { /// /// The [`SqliteConnection`] object is not consumed. 
It's up to the caller to commit or rollback the /// transaction. -#[allow(clippy::too_many_lines)] #[tracing::instrument( target = COMPONENT, skip_all, @@ -161,7 +160,7 @@ pub(crate) fn insert_transactions( conn: &mut SqliteConnection, block_num: BlockNumber, transactions: &OrderedTransactionHeaders, ) -> Result { - #[allow(clippy::into_iter_on_ref)] // false positive + #[expect(clippy::into_iter_on_ref)] // false positive let rows: Vec<_> = transactions .as_slice() .into_iter() @@ -187,7 +186,7 @@ pub struct TransactionSummaryRowInsert { } impl TransactionSummaryRowInsert { - #[allow( + #[expect( clippy::cast_possible_wrap, reason = "We will not approach the item count where i64 and usize cause issues" )] diff --git a/crates/store/src/db/models/utils.rs b/crates/store/src/db/models/utils.rs index c472940e4..1ace2abaa 100644 --- a/crates/store/src/db/models/utils.rs +++ b/crates/store/src/db/models/utils.rs @@ -14,7 +14,7 @@ pub(crate) fn vec_raw_try_into>( ) } -#[allow(dead_code)] +#[expect(dead_code)] /// Deserialize an iterable container full of byte blobs `B` to types `T` pub(crate) fn deserialize_raw_vec, T: Deserializable>( raw: impl IntoIterator, @@ -38,7 +38,6 @@ pub fn get_nullifier_prefix(nullifier: &Nullifier) -> u16 { /// Converts a slice of length `N` to an array, returns `None` if invariant /// isn't upheld. -#[allow(dead_code)] pub fn slice_to_array(bytes: &[u8]) -> Option<[u8; N]> { if bytes.len() != N { return None; @@ -48,7 +47,7 @@ pub fn slice_to_array(bytes: &[u8]) -> Option<[u8; N]> { Some(arr) } -#[allow(dead_code)] +#[expect(dead_code)] #[inline] pub fn from_be_to_u32(bytes: &[u8]) -> Option { slice_to_array::<4>(bytes).map(u32::from_be_bytes) @@ -62,8 +61,8 @@ pub struct PragmaSchemaVersion { } /// Returns the schema version of the database. -#[allow(dead_code)] -#[allow( +#[expect(dead_code)] +#[expect( clippy::cast_sign_loss, reason = "schema version is always positive and we will never reach 0xEFFF_..._FFFF" )] diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 0ae4b8e1e..ebb8c280f 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -47,6 +47,7 @@ diesel::table! { block_headers (block_num) { block_num -> BigInt, block_header -> Binary, + signature -> Binary, } } @@ -74,7 +75,7 @@ diesel::table!
{ consumed_at -> Nullable, nullifier -> Nullable, assets -> Nullable, - inputs -> Nullable, + storage -> Nullable, script_root -> Nullable, serial_num -> Nullable, } diff --git a/crates/store/src/db/schema_hash.rs b/crates/store/src/db/schema_hash.rs index 28e480fc0..bcb417ce9 100644 --- a/crates/store/src/db/schema_hash.rs +++ b/crates/store/src/db/schema_hash.rs @@ -107,10 +107,20 @@ pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificati // Log specific differences at debug level for obj in &missing { - tracing::debug!(target: COMPONENT, name = %obj.name, "Missing or modified: {}", obj.sql); + tracing::debug!( + target: COMPONENT, + name = %obj.name, + sql = %obj.sql, + "Missing or modified" + ); } for obj in &extra { - tracing::debug!(target: COMPONENT, name = %obj.name, "Extra or modified: {}", obj.sql); + tracing::debug!( + target: COMPONENT, + name = %obj.name, + sql = %obj.sql, + "Extra or modified" + ); } return Err(SchemaVerificationError::Mismatch { diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 6bd26dda1..65e93c283 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -1,6 +1,3 @@ -#![allow(clippy::similar_names, reason = "naming dummy test values is hard")] -#![allow(clippy::too_many_lines, reason = "test code can be long")] - use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; @@ -101,7 +98,8 @@ fn create_block(conn: &mut SqliteConnection, block_num: BlockNumber) { 11_u8.into(), ); - conn.transaction(|conn| queries::insert_block_header(conn, &block_header)) + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + conn.transaction(|conn| queries::insert_block_header(conn, &block_header, &dummy_signature)) .unwrap(); } @@ -169,7 +167,7 @@ fn sql_select_transactions() { queries::select_transactions_by_accounts_and_block_range( conn, &[AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap()], - BlockNumber::from(0)..=BlockNumber::from(2), + BlockNumber::GENESIS..=BlockNumber::from(2), ) .unwrap() } @@ -501,9 +499,10 @@ fn sync_account_vault_basic_validation() { create_block(conn, block_mid); create_block(conn, block_to); - // Create accounts - one public for vault assets, one private for testing - queries::upsert_accounts(conn, &[mock_block_account_update(public_account_id, 0)], block_from) - .unwrap(); + for block in [block_from, block_mid, block_to] { + queries::upsert_accounts(conn, &[mock_block_account_update(public_account_id, 0)], block) + .unwrap(); + } // Create some test vault assets let vault_key_1 = AssetVaultKey::new_unchecked(num_to_word(100)); @@ -767,7 +766,8 @@ fn db_block_header() { ); // test insertion - queries::insert_block_header(conn, &block_header).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + queries::insert_block_header(conn, &block_header, &dummy_signature).unwrap(); // test fetch unknown block header let block_number = 1; @@ -798,7 +798,8 @@ fn db_block_header() { 21_u8.into(), ); - queries::insert_block_header(conn, &block_header2).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header2.commitment()); + queries::insert_block_header(conn, &block_header2, &dummy_signature).unwrap(); let res = queries::select_block_header_by_block_num(conn, None).unwrap(); assert_eq!(res.unwrap(), block_header2); @@ -824,7 +825,7 @@ fn db_account() { let res = queries::select_accounts_by_block_range( conn, &account_ids, - BlockNumber::from(0)..=u32::MAX.into(), + BlockNumber::GENESIS..=u32::MAX.into(), ) 
.unwrap(); assert!(res.is_empty()); @@ -850,7 +851,7 @@ fn db_account() { let res = queries::select_accounts_by_block_range( conn, &account_ids, - BlockNumber::from(0)..=u32::MAX.into(), + BlockNumber::GENESIS..=u32::MAX.into(), ) .unwrap(); assert_eq!( @@ -889,7 +890,7 @@ fn notes() { let block_num_1 = 1.into(); create_block(conn, block_num_1); - let block_range = BlockNumber::from(0)..=BlockNumber::from(1); + let block_range = BlockNumber::GENESIS..=BlockNumber::from(1); // test empty table let (res, last_included_block) = @@ -1048,6 +1049,9 @@ fn sql_account_storage_map_values_insertion() { let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); + queries::upsert_accounts(conn, &[mock_block_account_update(account_id, 0)], block1).unwrap(); + queries::upsert_accounts(conn, &[mock_block_account_update(account_id, 0)], block2).unwrap(); + let slot_name = StorageSlotName::mock(3); let key1 = Word::from([1u32, 2, 3, 4]); let key2 = Word::from([5u32, 6, 7, 8]); @@ -1119,6 +1123,11 @@ fn select_storage_map_sync_values() { let block2 = BlockNumber::from(2); let block3 = BlockNumber::from(3); + for block in [block1, block2, block3] { + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block) + .unwrap(); + } + // Insert data across multiple blocks using individual inserts // Block 1: key1 -> value1, key2 -> value2 queries::insert_account_storage_map_value( @@ -1872,7 +1881,8 @@ fn db_roundtrip_block_header() { ); // Insert - queries::insert_block_header(&mut conn, &block_header).unwrap(); + let dummy_signature = SecretKey::new().sign(block_header.commitment()); + queries::insert_block_header(&mut conn, &block_header, &dummy_signature).unwrap(); // Retrieve let retrieved = @@ -2021,7 +2031,7 @@ fn db_roundtrip_transactions() { let retrieved = queries::select_transactions_by_accounts_and_block_range( &mut conn, &[account_id], - BlockNumber::from(0)..=BlockNumber::from(2), + BlockNumber::GENESIS..=BlockNumber::from(2), ) .unwrap(); @@ -2087,6 +2097,8 @@ fn db_roundtrip_storage_map_values() { create_block(&mut conn, block_num); let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num) + .unwrap(); let slot_name = StorageSlotName::mock(5); let key = num_to_word(12345); let value = num_to_word(67890); diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 679650580..cbd98af75 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -5,6 +5,7 @@ use deadpool_sync::InteractError; use miden_node_proto::domain::account::NetworkAccountError; use miden_node_proto::domain::block::InvalidBlockRange; use miden_node_proto::errors::{ConversionError, GrpcError}; +use miden_node_utils::ErrorReport; use miden_node_utils::limiter::QueryLimitError; use miden_protocol::Word; use miden_protocol::account::AccountId; @@ -313,6 +314,16 @@ pub enum ApplyBlockError { DbUpdateTaskFailed(String), } +impl From for Status { + fn from(err: ApplyBlockError) -> Self { + match err { + ApplyBlockError::InvalidBlockError(_) => Status::invalid_argument(err.as_report()), + + _ => Status::internal(err.as_report()), + } + } +} + #[derive(Error, Debug, GrpcError)] pub enum GetBlockHeaderError { #[error("database error")] @@ -478,6 +489,26 @@ pub enum GetBlockByNumberError { DeserializationFailed(#[from] DeserializationError), } +// GET ACCOUNT ERRORS +// 
================================================================================================ + +#[derive(Debug, Error, GrpcError)] +pub enum GetAccountError { + #[error("database error")] + #[grpc(internal)] + DatabaseError(#[from] DatabaseError), + #[error("malformed request")] + DeserializationFailed(#[from] ConversionError), + #[error("account {0} not found at block {1}")] + AccountNotFound(AccountId, BlockNumber), + #[error("account {0} is not public")] + AccountNotPublic(AccountId), + #[error("block {0} is unknown")] + UnknownBlock(BlockNumber), + #[error("block {0} has been pruned")] + BlockPruned(BlockNumber), +} + // GET NOTES BY ID ERRORS // ================================================================================================ @@ -570,6 +601,83 @@ pub enum SchemaVerificationError { }, } +#[cfg(test)] +mod get_account_error_tests { + use miden_protocol::account::AccountId; + use miden_protocol::block::BlockNumber; + use miden_protocol::testing::account_id::AccountIdBuilder; + use tonic::Status; + + use super::GetAccountError; + + fn test_account_id() -> AccountId { + AccountIdBuilder::new().build_with_seed([1; 32]) + } + + #[test] + fn unknown_block_returns_invalid_argument() { + let block = BlockNumber::from(999); + let err = GetAccountError::UnknownBlock(block); + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + assert!(!status.metadata().is_empty() || !status.details().is_empty()); + } + + #[test] + fn block_pruned_returns_invalid_argument() { + let block = BlockNumber::from(1); + let err = GetAccountError::BlockPruned(block); + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn account_not_public_returns_invalid_argument() { + let err = GetAccountError::AccountNotPublic(test_account_id()); + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn account_not_found_returns_invalid_argument_with_block_context() { + let account_id = test_account_id(); + let block = BlockNumber::from(5); + let err = GetAccountError::AccountNotFound(account_id, block); + let msg = err.to_string(); + assert!(msg.contains("not found"), "error message should mention 'not found'"); + assert!(msg.contains("block"), "error message should include block context"); + + let status: Status = err.into(); + assert_eq!(status.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn each_variant_has_unique_discriminant() { + let account_id = test_account_id(); + let block = BlockNumber::from(1); + + let errors = [ + GetAccountError::AccountNotFound(account_id, block), + GetAccountError::AccountNotPublic(account_id), + GetAccountError::UnknownBlock(block), + GetAccountError::BlockPruned(block), + ]; + + let codes: Vec = errors.iter().map(|e| e.api_error().api_code()).collect(); + + // All non-internal variants should have unique, non-zero discriminants + for &code in &codes { + assert_ne!(code, 0, "non-internal variants should not map to Internal (0)"); + } + + // Check uniqueness + let mut sorted = codes.clone(); + sorted.sort_unstable(); + sorted.dedup(); + assert_eq!(sorted.len(), codes.len(), "all error variants should have unique codes"); + } +} + // Do not scope for `cfg(test)` - if it the traitbounds don't suffice the issue will already appear // in the compilation of the library or binary, which would prevent getting to compiling the // following code. 
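
The error conversions above follow one convention: problems caused by the request surface as `InvalidArgument`, while everything else collapses into an opaque `Internal`. A minimal, self-contained sketch of that mapping pattern is below; the `LookupError` enum and its variants are illustrative stand-ins (assuming `thiserror` and `tonic` as dependencies), not the crate's actual `GrpcError` derive:

```rust
use thiserror::Error;
use tonic::Status;

// Hypothetical domain error, standing in for something like `GetAccountError`.
#[derive(Debug, Error)]
enum LookupError {
    #[error("entity {0} not found")]
    NotFound(u64),
    #[error("database error")]
    Database(#[source] std::io::Error),
}

impl From<LookupError> for Status {
    fn from(err: LookupError) -> Self {
        match err {
            // Caller mistakes map to a client-facing gRPC code.
            LookupError::NotFound(_) => Status::invalid_argument(err.to_string()),
            // Infrastructure failures stay behind an opaque `Internal`.
            LookupError::Database(_) => Status::internal(err.to_string()),
        }
    }
}

fn main() {
    let status: Status = LookupError::NotFound(42).into();
    assert_eq!(status.code(), tonic::Code::InvalidArgument);
}
```

The unit tests added above pin this boundary by asserting on the resulting `tonic::Code` for each variant, so the error text can evolve without breaking clients that branch on the code.
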
@@ -591,7 +699,7 @@ mod compile_tests { /// Ensure all enum variants remain compat with the desired /// trait bounds. Otherwise one gets very unwieldy errors. - #[allow(dead_code)] + #[expect(dead_code)] fn assumed_trait_bounds_upheld() { fn ensure_is_error(_phony: PhantomData) where diff --git a/crates/store/src/genesis/config/errors.rs b/crates/store/src/genesis/config/errors.rs index b39495c87..4d360e925 100644 --- a/crates/store/src/genesis/config/errors.rs +++ b/crates/store/src/genesis/config/errors.rs @@ -12,7 +12,6 @@ use miden_standards::account::wallets::BasicWalletError; use crate::genesis::config::TokenSymbolStr; -#[allow(missing_docs, reason = "Error variants must be descriptive by themselves")] #[derive(Debug, thiserror::Error)] pub enum GenesisConfigError { #[error(transparent)] diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index e7abe8b58..283208182 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -96,7 +96,7 @@ impl GenesisConfig { /// Convert the in memory representation into the new genesis state /// /// Also returns the set of secrets for the generated accounts. - #[allow(clippy::too_many_lines)] + #[expect(clippy::too_many_lines)] pub fn into_state( self, signer: S, @@ -158,7 +158,7 @@ impl GenesisConfig { for (index, WalletConfig { has_updatable_code, storage_mode, assets }) in wallet_configs.into_iter().enumerate() { - tracing::debug!("Adding wallet account {index} with {assets:?}"); + tracing::debug!(index, assets = ?assets, "Adding wallet account"); let mut rng = ChaCha20Rng::from_seed(rand::random()); let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng)); diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index a63c92276..330a63d80 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -290,7 +290,7 @@ impl InnerForest { /// account, returns an empty SMT root. 
fn get_latest_vault_root(&self, account_id: AccountId) -> Word { self.vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::from(u32::MAX))) + .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::MAX)) .next_back() .map_or_else(Self::empty_smt_root, |(_, root)| *root) } @@ -449,7 +449,7 @@ impl InnerForest { self.storage_map_roots .range( (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ..=(account_id, slot_name.clone(), BlockNumber::MAX), ) .next_back() .map_or_else(Self::empty_smt_root, |(_, root)| *root) @@ -465,7 +465,7 @@ impl InnerForest { self.storage_entries .range( (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ..(account_id, slot_name.clone(), BlockNumber::MAX), ) .next_back() .map(|(_, entries)| entries.clone()) diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index 1d345dcf0..1cc028ac3 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -11,6 +11,7 @@ pub mod state; pub use accounts::PersistentAccountTree; pub use accounts::{AccountTreeWithHistory, HistoricalError, InMemoryAccountTree}; pub use genesis::GenesisState; +pub use server::block_prover_client::BlockProver; pub use server::{DataDirectory, Store}; // CONSTANTS diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index 292842e77..56bfcafb4 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -6,13 +6,15 @@ use miden_node_proto::generated as proto; use miden_node_utils::ErrorReport; use miden_protocol::Word; use miden_protocol::account::AccountId; -use miden_protocol::block::BlockNumber; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockInputs, BlockNumber}; use miden_protocol::note::Nullifier; use tonic::{Request, Response, Status}; use tracing::{info, instrument}; -use crate::COMPONENT; +use crate::errors::GetBlockInputsError; use crate::state::State; +use crate::{BlockProver, COMPONENT}; // STORE API // ================================================================================================ @@ -20,6 +22,7 @@ use crate::state::State; #[derive(Clone)] pub struct StoreApi { pub(super) state: Arc, + pub(super) block_prover: Arc, } impl StoreApi { @@ -43,6 +46,40 @@ impl StoreApi { mmr_path: mmr_proof.map(|p| Into::into(&p.merkle_path)), })) } + + /// Retrieves block inputs from state based on the contents of the supplied ordered batches. + pub(crate) async fn block_inputs_from_ordered_batches( + &self, + batches: &OrderedBatches, + ) -> Result { + // Construct fields required to retrieve block inputs. + let mut account_ids = BTreeSet::new(); + let mut nullifiers = Vec::new(); + let mut unauthenticated_note_commitments = BTreeSet::new(); + let mut reference_blocks = BTreeSet::new(); + + for batch in batches.as_slice() { + account_ids.extend(batch.updated_accounts()); + nullifiers.extend(batch.created_nullifiers()); + reference_blocks.insert(batch.reference_block_num()); + + for note in batch.input_notes().iter() { + if let Some(header) = note.header() { + unauthenticated_note_commitments.insert(header.commitment()); + } + } + } + + // Retrieve block inputs from the store. 
+ self.state + .get_block_inputs( + account_ids.into_iter().collect(), + nullifiers, + unauthenticated_note_commitments, + reference_blocks, + ) + .await + } } // UTILITIES @@ -138,8 +175,13 @@ where .map_err(Into::into) } -#[allow(clippy::result_large_err)] -#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(nullifiers = nullifiers.len()), + err +)] pub fn validate_nullifiers(nullifiers: &[proto::primitives::Digest]) -> Result, E> where E: From + std::fmt::Display, @@ -152,8 +194,13 @@ where .map_err(Into::into) } -#[allow(clippy::result_large_err)] -#[instrument(level = "debug", target = COMPONENT, skip_all, err)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(notes = notes.len()), + err +)] pub fn validate_note_commitments(notes: &[proto::primitives::Digest]) -> Result, Status> { notes .iter() @@ -162,7 +209,12 @@ pub fn validate_note_commitments(notes: &[proto::primitives::Digest]) -> Result< .map_err(|_| invalid_argument("Digest field is not in the modulus range")) } -#[instrument(level = "debug",target = COMPONENT, skip_all)] +#[instrument( + level = "debug", + target = COMPONENT, + skip_all, + fields(block_numbers = block_numbers.len()) +)] pub fn read_block_numbers(block_numbers: &[u32]) -> BTreeSet { BTreeSet::from_iter(block_numbers.iter().map(|raw_number| BlockNumber::from(*raw_number))) } diff --git a/crates/store/src/server/block_producer.rs b/crates/store/src/server/block_producer.rs index 9dd2b39c4..25f6b05f6 100644 --- a/crates/store/src/server/block_producer.rs +++ b/crates/store/src/server/block_producer.rs @@ -1,12 +1,16 @@ use std::convert::Infallible; +use futures::TryFutureExt; +use miden_crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_node_proto::errors::MissingFieldHelper; use miden_node_proto::generated::store::block_producer_server; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; use miden_node_utils::ErrorReport; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; -use miden_protocol::block::{BlockNumber, ProvenBlock}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockBody, BlockHeader, BlockNumber, SignedBlock}; use miden_protocol::utils::Deserializable; use tonic::{Request, Response, Status}; use tracing::Instrument; @@ -40,33 +44,69 @@ impl block_producer_server::BlockProducer for StoreApi { /// Updates the local DB by inserting a new block header and the related data. async fn apply_block( &self, - request: Request, + request: Request, ) -> Result, Status> { let request = request.into_inner(); - - let block = ProvenBlock::read_from_bytes(&request.block).map_err(|err| { - Status::invalid_argument(err.as_report_context("block deserialization error")) - })?; + // Read ordered batches. + let ordered_batches = + OrderedBatches::read_from_bytes(&request.ordered_batches).map_err(|err| { + Status::invalid_argument( + err.as_report_context("failed to deserialize ordered batches"), + ) + })?; + // Read block. + let block = request + .block + .ok_or(proto::store::ApplyBlockRequest::missing_field(stringify!(block)))?; + // Read block header. + let header: BlockHeader = block + .header + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(header)))? + .try_into()?; + // Read block body. + let body: BlockBody = block + .body + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(body)))? + .try_into()?; + // Read signature. 
+ let signature: Signature = block + .signature + .ok_or(proto::blockchain::SignedBlock::missing_field(stringify!(signature)))? + .try_into()?; + + // Get block inputs from ordered batches. + let block_inputs = + self.block_inputs_from_ordered_batches(&ordered_batches).await.map_err(|err| { + Status::invalid_argument( + err.as_report_context("failed to get block inputs from ordered batches"), + ) + })?; let span = tracing::Span::current(); - span.set_attribute("block.number", block.header().block_num()); - span.set_attribute("block.commitment", block.header().commitment()); - span.set_attribute("block.accounts.count", block.body().updated_accounts().len()); - span.set_attribute("block.output_notes.count", block.body().output_notes().count()); - span.set_attribute("block.nullifiers.count", block.body().created_nullifiers().len()); - - // We perform the apply_block work in a separate task. This prevents the caller cancelling - // the request and thereby cancelling the task at an arbitrary point of execution. + span.set_attribute("block.number", header.block_num()); + span.set_attribute("block.commitment", header.commitment()); + span.set_attribute("block.accounts.count", body.updated_accounts().len()); + span.set_attribute("block.output_notes.count", body.output_notes().count()); + span.set_attribute("block.nullifiers.count", body.created_nullifiers().len()); + + // We perform the apply/prove block work in a separate task. This prevents the caller + // cancelling the request and thereby cancelling the task at an arbitrary point of + // execution. // // Normally this shouldn't be a problem, however our apply_block isn't quite ACID compliant // so things get a bit messy. This is more a temporary hack-around to minimize this risk. let this = self.clone(); - tokio::spawn( + // TODO(sergerad): Use block proof. + let _block_proof = tokio::spawn( async move { + // SAFETY: The header, body, and signature are assumed to + // correspond to each other because they are provided by the Block + // Producer. + let signed_block = SignedBlock::new_unchecked(header.clone(), body, signature); // TODO(sergerad): Use `SignedBlock::new()` when available. + // Note: This is an internal endpoint, so its safe to expose the full error + // report. this.state - .apply_block(block) - .await - .map(Response::new) + .apply_block(signed_block) .inspect_err(|err| { span.set_error(err); }) @@ -75,11 +115,15 @@ impl block_producer_server::BlockProducer for StoreApi { ApplyBlockError::InvalidBlockError(_) => tonic::Code::InvalidArgument, _ => tonic::Code::Internal, }; - - // This is an internal endpoint, so its safe to expose the full error - // report. Status::new(code, err.as_report()) }) + .and_then(|_| { + this.block_prover + .prove(ordered_batches, block_inputs, &header) + .map_err(|err| Status::new(tonic::Code::Internal, err.as_report())) + }) + .await + .map(Response::new) } .in_current_span(), ) @@ -87,7 +131,8 @@ impl block_producer_server::BlockProducer for StoreApi { .map_err(|err| { tonic::Status::internal(err.as_report_context("joining apply_block task failed")) }) - .flatten() + .flatten()?; + Ok(Response::new(())) } /// Returns data needed by the block producer to construct and prove the next block. 
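
The `tokio::spawn` indirection in `apply_block` above exists so that a client dropping the RPC future cannot cancel the store update and proving work halfway through: only the `JoinHandle` is awaited inside the request handler. A stripped-down sketch of that pattern follows; `do_critical_work` is a hypothetical placeholder, and `tokio` plus `tonic` are assumed as dependencies:

```rust
use tonic::Status;

// Placeholder for work that must not be torn down part-way (e.g. a DB write plus proving).
async fn do_critical_work() -> Result<(), Status> {
    Ok(())
}

async fn handler() -> Result<(), Status> {
    // The spawned task is owned by the runtime, not by this future: if the caller drops
    // `handler`'s future (request cancelled), the task still runs to completion.
    let handle = tokio::spawn(async move { do_critical_work().await });

    // Awaiting the JoinHandle yields Result<Result<(), Status>, JoinError>; the outer
    // layer only reports a panic or forced shutdown of the task itself.
    handle
        .await
        .map_err(|join_err| Status::internal(join_err.to_string()))?
}

#[tokio::main]
async fn main() -> Result<(), Status> {
    handler().await
}
```

The trade-off is that the work keeps consuming resources even after the client has gone away, which is why the handler still surfaces the task's own error through the joined result rather than fire-and-forgetting it.
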
diff --git a/crates/store/src/server/block_prover_client.rs b/crates/store/src/server/block_prover_client.rs new file mode 100644 index 000000000..5af15ac43 --- /dev/null +++ b/crates/store/src/server/block_prover_client.rs @@ -0,0 +1,55 @@ +use miden_block_prover::{BlockProverError, LocalBlockProver}; +use miden_protocol::batch::OrderedBatches; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockProof}; +use miden_remote_prover_client::{RemoteBlockProver, RemoteProverClientError}; +use tracing::instrument; + +use crate::COMPONENT; + +#[derive(Debug, thiserror::Error)] +pub enum StoreProverError { + #[error("local proving failed")] + LocalProvingFailed(#[source] BlockProverError), + #[error("remote proving failed")] + RemoteProvingFailed(#[source] RemoteProverClientError), +} + +// BLOCK PROVER +// ================================================================================================ + +/// Block prover which allows for proving via either local or remote backend. +/// +/// The local proving variant is intended for development and testing purposes. +/// The remote proving variant is intended for production use. +pub enum BlockProver { + Local(LocalBlockProver), + Remote(RemoteBlockProver), +} + +impl BlockProver { + pub fn local() -> Self { + Self::Local(LocalBlockProver::new(0)) + } + + pub fn remote(endpoint: impl Into) -> Self { + Self::Remote(RemoteBlockProver::new(endpoint)) + } + + #[instrument(target = COMPONENT, skip_all, err)] + pub async fn prove( + &self, + tx_batches: OrderedBatches, + block_inputs: BlockInputs, + block_header: &BlockHeader, + ) -> Result { + match self { + Self::Local(prover) => Ok(prover + .prove(tx_batches, block_header, block_inputs) + .map_err(StoreProverError::LocalProvingFailed)?), + Self::Remote(prover) => Ok(prover + .prove(tx_batches, block_header, block_inputs) + .await + .map_err(StoreProverError::RemoteProvingFailed)?), + } + } +} diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index b4b5798db..3a284ceff 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -18,15 +18,17 @@ use tokio::task::JoinSet; use tokio_stream::wrappers::TcpListenerStream; use tower_http::trace::TraceLayer; use tracing::{info, instrument}; +use url::Url; use crate::blocks::BlockStore; use crate::db::Db; use crate::errors::ApplyBlockError; use crate::state::State; -use crate::{COMPONENT, GenesisState}; +use crate::{BlockProver, COMPONENT, GenesisState}; mod api; mod block_producer; +pub mod block_prover_client; mod ntx_builder; mod rpc_api; @@ -35,6 +37,8 @@ pub struct Store { pub rpc_listener: TcpListener, pub ntx_builder_listener: TcpListener, pub block_producer_listener: TcpListener, + /// URL for the Block Prover client. Uses local prover if `None`. + pub block_prover_url: Option, pub data_directory: PathBuf, /// Server-side timeout for an individual gRPC request. /// @@ -100,14 +104,25 @@ impl Store { .context("failed to load state")?, ); - let rpc_service = - store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); + // Initialize local or remote block prover. 
+ let block_prover = if let Some(url) = self.block_prover_url { + Arc::new(BlockProver::remote(url)) + } else { + Arc::new(BlockProver::local()) + }; + + let rpc_service = store::rpc_server::RpcServer::new(api::StoreApi { + state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), + }); let ntx_builder_service = store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), }); let block_producer_service = store::block_producer_server::BlockProducerServer::new(api::StoreApi { state: Arc::clone(&state), + block_prover: Arc::clone(&block_prover), }); let reflection_service = tonic_reflection::server::Builder::configure() .register_file_descriptor_set(store_rpc_api_descriptor()) diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index a0fefa0e7..6a61b4daf 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -16,7 +16,12 @@ use tracing::debug; use crate::COMPONENT; use crate::db::models::Page; -use crate::errors::{GetNetworkAccountIdsError, GetNoteScriptByRootError, GetWitnessesError}; +use crate::errors::{ + GetAccountError, + GetNetworkAccountIdsError, + GetNoteScriptByRootError, + GetWitnessesError, +}; use crate::server::api::{ StoreApi, internal_error, @@ -167,7 +172,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); - let account_request = request.try_into()?; + let account_request = request.try_into().map_err(GetAccountError::DeserializationFailed)?; let proof = self.state.get_account(account_request).await?; diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 1f9f19aec..6c78e1ebf 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -17,6 +17,7 @@ use tracing::{debug, info}; use crate::COMPONENT; use crate::errors::{ CheckNullifiersError, + GetAccountError, GetBlockByNumberError, GetNoteScriptByRootError, GetNotesByIdError, @@ -250,7 +251,7 @@ impl rpc_server::Rpc for StoreApi { ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); - let account_request = request.try_into()?; + let account_request = request.try_into().map_err(GetAccountError::DeserializationFailed)?; let account_data = self.state.get_account(account_request).await?; @@ -327,7 +328,7 @@ impl rpc_server::Rpc for StoreApi { let storage_maps_page = self .state - .get_storage_map_sync_values(account_id, block_range) + .sync_account_storage_maps(account_id, block_range) .await .map_err(SyncAccountStorageMapsError::from)?; diff --git a/crates/store/src/state/apply_block.rs b/crates/store/src/state/apply_block.rs new file mode 100644 index 000000000..145432c97 --- /dev/null +++ b/crates/store/src/state/apply_block.rs @@ -0,0 +1,293 @@ +use std::sync::Arc; + +use miden_node_utils::ErrorReport; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::SignedBlock; +use miden_protocol::note::NoteDetails; +use miden_protocol::transaction::OutputNote; +use miden_protocol::utils::Serializable; +use tokio::sync::oneshot; +use tracing::{Instrument, info, info_span, instrument}; + +use crate::db::NoteRecord; +use crate::errors::{ApplyBlockError, InvalidBlockError}; +use crate::state::State; +use crate::{COMPONENT, HistoricalError}; + +impl State { + /// Apply changes of a new block to the DB and in-memory data structures. 
+    ///
+    /// ## Note on state consistency
+    ///
+    /// The server contains in-memory representations of the existing trees. The in-memory
+    /// representation must be kept consistent with the committed data; this is necessary to
+    /// provide consistent results for all endpoints. In order to achieve consistency, the
+    /// following steps are used:
+    ///
+    /// - the request data is validated prior to starting any modifications.
+    /// - the block is saved into the store in parallel with updating the DB, but before
+    ///   committing. This block is considered a candidate and is not yet available for reading
+    ///   because the latest block pointer has not been updated yet.
+    /// - a transaction is opened in the DB and the writes are started.
+    /// - while the transaction is not committed, concurrent reads are allowed on both the DB and
+    ///   the in-memory representations, which are consistent at this stage.
+    /// - prior to committing the changes to the DB, an exclusive lock on the in-memory data is
+    ///   acquired, preventing concurrent reads of the in-memory data, since it would be
+    ///   out-of-sync w.r.t. the DB.
+    /// - the DB transaction is committed, and requests that read only from the DB can proceed to
+    ///   use the fresh data.
+    /// - the in-memory structures are updated, including the latest block pointer, and the lock
+    ///   is released.
+    // TODO: This span is logged in a root span, we should connect it to the parent span.
+    #[expect(clippy::too_many_lines)]
+    #[instrument(target = COMPONENT, skip_all, err)]
+    pub async fn apply_block(&self, signed_block: SignedBlock) -> Result<(), ApplyBlockError> {
+        let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?;
+
+        let header = signed_block.header();
+        let body = signed_block.body();
+
+        // Validate that header and body match.
+        let tx_commitment = body.transactions().commitment();
+        if header.tx_commitment() != tx_commitment {
+            return Err(InvalidBlockError::InvalidBlockTxCommitment {
+                expected: tx_commitment,
+                actual: header.tx_commitment(),
+            }
+            .into());
+        }
+
+        let block_num = header.block_num();
+        let block_commitment = header.commitment();
+
+        // Validate that the applied block is the next block in sequence.
+        let prev_block = self
+            .db
+            .select_block_header_by_block_num(None)
+            .await?
+            .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?;
+        let expected_block_num = prev_block.block_num().child();
+        if block_num != expected_block_num {
+            return Err(InvalidBlockError::NewBlockInvalidBlockNum {
+                expected: expected_block_num,
+                submitted: block_num,
+            }
+            .into());
+        }
+        if header.prev_block_commitment() != prev_block.commitment() {
+            return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into());
+        }
+
+        // Save the block to the block store. If the DB transaction is rolled back, the
+        // in-memory state will be unchanged, but the block might still have been written into
+        // the block store. Thus, such blocks should be considered block candidates, not
+        // finalized blocks, so we must check against the latest block when reading a block
+        // from the store.
+        let signed_block_bytes = signed_block.to_bytes();
+        let store = Arc::clone(&self.block_store);
+        let block_save_task = tokio::spawn(
+            async move { store.save_block(block_num, &signed_block_bytes).await }.in_current_span(),
+        );
+
+        // Scope to read in-memory data, compute mutations required for updating account
+        // and nullifier trees, and validate the request.
+ let ( + nullifier_tree_old_root, + nullifier_tree_update, + account_tree_old_root, + account_tree_update, + ) = { + let inner = self.inner.read().await; + + let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered(); + + // nullifiers can be produced only once + let duplicate_nullifiers: Vec<_> = body + .created_nullifiers() + .iter() + .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) + .copied() + .collect(); + if !duplicate_nullifiers.is_empty() { + return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into()); + } + + // compute updates for the in-memory data structures + + // new_block.chain_root must be equal to the chain MMR root prior to the update + let peaks = inner.blockchain.peaks(); + if peaks.hash_peaks() != header.chain_commitment() { + return Err(InvalidBlockError::NewBlockInvalidChainCommitment.into()); + } + + // compute update for nullifier tree + let nullifier_tree_update = inner + .nullifier_tree + .compute_mutations( + body.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), + ) + .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; + + if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { + // We do our best here to notify the serve routine, if it doesn't care (dropped the + // receiver) we can't do much. + let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidNullifierRoot, + )); + return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); + } + + // compute update for account tree + let account_tree_update = inner + .account_tree + .compute_mutations( + body.updated_accounts() + .iter() + .map(|update| (update.account_id(), update.final_state_commitment())), + ) + .map_err(|e| match e { + HistoricalError::AccountTreeError(err) => { + InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) + }, + HistoricalError::MerkleError(_) => { + panic!("Unexpected MerkleError during account tree mutation computation") + }, + })?; + + if account_tree_update.as_mutation_set().root() != header.account_root() { + let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidAccountRoot, + )); + return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); + } + + ( + inner.nullifier_tree.root(), + nullifier_tree_update, + inner.account_tree.root_latest(), + account_tree_update, + ) + }; + + // Build note tree. + let note_tree = body.compute_block_note_tree(); + if note_tree.root() != header.note_root() { + return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); + } + + let notes = body + .output_notes() + .map(|(note_index, note)| { + let (details, nullifier) = match note { + OutputNote::Full(note) => { + (Some(NoteDetails::from(note)), Some(note.nullifier())) + }, + OutputNote::Header(_) => (None, None), + note @ OutputNote::Partial(_) => { + return Err(InvalidBlockError::InvalidOutputNoteType(Box::new( + note.clone(), + ))); + }, + }; + + let inclusion_path = note_tree.open(note_index); + + let note_record = NoteRecord { + block_num, + note_index, + note_id: note.id().as_word(), + note_commitment: note.commitment(), + metadata: note.metadata().clone(), + details, + inclusion_path, + }; + + Ok((note_record, nullifier)) + }) + .collect::, InvalidBlockError>>()?; + + // Signals the transaction is ready to be committed, and the write lock can be acquired. 
+ let (allow_acquire, acquired_allowed) = oneshot::channel::<()>(); + // Signals the write lock has been acquired, and the transaction can be committed. + let (inform_acquire_done, acquire_done) = oneshot::channel::<()>(); + + // Extract public account updates with deltas before block is moved into async task. + // Private accounts are filtered out since they don't expose their state changes. + let account_deltas = + Vec::from_iter(body.updated_accounts().iter().filter_map( + |update| match update.details() { + AccountUpdateDetails::Delta(delta) => Some(delta.clone()), + AccountUpdateDetails::Private => None, + }, + )); + + // The DB and in-memory state updates need to be synchronized and are partially + // overlapping. Namely, the DB transaction only proceeds after this task acquires the + // in-memory write lock. This requires the DB update to run concurrently, so a new task is + // spawned. + let db = Arc::clone(&self.db); + let db_update_task = tokio::spawn( + async move { db.apply_block(allow_acquire, acquire_done, signed_block, notes).await } + .in_current_span(), + ); + + // Wait for the message from the DB update task, that we ready to commit the DB transaction. + acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?; + + // Awaiting the block saving task to complete without errors. + block_save_task.await??; + + // Scope to update the in-memory data. + async move { + // We need to hold the write lock here to prevent inconsistency between the in-memory + // state and the DB state. Thus, we need to wait for the DB update task to complete + // successfully. + let mut inner = self.inner.write().await; + + // We need to check that neither the nullifier tree nor the account tree have changed + // while we were waiting for the DB preparation task to complete. If either of them + // did change, we do not proceed with in-memory and database updates, since it may + // lead to an inconsistent state. + if inner.nullifier_tree.root() != nullifier_tree_old_root + || inner.account_tree.root_latest() != account_tree_old_root + { + return Err(ApplyBlockError::ConcurrentWrite); + } + + // Notify the DB update task that the write lock has been acquired, so it can commit + // the DB transaction. + inform_acquire_done + .send(()) + .map_err(|_| ApplyBlockError::DbUpdateTaskFailed("Receiver was dropped".into()))?; + + // TODO: shutdown #91 + // Await for successful commit of the DB transaction. If the commit fails, we mustn't + // change in-memory state, so we return a block applying error and don't proceed with + // in-memory updates. + db_update_task + .await? 
+ .map_err(|err| ApplyBlockError::DbUpdateTaskFailed(err.as_report()))?; + + // Update the in-memory data structures after successful commit of the DB transaction + inner + .nullifier_tree + .apply_mutations(nullifier_tree_update) + .expect("Unreachable: old nullifier tree root must be checked before this step"); + inner + .account_tree + .apply_mutations(account_tree_update) + .expect("Unreachable: old account tree root must be checked before this step"); + inner.blockchain.push(block_commitment); + + Ok(()) + } + .in_current_span() + .await?; + + self.forest.write().await.apply_block_updates(block_num, account_deltas)?; + + info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); + + Ok(()) + } +} diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 504ea0631..66c5efb44 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -9,15 +9,16 @@ //! data exists, otherwise rebuilt from the database and persisted. use std::future::Future; +use std::num::NonZeroUsize; use std::path::Path; -use miden_protocol::Word; use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::NullifierTree; use miden_protocol::block::{BlockHeader, BlockNumber, Blockchain}; #[cfg(not(feature = "rocksdb"))] use miden_protocol::crypto::merkle::smt::MemoryStorage; use miden_protocol::crypto::merkle::smt::{LargeSmt, LargeSmtError, SmtStorage}; +use miden_protocol::{Felt, FieldElement, Word}; #[cfg(feature = "rocksdb")] use tracing::info; use tracing::instrument; @@ -41,6 +42,18 @@ pub const ACCOUNT_TREE_STORAGE_DIR: &str = "accounttree"; /// Directory name for the nullifier tree storage within the data directory. pub const NULLIFIER_TREE_STORAGE_DIR: &str = "nullifiertree"; +/// Page size for loading account commitments from the database during tree rebuilding. +/// This limits memory usage when rebuilding trees with millions of accounts. +const ACCOUNT_COMMITMENTS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(10_000).unwrap(); + +/// Page size for loading nullifiers from the database during tree rebuilding. +/// This limits memory usage when rebuilding trees with millions of nullifiers. +const NULLIFIERS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(10_000).unwrap(); + +/// Page size for loading public account IDs from the database during forest rebuilding. +/// This limits memory usage when rebuilding with millions of public accounts. +const PUBLIC_ACCOUNT_IDS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(1_000).unwrap(); + // STORAGE TYPE ALIAS // ================================================================================================ @@ -66,6 +79,14 @@ pub fn account_tree_large_smt_error_to_init_error(e: LargeSmtError) -> StateInit } } +/// Converts a block number to the leaf value format used in the nullifier tree. +/// +/// This matches the format used by `NullifierBlock::from(BlockNumber)::into()`, +/// which is `[Felt::from(block_num), 0, 0, 0]`. 
+fn block_num_to_nullifier_leaf(block_num: BlockNumber) -> Word { + Word::from([Felt::from(block_num), Felt::ZERO, Felt::ZERO, Felt::ZERO]) +} + // STORAGE LOADER TRAIT // ================================================================================================ @@ -103,27 +124,82 @@ impl StorageLoader for MemoryStorage { Ok(MemoryStorage::default()) } + #[instrument(target = COMPONENT, skip_all)] async fn load_account_tree( self, db: &mut Db, ) -> Result>, StateInitializationError> { - let account_data = db.select_all_account_commitments().await?; - let smt_entries = account_data - .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - let smt = LargeSmt::with_entries(self, smt_entries) + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load account commitments in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db + .select_account_commitments_paged(ACCOUNT_COMMITMENTS_PAGE_SIZE, cursor) + .await?; + + cursor = page.next_cursor; + if page.commitments.is_empty() { + break; + } + + let entries = page + .commitments + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) } + // TODO: Make the loading methodology for account and nullifier trees consistent. + // Currently we use `NullifierTree::new_unchecked()` for nullifiers but `AccountTree::new()` + // for accounts. Consider using `NullifierTree::with_storage_from_entries()` for consistency. 
+ #[instrument(target = COMPONENT, skip_all)] async fn load_nullifier_tree( self, db: &mut Db, ) -> Result>, StateInitializationError> { - let nullifiers = db.select_all_nullifiers().await?; - let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); - NullifierTree::with_storage_from_entries(self, entries) - .map_err(StateInitializationError::FailedToCreateNullifierTree) + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) + .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load nullifiers in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db.select_nullifiers_paged(NULLIFIERS_PAGE_SIZE, cursor).await?; + + cursor = page.next_cursor; + if page.nullifiers.is_empty() { + break; + } + + let entries = page.nullifiers.into_iter().map(|info| { + (info.nullifier.as_word(), block_num_to_nullifier_leaf(info.block_num)) + }); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + + Ok(NullifierTree::new_unchecked(smt)) } } @@ -141,6 +217,7 @@ impl StorageLoader for RocksDbStorage { .map_err(|e| StateInitializationError::AccountTreeIoError(e.to_string())) } + #[instrument(target = COMPONENT, skip_all)] async fn load_account_tree( self, db: &mut Db, @@ -156,15 +233,42 @@ impl StorageLoader for RocksDbStorage { } info!(target: COMPONENT, "RocksDB account tree storage is empty, populating from SQLite"); - let account_data = db.select_all_account_commitments().await?; - let smt_entries = account_data - .into_iter() - .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - let smt = LargeSmt::with_entries(self, smt_entries) + + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load account commitments in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db + .select_account_commitments_paged(ACCOUNT_COMMITMENTS_PAGE_SIZE, cursor) + .await?; + + cursor = page.next_cursor; + if page.commitments.is_empty() { + break; + } + + let entries = page + .commitments + .into_iter() + .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree) } + #[instrument(target = COMPONENT, skip_all)] async fn load_nullifier_tree( self, db: &mut Db, @@ -179,10 +283,36 @@ impl StorageLoader for RocksDbStorage { } info!(target: COMPONENT, "RocksDB nullifier tree storage is empty, populating from SQLite"); - let nullifiers = db.select_all_nullifiers().await?; - let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); - NullifierTree::with_storage_from_entries(self, entries) - .map_err(StateInitializationError::FailedToCreateNullifierTree) + + let mut smt = LargeSmt::with_entries(self, std::iter::empty()) + .map_err(account_tree_large_smt_error_to_init_error)?; + + // Load nullifiers in pages to avoid loading millions of entries at once + let mut cursor = None; + loop { + let page = db.select_nullifiers_paged(NULLIFIERS_PAGE_SIZE, cursor).await?; + + cursor = 
page.next_cursor; + if page.nullifiers.is_empty() { + break; + } + + let entries = page.nullifiers.into_iter().map(|info| { + (info.nullifier.as_word(), block_num_to_nullifier_leaf(info.block_num)) + }); + + let mutations = smt + .compute_mutations(entries) + .map_err(account_tree_large_smt_error_to_init_error)?; + smt.apply_mutations(mutations) + .map_err(account_tree_large_smt_error_to_init_error)?; + + if cursor.is_none() { + break; + } + } + + Ok(NullifierTree::new_unchecked(smt)) } } @@ -223,23 +353,38 @@ pub async fn load_smt_forest( ) -> Result { use miden_protocol::account::delta::AccountDelta; - let public_account_ids = db.select_all_public_account_ids().await?; - - // Acquire write lock once for the entire initialization let mut forest = InnerForest::new(); + let mut cursor = None; + + loop { + let page = db.select_public_account_ids_paged(PUBLIC_ACCOUNT_IDS_PAGE_SIZE, cursor).await?; - // Process each account - for account_id in public_account_ids { - // Get the full account from the database - let account_info = db.select_account(account_id).await?; - let account = account_info.details.expect("public accounts always have details in DB"); + if page.account_ids.is_empty() { + break; + } - // Convert the full account to a full-state delta - let delta = - AccountDelta::try_from(account).expect("accounts from DB should not have seeds"); + // Process each account in this page + for account_id in page.account_ids { + // TODO: Loading the full account from the database is inefficient and will need to + // go away. + let account_info = db.select_account(account_id).await?; + let account = account_info + .details + .ok_or(StateInitializationError::PublicAccountMissingDetails(account_id))?; + + // Convert the full account to a full-state delta + let delta = AccountDelta::try_from(account).map_err(|e| { + StateInitializationError::AccountToDeltaConversionFailed(e.to_string()) + })?; + + // Use the unified update method (will recognize it's a full-state delta) + forest.update_account(block_num, &delta)?; + } - // Use the unified update method (will recognize it's a full-state delta) - forest.update_account(block_num, &delta)?; + cursor = page.next_cursor; + if cursor.is_none() { + break; + } } Ok(forest) diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index b584f37b4..40f6f29e6 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -21,59 +21,51 @@ use miden_node_proto::domain::account::{ StorageMapRequest, }; use miden_node_proto::domain::batch::BatchInputs; -use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; use miden_protocol::Word; -use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{AccountId, StorageMapWitness, StorageSlotName}; use miden_protocol::asset::{AssetVaultKey, AssetWitness}; use miden_protocol::block::account_tree::AccountWitness; use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; -use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; -use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain}; +use miden_protocol::crypto::merkle::mmr::{MmrPeaks, MmrProof, PartialMmr}; use miden_protocol::crypto::merkle::smt::{LargeSmt, SmtProof, SmtStorage}; -use miden_protocol::note::{NoteDetails, NoteId, NoteScript, Nullifier}; -use miden_protocol::transaction::{OutputNote, 
PartialBlockchain}; -use miden_protocol::utils::Serializable; -use tokio::sync::{Mutex, RwLock, oneshot}; -use tracing::{Instrument, info, info_span, instrument}; +use miden_protocol::note::{NoteId, NoteScript, Nullifier}; +use miden_protocol::transaction::PartialBlockchain; +use tokio::sync::{Mutex, RwLock}; +use tracing::{info, instrument}; -use crate::accounts::{AccountTreeWithHistory, HistoricalError}; +use crate::accounts::AccountTreeWithHistory; use crate::blocks::BlockStore; use crate::db::models::Page; -use crate::db::models::queries::StorageMapValuesPage; -use crate::db::{ - AccountVaultValue, - Db, - NoteRecord, - NoteSyncUpdate, - NullifierInfo, - StateSyncUpdate, -}; +use crate::db::{Db, NoteRecord, NullifierInfo}; use crate::errors::{ ApplyBlockError, DatabaseError, + GetAccountError, GetBatchInputsError, GetBlockHeaderError, GetBlockInputsError, GetCurrentBlockchainDataError, - InvalidBlockError, - NoteSyncError, StateInitializationError, - StateSyncError, }; use crate::inner_forest::{InnerForest, WitnessError}; use crate::{COMPONENT, DataDirectory}; mod loader; -pub use loader::{ +use loader::{ ACCOUNT_TREE_STORAGE_DIR, NULLIFIER_TREE_STORAGE_DIR, StorageLoader, TreeStorage, + load_mmr, + load_smt_forest, + verify_tree_consistency, }; -use loader::{load_mmr, load_smt_forest, verify_tree_consistency}; + +mod apply_block; +mod sync_state; // STRUCTURES // ================================================================================================ @@ -190,294 +182,6 @@ impl State { }) } - // STATE MUTATOR - // -------------------------------------------------------------------------------------------- - - /// Apply changes of a new block to the DB and in-memory data structures. - /// - /// ## Note on state consistency - /// - /// The server contains in-memory representations of the existing trees, the in-memory - /// representation must be kept consistent with the committed data, this is necessary so to - /// provide consistent results for all endpoints. In order to achieve consistency, the - /// following steps are used: - /// - /// - the request data is validated, prior to starting any modifications. - /// - block is being saved into the store in parallel with updating the DB, but before - /// committing. This block is considered as candidate and not yet available for reading - /// because the latest block pointer is not updated yet. - /// - a transaction is open in the DB and the writes are started. - /// - while the transaction is not committed, concurrent reads are allowed, both the DB and the - /// in-memory representations, which are consistent at this stage. - /// - prior to committing the changes to the DB, an exclusive lock to the in-memory data is - /// acquired, preventing concurrent reads to the in-memory data, since that will be - /// out-of-sync w.r.t. the DB. - /// - the DB transaction is committed, and requests that read only from the DB can proceed to - /// use the fresh data. - /// - the in-memory structures are updated, including the latest block pointer and the lock is - /// released. - // TODO: This span is logged in a root span, we should connect it to the parent span. 
- #[allow(clippy::too_many_lines)] - #[instrument(target = COMPONENT, skip_all, err)] - pub async fn apply_block(&self, block: ProvenBlock) -> Result<(), ApplyBlockError> { - let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?; - - let header = block.header(); - - let tx_commitment = block.body().transactions().commitment(); - - if header.tx_commitment() != tx_commitment { - return Err(InvalidBlockError::InvalidBlockTxCommitment { - expected: tx_commitment, - actual: header.tx_commitment(), - } - .into()); - } - - let block_num = header.block_num(); - let block_commitment = header.commitment(); - - // ensures the right block header is being processed - let prev_block = self - .db - .select_block_header_by_block_num(None) - .await? - .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?; - - let expected_block_num = prev_block.block_num().child(); - if block_num != expected_block_num { - return Err(InvalidBlockError::NewBlockInvalidBlockNum { - expected: expected_block_num, - submitted: block_num, - } - .into()); - } - if header.prev_block_commitment() != prev_block.commitment() { - return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into()); - } - - let block_data = block.to_bytes(); - - // Save the block to the block store. In a case of a rolled-back DB transaction, the - // in-memory state will be unchanged, but the block might still be written into the - // block store. Thus, such block should be considered as block candidates, but not - // finalized blocks. So we should check for the latest block when getting block from - // the store. - let store = Arc::clone(&self.block_store); - let block_save_task = tokio::spawn( - async move { store.save_block(block_num, &block_data).await }.in_current_span(), - ); - - // scope to read in-memory data, compute mutations required for updating account - // and nullifier trees, and validate the request - let ( - nullifier_tree_old_root, - nullifier_tree_update, - account_tree_old_root, - account_tree_update, - ) = { - let inner = self.inner.read().await; - - let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered(); - - // nullifiers can be produced only once - let duplicate_nullifiers: Vec<_> = block - .body() - .created_nullifiers() - .iter() - .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) - .copied() - .collect(); - if !duplicate_nullifiers.is_empty() { - return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into()); - } - - // compute updates for the in-memory data structures - - // new_block.chain_root must be equal to the chain MMR root prior to the update - let peaks = inner.blockchain.peaks(); - if peaks.hash_peaks() != header.chain_commitment() { - return Err(InvalidBlockError::NewBlockInvalidChainCommitment.into()); - } - - // compute update for nullifier tree - let nullifier_tree_update = inner - .nullifier_tree - .compute_mutations( - block - .body() - .created_nullifiers() - .iter() - .map(|nullifier| (*nullifier, block_num)), - ) - .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; - - if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { - // We do our best here to notify the serve routine, if it doesn't care (dropped the - // receiver) we can't do much. 
- let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( - InvalidBlockError::NewBlockInvalidNullifierRoot, - )); - return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); - } - - // compute update for account tree - let account_tree_update = inner - .account_tree - .compute_mutations( - block - .body() - .updated_accounts() - .iter() - .map(|update| (update.account_id(), update.final_state_commitment())), - ) - .map_err(|e| match e { - HistoricalError::AccountTreeError(err) => { - InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) - }, - HistoricalError::MerkleError(_) => { - panic!("Unexpected MerkleError during account tree mutation computation") - }, - })?; - - if account_tree_update.as_mutation_set().root() != header.account_root() { - let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( - InvalidBlockError::NewBlockInvalidAccountRoot, - )); - return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); - } - - ( - inner.nullifier_tree.root(), - nullifier_tree_update, - inner.account_tree.root_latest(), - account_tree_update, - ) - }; - - // build note tree - let note_tree = block.body().compute_block_note_tree(); - if note_tree.root() != header.note_root() { - return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); - } - - let notes = block - .body() - .output_notes() - .map(|(note_index, note)| { - let (details, nullifier) = match note { - OutputNote::Full(note) => { - (Some(NoteDetails::from(note)), Some(note.nullifier())) - }, - OutputNote::Header(_) => (None, None), - note @ OutputNote::Partial(_) => { - return Err(InvalidBlockError::InvalidOutputNoteType(Box::new( - note.clone(), - ))); - }, - }; - - let inclusion_path = note_tree.open(note_index); - - let note_record = NoteRecord { - block_num, - note_index, - note_id: note.id().as_word(), - note_commitment: note.commitment(), - metadata: note.metadata().clone(), - details, - inclusion_path, - }; - - Ok((note_record, nullifier)) - }) - .collect::, InvalidBlockError>>()?; - - // Signals the transaction is ready to be committed, and the write lock can be acquired - let (allow_acquire, acquired_allowed) = oneshot::channel::<()>(); - // Signals the write lock has been acquired, and the transaction can be committed - let (inform_acquire_done, acquire_done) = oneshot::channel::<()>(); - - // Extract public account updates with deltas before block is moved into async task. - // Private accounts are filtered out since they don't expose their state changes. - let account_deltas = - Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| { - match update.details() { - AccountUpdateDetails::Delta(delta) => Some(delta.clone()), - AccountUpdateDetails::Private => None, - } - })); - - // The DB and in-memory state updates need to be synchronized and are partially - // overlapping. Namely, the DB transaction only proceeds after this task acquires the - // in-memory write lock. This requires the DB update to run concurrently, so a new task is - // spawned. 
- let db = Arc::clone(&self.db); - let db_update_task = tokio::spawn( - async move { db.apply_block(allow_acquire, acquire_done, block, notes).await } - .in_current_span(), - ); - - // Wait for the message from the DB update task, that we ready to commit the DB transaction - acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?; - - // Awaiting the block saving task to complete without errors - block_save_task.await??; - - // Scope to update the in-memory data - async move { - // We need to hold the write lock here to prevent inconsistency between the in-memory - // state and the DB state. Thus, we need to wait for the DB update task to complete - // successfully. - let mut inner = self.inner.write().await; - - // We need to check that neither the nullifier tree nor the account tree have changed - // while we were waiting for the DB preparation task to complete. If either of them - // did change, we do not proceed with in-memory and database updates, since it may - // lead to an inconsistent state. - if inner.nullifier_tree.root() != nullifier_tree_old_root - || inner.account_tree.root_latest() != account_tree_old_root - { - return Err(ApplyBlockError::ConcurrentWrite); - } - - // Notify the DB update task that the write lock has been acquired, so it can commit - // the DB transaction - inform_acquire_done - .send(()) - .map_err(|_| ApplyBlockError::DbUpdateTaskFailed("Receiver was dropped".into()))?; - - // TODO: shutdown #91 - // Await for successful commit of the DB transaction. If the commit fails, we mustn't - // change in-memory state, so we return a block applying error and don't proceed with - // in-memory updates. - db_update_task - .await? - .map_err(|err| ApplyBlockError::DbUpdateTaskFailed(err.as_report()))?; - - // Update the in-memory data structures after successful commit of the DB transaction - inner - .nullifier_tree - .apply_mutations(nullifier_tree_update) - .expect("Unreachable: old nullifier tree root must be checked before this step"); - inner - .account_tree - .apply_mutations(account_tree_update) - .expect("Unreachable: old account tree root must be checked before this step"); - inner.blockchain.push(block_commitment); - - Ok(()) - } - .in_current_span() - .await?; - - self.forest.write().await.apply_block_updates(block_num, account_deltas)?; - - info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); - - Ok(()) - } - // STATE ACCESSORS // -------------------------------------------------------------------------------------------- @@ -506,17 +210,6 @@ impl State { } } - pub async fn sync_nullifiers( - &self, - prefix_len: u32, - nullifier_prefixes: Vec, - block_range: RangeInclusive, - ) -> Result<(Vec, BlockNumber), DatabaseError> { - self.db - .select_nullifiers_by_prefix(prefix_len, nullifier_prefixes, block_range) - .await - } - /// Generates membership proofs for each one of the `nullifiers` against the latest nullifier /// tree. /// @@ -689,85 +382,6 @@ impl State { }) } - /// Loads data to synchronize a client. - /// - /// The client's request contains a list of note tags, this method will return the first - /// block with a matching tag, or the chain tip. All the other values are filtered based on this - /// block range. - /// - /// # Arguments - /// - /// - `block_num`: The last block *known* by the client, updates start from the next block. - /// - `account_ids`: Include the account's commitment if their _last change_ was in the result's - /// block range. 
- /// - `note_tags`: The tags the client is interested in, result is restricted to the first block - /// with any matches tags. - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn sync_state( - &self, - block_num: BlockNumber, - account_ids: Vec, - note_tags: Vec, - ) -> Result<(StateSyncUpdate, MmrDelta), StateSyncError> { - let inner = self.inner.read().await; - - let state_sync = self.db.get_state_sync(block_num, account_ids, note_tags).await?; - - let delta = if block_num == state_sync.block_header.block_num() { - // The client is in sync with the chain tip. - MmrDelta { - forest: Forest::new(block_num.as_usize()), - data: vec![], - } - } else { - // Important notes about the boundary conditions: - // - // - The Mmr forest is 1-indexed whereas the block number is 0-indexed. The Mmr root - // contained in the block header always lag behind by one block, this is because the Mmr - // leaves are hashes of block headers, and we can't have self-referential hashes. These - // two points cancel out and don't require adjusting. - // - Mmr::get_delta is inclusive, whereas the sync_state request block_num is defined to - // be - // exclusive, so the from_forest has to be adjusted with a +1 - let from_forest = (block_num + 1).as_usize(); - let to_forest = state_sync.block_header.block_num().as_usize(); - inner - .blockchain - .as_mmr() - .get_delta(Forest::new(from_forest), Forest::new(to_forest)) - .map_err(StateSyncError::FailedToBuildMmrDelta)? - }; - - Ok((state_sync, delta)) - } - - /// Loads data to synchronize a client's notes. - /// - /// The client's request contains a list of tags, this method will return the first - /// block with a matching tag, or the chain tip. All the other values are filter based on this - /// block range. - /// - /// # Arguments - /// - /// - `note_tags`: The tags the client is interested in, resulting notes are restricted to the - /// first block containing a matching note. - /// - `block_range`: The range of blocks from which to synchronize notes. - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn sync_notes( - &self, - note_tags: Vec, - block_range: RangeInclusive, - ) -> Result<(NoteSyncUpdate, MmrProof, BlockNumber), NoteSyncError> { - let inner = self.inner.read().await; - - let (note_sync, last_included_block) = - self.db.get_note_sync(block_range, note_tags).await?; - - let mmr_proof = inner.blockchain.open(note_sync.block_header.block_num())?; - - Ok((note_sync, mmr_proof, last_included_block)) - } - /// Returns data needed by the block producer to construct and prove the next block. 
pub async fn get_block_inputs( &self, @@ -996,11 +610,11 @@ impl State { pub async fn get_account( &self, account_request: AccountRequest, - ) -> Result { + ) -> Result { let AccountRequest { block_num, account_id, details } = account_request; if details.is_some() && !account_id.has_public_state() { - return Err(DatabaseError::AccountNotPublic(account_id)); + return Err(GetAccountError::AccountNotPublic(account_id)); } let (block_num, witness) = self.get_account_witness(block_num, account_id).await?; @@ -1022,19 +636,20 @@ impl State { &self, block_num: Option, account_id: AccountId, - ) -> Result<(BlockNumber, AccountWitness), DatabaseError> { + ) -> Result<(BlockNumber, AccountWitness), GetAccountError> { let inner_state = self.inner.read().await; // Determine which block to query let (block_num, witness) = if let Some(requested_block) = block_num { // Historical query: use the account tree with history - let witness = inner_state - .account_tree - .open_at(account_id, requested_block) - .ok_or_else(|| DatabaseError::HistoricalBlockNotAvailable { - block_num: requested_block, - reason: "Block is either in the future or has been pruned from history" - .to_string(), + let witness = + inner_state.account_tree.open_at(account_id, requested_block).ok_or_else(|| { + let latest_block = inner_state.account_tree.block_number_latest(); + if requested_block > latest_block { + GetAccountError::UnknownBlock(requested_block) + } else { + GetAccountError::BlockPruned(requested_block) + } })?; (requested_block, witness) } else { @@ -1061,7 +676,7 @@ impl State { account_id: AccountId, block_num: BlockNumber, detail_request: AccountDetailRequest, - ) -> Result { + ) -> Result { let AccountDetailRequest { code_commitment, asset_vault_commitment, @@ -1069,18 +684,25 @@ impl State { } = detail_request; if !account_id.has_public_state() { - return Err(DatabaseError::AccountNotPublic(account_id)); + return Err(GetAccountError::AccountNotPublic(account_id)); } // Validate block exists in the blockchain before querying the database - self.validate_block_exists(block_num).await?; + { + let inner = self.inner.read().await; + let latest_block_num = inner.latest_block_num(); + + if block_num > latest_block_num { + return Err(GetAccountError::UnknownBlock(block_num)); + } + } // Query account header and storage header together in a single DB call let (account_header, storage_header) = self .db .select_account_header_with_storage_header_at_block(account_id, block_num) .await? - .ok_or(DatabaseError::AccountAtBlockHeightNotFoundInDb(account_id, block_num))?; + .ok_or(GetAccountError::AccountNotFound(account_id, block_num))?; let account_code = match code_commitment { Some(commitment) if commitment == account_header.code_commitment() => None, @@ -1143,15 +765,6 @@ impl State { }) } - /// Returns storage map values for syncing within a block range. - pub(crate) async fn get_storage_map_sync_values( - &self, - account_id: AccountId, - block_range: RangeInclusive, - ) -> Result { - self.db.select_storage_map_sync_values(account_id, block_range).await - } - /// Loads a block from the block store. Return `Ok(None)` if the block is not found. pub async fn load_block( &self, @@ -1168,39 +781,11 @@ impl State { self.inner.read().await.latest_block_num() } - /// Validates that a block exists in the blockchain - /// - /// # Attention - /// - /// Acquires a *read lock** on `self.inner`. - /// - /// # Errors - /// - /// Returns `DatabaseError::BlockNotFound` if the block doesn't exist in the blockchain. 
- async fn validate_block_exists(&self, block_num: BlockNumber) -> Result<(), DatabaseError> { - let inner = self.inner.read().await; - let latest_block_num = inner.latest_block_num(); - - if block_num > latest_block_num { - return Err(DatabaseError::BlockNotFound(block_num)); - } - - Ok(()) - } - /// Emits metrics for each database table's size. pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> { self.db.analyze_table_sizes().await } - /// Returns account vault updates for specified account within a block range. - pub async fn sync_account_vault( - &self, - account_id: AccountId, - block_range: RangeInclusive, - ) -> Result<(BlockNumber, Vec), DatabaseError> { - self.db.get_account_vault_sync(account_id, block_range).await - } /// Returns the network notes for an account that are unconsumed by a specified block number, /// along with the next pagination token. pub async fn get_unconsumed_network_notes_for_account( @@ -1220,16 +805,6 @@ impl State { self.db.select_note_script_by_root(root).await } - /// Returns the complete transaction records for the specified accounts within the specified - /// block range, including state commitments and note IDs. - pub async fn sync_transactions( - &self, - account_ids: Vec, - block_range: RangeInclusive, - ) -> Result<(BlockNumber, Vec), DatabaseError> { - self.db.select_transactions_records(account_ids, block_range).await - } - /// Returns vault asset witnesses for the specified account and block number. pub async fn get_vault_asset_witnesses( &self, diff --git a/crates/store/src/state/sync_state.rs b/crates/store/src/state/sync_state.rs new file mode 100644 index 000000000..59d891ebd --- /dev/null +++ b/crates/store/src/state/sync_state.rs @@ -0,0 +1,141 @@ +use std::ops::RangeInclusive; + +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrProof}; +use tracing::instrument; + +use super::State; +use crate::COMPONENT; +use crate::db::models::queries::StorageMapValuesPage; +use crate::db::{AccountVaultValue, NoteSyncUpdate, NullifierInfo, StateSyncUpdate}; +use crate::errors::{DatabaseError, NoteSyncError, StateSyncError}; + +// STATE SYNCHRONIZATION ENDPOINTS +// ================================================================================================ + +impl State { + /// Returns the complete transaction records for the specified accounts within the specified + /// block range, including state commitments and note IDs. + pub async fn sync_transactions( + &self, + account_ids: Vec, + block_range: RangeInclusive, + ) -> Result<(BlockNumber, Vec), DatabaseError> { + self.db.select_transactions_records(account_ids, block_range).await + } + + /// Loads data to synchronize a client's notes. + /// + /// The client's request contains a list of tags, this method will return the first + /// block with a matching tag, or the chain tip. All the other values are filter based on this + /// block range. + /// + /// # Arguments + /// + /// - `note_tags`: The tags the client is interested in, resulting notes are restricted to the + /// first block containing a matching note. + /// - `block_range`: The range of blocks from which to synchronize notes. 
+ #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn sync_notes( + &self, + note_tags: Vec, + block_range: RangeInclusive, + ) -> Result<(NoteSyncUpdate, MmrProof, BlockNumber), NoteSyncError> { + let inner = self.inner.read().await; + + let (note_sync, last_included_block) = + self.db.get_note_sync(block_range, note_tags).await?; + + let mmr_proof = inner.blockchain.open(note_sync.block_header.block_num())?; + + Ok((note_sync, mmr_proof, last_included_block)) + } + + pub async fn sync_nullifiers( + &self, + prefix_len: u32, + nullifier_prefixes: Vec, + block_range: RangeInclusive, + ) -> Result<(Vec, BlockNumber), DatabaseError> { + self.db + .select_nullifiers_by_prefix(prefix_len, nullifier_prefixes, block_range) + .await + } + + // ACCOUNT STATE SYNCHRONIZATION + // -------------------------------------------------------------------------------------------- + + /// Returns account vault updates for specified account within a block range. + pub async fn sync_account_vault( + &self, + account_id: AccountId, + block_range: RangeInclusive, + ) -> Result<(BlockNumber, Vec), DatabaseError> { + self.db.get_account_vault_sync(account_id, block_range).await + } + + /// Returns storage map values for syncing within a block range. + pub async fn sync_account_storage_maps( + &self, + account_id: AccountId, + block_range: RangeInclusive, + ) -> Result { + self.db.select_storage_map_sync_values(account_id, block_range).await + } + + // FULL STATE SYNCHRONIZATION + // -------------------------------------------------------------------------------------------- + + /// Loads data to synchronize a client. + /// + /// The client's request contains a list of note tags, this method will return the first + /// block with a matching tag, or the chain tip. All the other values are filtered based on this + /// block range. + /// + /// # Arguments + /// + /// - `block_num`: The last block *known* by the client, updates start from the next block. + /// - `account_ids`: Include the account's commitment if their _last change_ was in the result's + /// block range. + /// - `note_tags`: The tags the client is interested in, result is restricted to the first block + /// with any matches tags. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn sync_state( + &self, + block_num: BlockNumber, + account_ids: Vec, + note_tags: Vec, + ) -> Result<(StateSyncUpdate, MmrDelta), StateSyncError> { + let inner = self.inner.read().await; + + let state_sync = self.db.get_state_sync(block_num, account_ids, note_tags).await?; + + let delta = if block_num == state_sync.block_header.block_num() { + // The client is in sync with the chain tip. + MmrDelta { + forest: Forest::new(block_num.as_usize()), + data: vec![], + } + } else { + // Important notes about the boundary conditions: + // + // - The Mmr forest is 1-indexed whereas the block number is 0-indexed. The Mmr root + // contained in the block header always lag behind by one block, this is because the Mmr + // leaves are hashes of block headers, and we can't have self-referential hashes. These + // two points cancel out and don't require adjusting. 
+ // - Mmr::get_delta is inclusive, whereas the sync_state request block_num is defined to + // be + // exclusive, so the from_forest has to be adjusted with a +1 + let from_forest = (block_num + 1).as_usize(); + let to_forest = state_sync.block_header.block_num().as_usize(); + inner + .blockchain + .as_mmr() + .get_delta(Forest::new(from_forest), Forest::new(to_forest)) + .map_err(StateSyncError::FailedToBuildMmrDelta)? + }; + + Ok((state_sync, delta)) + } +} diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index e61930937..2c5fea6e5 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -42,5 +42,8 @@ tracing-opentelemetry = { version = "0.32" } tracing-subscriber = { workspace = true } url = { workspace = true } +[build-dependencies] +miden-node-rocksdb-cxx-linkage-fix = { workspace = true } + [dev-dependencies] thiserror = { workspace = true } diff --git a/crates/utils/build.rs b/crates/utils/build.rs new file mode 100644 index 000000000..ed4038d06 --- /dev/null +++ b/crates/utils/build.rs @@ -0,0 +1,3 @@ +fn main() { + miden_node_rocksdb_cxx_linkage_fix::configure(); +} diff --git a/crates/utils/src/config.rs b/crates/utils/src/config.rs index e0fc1a0a6..b29c9060f 100644 --- a/crates/utils/src/config.rs +++ b/crates/utils/src/config.rs @@ -15,7 +15,7 @@ pub const DEFAULT_FAUCET_SERVER_PORT: u16 = 8080; /// relative, searches in parent directories all the way to the root as well. /// /// The above configuration options are indented to support easy of packaging and deployment. -#[allow(clippy::result_large_err, reason = "This error crashes the node")] +#[expect(clippy::result_large_err, reason = "This error crashes the node")] pub fn load_config Deserialize<'a>>( config_file: impl AsRef, ) -> figment::Result { diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index 1adf5be41..2b222e23e 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -13,7 +13,7 @@ /// Basic request limit. pub const GENERAL_REQUEST_LIMIT: usize = 1000; -#[allow(missing_docs)] +#[expect(missing_docs)] #[derive(Debug, thiserror::Error)] #[error("parameter {which} exceeded limit {limit}: {size}")] pub struct QueryLimitError { diff --git a/crates/utils/src/logging.rs b/crates/utils/src/logging.rs index 6593943f4..589365030 100644 --- a/crates/utils/src/logging.rs +++ b/crates/utils/src/logging.rs @@ -10,6 +10,8 @@ use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::layer::{Filter, SubscriberExt}; use tracing_subscriber::{Layer, Registry}; +use crate::tracing::OpenTelemetrySpanExt; + /// Global tracer provider for flushing traces on panic. /// /// This is necessary because the panic hook needs access to the tracer provider to flush @@ -89,7 +91,12 @@ pub fn setup_tracing(otel: OpenTelemetry) -> anyhow::Result> { // This chains with the default panic hook to preserve backtrace printing. let default_hook = std::panic::take_hook(); std::panic::set_hook(Box::new(move |info| { - tracing::error!(panic = true, "{info}"); + tracing::error!(panic = true, info = %info, "panic"); + + // Mark the current span as failed for OpenTelemetry. + let info_str = info.to_string(); + let wrapped = anyhow::Error::msg(info_str); + tracing::Span::current().set_error(wrapped.as_ref()); // Flush traces before the program terminates. // This ensures the panic trace is exported even though the OtelGuard won't be dropped. 
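For context, the panic-hook change in `logging.rs` above follows the usual take-and-chain pattern from `std::panic`. The sketch below is a minimal standalone illustration of that pattern, assuming only `std` and the `tracing` crate; it deliberately omits the OpenTelemetry span marking and trace flushing performed in the diff, since those rely on crate-local helpers (`OpenTelemetrySpanExt` and the global tracer provider).

```rust
use std::panic;

/// Installs a panic hook that records the panic through `tracing` and then
/// delegates to the previously installed hook, so backtraces are still printed.
fn install_panic_logging() {
    // Preserve the default hook so its output (including backtraces) is kept.
    let default_hook = panic::take_hook();
    panic::set_hook(Box::new(move |info| {
        // Emit a structured error event so the panic is visible in logs/traces.
        tracing::error!(panic = true, info = %info, "panic");
        // Chain to the previous hook to preserve the default panic output.
        default_hook(info);
    }));
}
```

In the diff itself, the same hook additionally marks the current span as failed and flushes traces before the process terminates, ensuring the panic trace is exported even though the `OtelGuard` won't be dropped.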
diff --git a/crates/utils/src/panic.rs b/crates/utils/src/panic.rs index 1b899ee61..c330fe362 100644 --- a/crates/utils/src/panic.rs +++ b/crates/utils/src/panic.rs @@ -4,14 +4,21 @@ use http::{Response, StatusCode, header}; use http_body_util::Full; pub use tower_http::catch_panic::CatchPanicLayer; +use crate::tracing::OpenTelemetrySpanExt; + /// Custom callback that is used by Tower to fulfill the /// [`tower_http::catch_panic::ResponseForPanic`] trait. /// /// This should be added to tonic server builder as a layer via [`CatchPanicLayer::custom()`]. +#[track_caller] pub fn catch_panic_layer_fn(err: Box) -> Response> { // Log the panic error details. let err = stringify_panic_error(err); - tracing::error!(panic = true, "{err}"); + tracing::error!(panic = true, error = %err, "panic"); + + // Mark the current span as failed for OpenTelemetry. + let wrapped = anyhow::Error::msg(err.clone()); + tracing::Span::current().set_error(wrapped.as_ref()); // Return generic error response. Response::builder() diff --git a/crates/utils/src/tracing/grpc.rs b/crates/utils/src/tracing/grpc.rs index f5d0951bf..985a2e4ba 100644 --- a/crates/utils/src/tracing/grpc.rs +++ b/crates/utils/src/tracing/grpc.rs @@ -9,6 +9,7 @@ use crate::tracing::OpenTelemetrySpanExt; /// The span name is dynamically set using the HTTP path via the `otel.name` field. /// Additionally also pulls in remote tracing context which allows the server trace to be connected /// to the client's origin trace. +#[track_caller] pub fn grpc_trace_fn(request: &http::Request) -> tracing::Span { // A gRPC request's path ends with `..//`. let mut path_segments = request.uri().path().rsplit('/'); diff --git a/docs/external/src/operator/installation.md b/docs/external/src/operator/installation.md index 1f27c639d..662d76851 100644 --- a/docs/external/src/operator/installation.md +++ b/docs/external/src/operator/installation.md @@ -39,6 +39,18 @@ command ensures that all required libraries are installed. sudo apt install llvm clang bindgen pkg-config libssl-dev libsqlite3-dev ``` +On macOS, ensure the Xcode Command Line Tools are installed: + +```sh +xcode-select --install +``` + +If you still see `'cstdint' file not found` errors after installing the Command Line Tools (common after a macOS upgrade), try setting the SDK root explicitly: + +```sh +export SDKROOT="$(xcrun --sdk macosx --show-sdk-path)" +``` + Install the latest node binary: ```sh diff --git a/docs/external/src/rpc.md b/docs/external/src/rpc.md index b26e88131..e25bbd54d 100644 --- a/docs/external/src/rpc.md +++ b/docs/external/src/rpc.md @@ -107,6 +107,19 @@ The witness proves the account's state commitment in the account tree. If detail If `block_num` is provided, returns the state at that historical block; otherwise, returns the latest state. +#### Error Codes + +When the request fails, detailed error information is provided through gRPC status details. 
The following error codes may be returned: + +| Error Code | Value | gRPC Status | Description | +|---------------------------|-------|--------------------|------------------------------------------------------| +| `INTERNAL_ERROR` | 0 | `INTERNAL` | Internal server error occurred | +| `DESERIALIZATION_FAILED` | 1 | `INVALID_ARGUMENT` | Request could not be deserialized | +| `ACCOUNT_NOT_FOUND` | 2 | `INVALID_ARGUMENT` | Account not found at the requested block | +| `ACCOUNT_NOT_PUBLIC` | 3 | `INVALID_ARGUMENT` | Account details requested for a non-public account | +| `UNKNOWN_BLOCK` | 4 | `INVALID_ARGUMENT` | Requested block number is unknown | +| `BLOCK_PRUNED` | 5 | `INVALID_ARGUMENT` | Requested block has been pruned | + ### GetBlockByNumber Request the raw data for a specific block. diff --git a/packaging/node/miden-validator.service b/packaging/node/miden-validator.service new file mode 100644 index 000000000..7b6c5de87 --- /dev/null +++ b/packaging/node/miden-validator.service @@ -0,0 +1,16 @@ +[Unit] +Description=Miden validator +Wants=network-online.target + +[Install] +WantedBy=multi-user.target + +[Service] +Type=exec +Environment="OTEL_SERVICE_NAME=miden-validator" +EnvironmentFile=/lib/systemd/system/miden-validator.env +ExecStart=/usr/bin/miden-node validator start +WorkingDirectory=/opt/miden-validator +User=miden-validator +RestartSec=5 +Restart=always diff --git a/packaging/node/postinst b/packaging/node/postinst index 8967f9e54..036b2d112 100644 --- a/packaging/node/postinst +++ b/packaging/node/postinst @@ -2,25 +2,28 @@ # # This is a postinstallation script so the service can be configured and started when requested. -# user is expected by the systemd service file and `/opt/` is its working directory, -sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent miden-node +for svc in miden-node miden-validator; do + # user is expected by the systemd service file and `/opt/` is its working directory, + sudo adduser --disabled-password --disabled-login --shell /usr/sbin/nologin --quiet --system --no-create-home --home /nonexistent "$svc" -# Working folder. -if [ -d "/opt/miden-node" ] -then - echo "Directory /opt/miden-node exists." -else - mkdir -p /opt/miden-node -fi -sudo chown -R miden-node /opt/miden-node + # Working folder. + if [ -d "/opt/$svc" ] + then + echo "Directory /opt/$svc exists." + else + mkdir -p "/opt/$svc" + fi + sudo chown -R "$svc" "/opt/$svc" -# Configuration folder -if [ -d "/etc/opt/miden-node" ] -then - echo "Directory /etc/opt/miden-node exists." -else - mkdir -p /etc/opt/miden-node -fi -sudo chown -R miden-node /etc/opt/miden-node + # Configuration folder + if [ -d "/etc/opt/$svc" ] + then + echo "Directory /etc/opt/$svc exists." 
+ else + mkdir -p "/etc/opt/$svc" + fi + sudo chown -R "$svc" "/etc/opt/$svc" + +done sudo systemctl daemon-reload diff --git a/packaging/node/postrm b/packaging/node/postrm index 893a53588..86a9846a2 100644 --- a/packaging/node/postrm +++ b/packaging/node/postrm @@ -3,7 +3,10 @@ ############### # Remove miden-node installs ############## -sudo rm -rf /lib/systemd/system/miden-node.service -sudo rm -rf /etc/opt/miden-node -sudo deluser miden-node +for svc in miden-node miden-validator; do + sudo rm -rf "/lib/systemd/system/$svc.service" + sudo rm -rf "/etc/opt/$svc" + sudo deluser "$svc" +done + sudo systemctl daemon-reload diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 001dc4098..c71e853da 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -96,7 +96,7 @@ service Rpc { // Store API for the BlockProducer component service BlockProducer { // Applies changes of a new block to the DB and in-memory data structures. - rpc ApplyBlock(blockchain.Block) returns (google.protobuf.Empty) {} + rpc ApplyBlock(ApplyBlockRequest) returns (google.protobuf.Empty) {} // Retrieves block header by given block number. Optionally, it also returns the MMR path // and current chain length to authenticate the block's inclusion. @@ -112,6 +112,18 @@ service BlockProducer { rpc GetTransactionInputs(TransactionInputsRequest) returns (TransactionInputs) {} } +// APPLY BLOCK REQUEST +// ================================================================================================ + +// Applies a block to the state. +message ApplyBlockRequest { + // Ordered batches encoded using [winter_utils::Serializable] implementation for + // [miden_objects::batch::OrderedBatches]. + bytes ordered_batches = 1; + // Block signed by the Validator. + blockchain.SignedBlock block = 2; +} + // GET BLOCK INPUTS // ================================================================================================ diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index f521fc1c5..b120963f2 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -17,6 +17,13 @@ service Api { // Returns the status info of the node. rpc Status(google.protobuf.Empty) returns (RpcStatus) {} + // Returns the query parameter limits configured for RPC methods. + // + // These define the maximum number of each parameter a method will accept. + // Exceeding the limit will result in the request being rejected and you should instead send + // multiple smaller requests. + rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} + // Returns a Sparse Merkle Tree opening proof for each requested nullifier // // Each proof demonstrates either: @@ -46,6 +53,9 @@ service Api { // Returns the script for a note by its root. rpc GetNoteScriptByRoot(note.NoteRoot) returns (MaybeNoteScript) {} + // TRANSACTION SUBMISSION ENDPOINTS + // -------------------------------------------------------------------------------------------- + // Submits proven transaction to the Miden network. Returns the node's current block height. rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} @@ -63,6 +73,25 @@ service Api { // Returns the node's current block height. 
     rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {}
+
+    // STATE SYNCHRONIZATION ENDPOINTS
+    // --------------------------------------------------------------------------------------------
+
+    // Returns transaction records for specific accounts within a block range.
+    rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {}
+
+    // Returns info which can be used by the client to sync up to the tip of the chain for the
+    // notes they are interested in.
+    //
+    // The client specifies the `note_tags` they are interested in, and the block height from
+    // which to start searching for matching notes. The request will then return the next block
+    // containing any note matching the provided tags.
+    //
+    // The response includes each note's metadata and inclusion proof.
+    //
+    // A basic note sync can be implemented by repeatedly requesting the previous response's block
+    // until reaching the tip of the chain.
+    rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {}

     // Returns a list of nullifiers that match the specified prefixes and are recorded in the node.
     //
     // Note that only 16-bit prefixes are supported at this time.
@@ -71,16 +100,8 @@ service Api {
     // Returns account vault updates for specified account within a block range.
     rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {}

-    // Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in.
-    //
-    // Client specifies the `note_tags` they are interested in, and the block height from which to search for new for
-    // matching notes for. The request will then return the next block containing any note matching the provided tags.
-    //
-    // The response includes each note's metadata and inclusion proof.
-    //
-    // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the
-    // tip of the chain.
-    rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {}
+    // Returns storage map updates for specified account and storage slots within a block range.
+    rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {}

     // Returns info which can be used by the client to sync up to the latest state of the chain
     // for the objects (accounts and notes) the client is interested in.
@@ -90,27 +111,14 @@ service Api {
     // in a loop until `response.block_header.block_num == response.chain_tip`, at which point
     // the client is fully synchronized with the chain.
     //
-    // Each update response also contains info about new notes, accounts etc. created. It also returns
-    // Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain
-    // MMR peaks and chain MMR nodes.
+    // Each update response also contains info about new notes, accounts, etc. created. It also
+    // returns a Chain MMR delta that can be used to update the state of the Chain MMR. This
+    // includes both chain MMR peaks and chain MMR nodes.
     //
     // For preserving some degree of privacy, note tags contain only high
     // part of hashes. Thus, returned data contains excessive notes, client can make
     // additional filtering of that data on its side.
     rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {}
-
-    // Returns storage map updates for specified account and storage slots within a block range.
-    rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {}
-
-    // Returns transactions records for specific accounts within a block range.
-    rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {}
-
-    // Returns the query parameter limits configured for RPC methods.
-    //
-    // These define the maximum number of each parameter a method will accept.
-    // Exceeding the limit will result in the request being rejected and you should instead send
-    // multiple smaller requests.
-    rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {}
 }

 // RPC STATUS
diff --git a/proto/proto/types/blockchain.proto b/proto/proto/types/blockchain.proto
index 6f53cd4f3..43828d4dc 100644
--- a/proto/proto/types/blockchain.proto
+++ b/proto/proto/types/blockchain.proto
@@ -7,11 +7,11 @@ import "types/primitives.proto";
 // BLOCK
 // ================================================================================================

-// Represents a block.
-message Block {
-    // Block data encoded using [winter_utils::Serializable] implementation for
-    // [miden_protocol::block::Block].
-    bytes block = 1;
+// Represents a signed block.
+message SignedBlock {
+    BlockHeader header = 1;
+    BlockBody body = 2;
+    BlockSignature signature = 3;
 }

 // Represents a proposed block.
diff --git a/proto/proto/types/note.proto b/proto/proto/types/note.proto
index ac125daa0..ebaa64ed6 100644
--- a/proto/proto/types/note.proto
+++ b/proto/proto/types/note.proto
@@ -7,6 +7,16 @@ import "types/account.proto";
 // NOTES
 // ================================================================================================

+// The type of a note.
+enum NoteType {
+    // Unspecified note type (default value, should not be used).
+    NOTE_TYPE_UNSPECIFIED = 0;
+    // Public note - details are visible on-chain.
+    NOTE_TYPE_PUBLIC = 1;
+    // Private note - details are not visible on-chain.
+    NOTE_TYPE_PRIVATE = 2;
+}
+
 // Represents a note's ID.
 message NoteId {
     // A unique identifier of the note which is a 32-byte commitment to the underlying note data.
@@ -24,8 +34,8 @@ message NoteMetadata {
     // The account which sent the note.
     account.AccountId sender = 1;

-    // The type of the note (0b01 = public, 0b10 = private, 0b11 = encrypted).
-    uint32 note_type = 2;
+    // The type of the note.
+    NoteType note_type = 2;

     // A value which can be used by the recipient(s) to identify notes intended for them.
     //