diff --git a/.clang-format b/.clang-format
index 0255bce9c..555606d8e 100644
--- a/.clang-format
+++ b/.clang-format
@@ -16,4 +16,4 @@ ConstructorInitializerAllOnOneLineOrOnePerLine: true
IndentCaseLabels: true
IndentWidth: 4
SpaceAfterCStyleCast: true
-Standard: c++20
+Standard: Latest
diff --git a/.clang-tidy b/.clang-tidy
index bdd70a8f7..9b98f5647 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -1,12 +1,10 @@
Checks: >-
boost*,
- -boost-use-ranges,
bugprone*,
-bugprone-easily-swappable-parameters,
cert*,
-cert-int09-c,
clang-analyzer*,
- -clang-analyzer-optin.cplusplus.VirtualCall,
cppcoreguidelines*,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-owning-memory,
@@ -32,7 +30,6 @@ Checks: >-
-modernize-use-nodiscard,
-modernize-use-trailing-return-type,
-modernize-avoid-c-arrays,
- -modernize-use-ranges,
performance*,
-performance-enum-size,
portability*,
@@ -41,16 +38,6 @@ Checks: >-
-readability-identifier-length,
-readability-magic-numbers,
-readability-enum-initial-value,
+ -readability-use-concise-preprocessor-directives,
WarningsAsErrors: >-
- boost*,
- bugprone*,
- cert*,
- clang-analyzer*,
- cppcoreguidelines*,
- google*,
- hicpp*,
- misc*,
- modernize*,
- performance*,
- portability*,
- readability*,
+ *
diff --git a/.dockerignore b/.dockerignore
index 99ea8dfa7..d39f81a13 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -2,8 +2,6 @@
**/*.a
**/*.swp
**/*.d
-**/*.pb.cc
-**/*.pb.h
**/*.gcno
**/*.so
**/*.dtb
@@ -15,7 +13,7 @@
build
third-party/downloads
src/cartesi-jsonrpc-machine
-src/cartesi-merkle-tree-hash
+src/cartesi-hash-tree-hash
doc/html
doc/api.md
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 62fe73d45..95abae7ed 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -209,10 +209,6 @@ jobs:
run: |
docker run --rm -t -v cmio-templates:/tmp/cartesi-machine/tests/data cartesi/machine-emulator:tests /usr/share/cartesi-machine/tests/scripts/test-cmio.sh cartesi-jsonrpc-machine cartesi-machine lua
- - name: Run Merkle tree tests
- run: |
- docker run --rm -t ${{ github.repository_owner }}/machine-emulator:tests test-merkle-tree-hash --log2-root-size=30 --log2-leaf-size=12 --input=/usr/bin/test-merkle-tree-hash
-
- name: Run C API tests
run: |
docker run --rm -t ${{ github.repository_owner }}/machine-emulator:tests test-machine-c-api
@@ -227,7 +223,7 @@ jobs:
- name: Run test suite with microarchitecture and host based interpreters comparing machine hashes at every step
run: |
- docker run --rm -t ${{ github.repository_owner }}/machine-emulator:tests cartesi-machine-tests --concurrency=update_merkle_tree:1 --test="^rv64ui.*$" --jobs=$(nproc) run_host_and_uarch
+ docker run --rm -t ${{ github.repository_owner }}/machine-emulator:tests cartesi-machine-tests --concurrency=update_hash_tree:1 --test="^rv64ui.*$" --jobs=$(nproc) run_host_and_uarch
- name: Create uarch json logs to be used to test the Solidity based microarchitecture interpreter
run: |
@@ -377,10 +373,6 @@ jobs:
run: |
docker run --platform linux/arm64 --rm -t -v cmio-templates:/tmp/cartesi-machine/tests/data cartesi/machine-emulator:tests /usr/share/cartesi-machine/tests/scripts/test-cmio.sh cartesi-jsonrpc-machine cartesi-machine lua
- - name: Run Merkle tree tests
- run: |
- docker run --platform linux/arm64 --rm -t ${{ github.repository_owner }}/machine-emulator:tests test-merkle-tree-hash --log2-root-size=30 --log2-leaf-size=12 --input=/usr/bin/test-merkle-tree-hash
-
- name: Run C API tests
run: |
docker run --platform linux/arm64 --rm -t ${{ github.repository_owner }}/machine-emulator:tests test-machine-c-api
@@ -395,7 +387,7 @@ jobs:
- name: Run test suite with microarchitecture and host based interpreters comparing machine hashes at every step
run: |
- docker run --platform linux/arm64 --rm -t ${{ github.repository_owner }}/machine-emulator:tests cartesi-machine-tests --test="^rv64ui%-v%-add.bin$" --concurrency=update_merkle_tree:1 --jobs=$(nproc) run_host_and_uarch
+ docker run --platform linux/arm64 --rm -t ${{ github.repository_owner }}/machine-emulator:tests cartesi-machine-tests --test="^rv64ui%-v%-add.bin$" --concurrency=update_hash_tree:1 --jobs=$(nproc) run_host_and_uarch
- name: Build machine-emulator "tests" docker image
uses: docker/build-push-action@v5
@@ -529,6 +521,7 @@ jobs:
GIT_COMMIT=${GITHUB_SHA}
DEBUG=yes
COVERAGE=yes
+ THREADS=no
MACHINE_EMULATOR_VERSION=${{ env.MACHINE_EMULATOR_VERSION }}
project: ${{ vars.DEPOT_PROJECT }}
token: ${{ secrets.DEPOT_TOKEN }}
@@ -546,11 +539,12 @@ jobs:
build-args: |
DEBUG=yes
COVERAGE=yes
+ THREADS=no
MACHINE_EMULATOR_VERSION=${{ env.MACHINE_EMULATOR_VERSION }}
- name: Run coverage
run: |
- docker run --name coverage-report -t ${{ github.repository_owner }}/machine-emulator:coverage make -j1 test-save-and-load test-machine test-hash test-lua test-jsonrpc test-c-api coverage-machine test-uarch-rv64ui test-uarch-interpreter coverage-uarch coverage-report coverage=yes
+ docker run --name coverage-report -t ${{ github.repository_owner }}/machine-emulator:coverage make -j1 test-save-and-load test-machine test-lua test-jsonrpc test-c-api coverage-machine test-uarch-rv64ui test-uarch-interpreter coverage-uarch coverage-report coverage=yes
docker cp coverage-report:/usr/src/emulator/tests/build/coverage .
docker rm coverage-report
@@ -569,7 +563,7 @@ jobs:
sanitize:
name: Sanitize
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
@@ -628,17 +622,17 @@ jobs:
SANITIZE=yes
MACHINE_EMULATOR_VERSION=${{ env.MACHINE_EMULATOR_VERSION }}
- - name: MMAP rnd_bits workaround for the new GitHub ubuntu-22 runner
+ - name: MMAP rnd_bits workaround for the new GitHub Ubuntu runner
run: sudo sysctl vm.mmap_rnd_bits=28
- name: Run tests with sanitizer
run: |
- docker run --rm -t ${{ github.repository_owner }}/machine-emulator:sanitizer make sanitize=yes test-save-and-load test-machine test-hash test-lua test-jsonrpc test-c-api coverage-machine test-uarch-rv64ui test-uarch-interpreter coverage-uarch
+ docker run --rm -t ${{ github.repository_owner }}/machine-emulator:sanitizer make sanitize=yes test-save-and-load test-machine test-lua test-jsonrpc test-c-api coverage-machine test-uarch-rv64ui test-uarch-interpreter coverage-uarch
publish_artifacts:
name: Publish artifacts
needs: [build, static-analysis, coverage, sanitize, test_amd64, test_arm64]
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
steps:
- name: Checkout emulator source code
uses: actions/checkout@v4
diff --git a/.gitignore b/.gitignore
index 0772c58de..6d5078aa6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,14 +6,15 @@
*.a
*.lib
*.wasm
+*.TODO
+*.tmp
build
pkg
third-party/downloads
src/cartesi-jsonrpc-machine
-src/cartesi-merkle-tree-hash
+src/cartesi-hash-tree-hash
src/tests/test-machine-c-api
-src/tests/test-merkle-tree-hash
doc/html
doc/api.md
diff --git a/.typos.toml b/.typos.toml
index ccb29e1e3..4f2054dcd 100644
--- a/.typos.toml
+++ b/.typos.toml
@@ -9,3 +9,5 @@ stap = "stap"
wronly = "wronly"
optin = "optin"
sxl = "sxl"
+nd = "nd"
+mke = "mke"
diff --git a/Dockerfile b/Dockerfile
index 90b21a40a..f12877818 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,35 +1,17 @@
-FROM debian:bookworm-20250407 AS toolchain
+FROM debian:trixie-20250811 AS toolchain
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
- build-essential vim wget git lcov \
- libboost1.81-dev libssl-dev libslirp-dev \
+ build-essential vim wget git gcovr \
+ libomp-19-dev libboost1.83-dev libssl-dev libslirp-dev \
ca-certificates pkg-config lua5.4 liblua5.4-dev \
- luarocks xxd procps \
- g++-12-riscv64-linux-gnu=12.2.0-13cross1 \
- gcc-riscv64-unknown-elf=12.2.0-14+11+b1 && \
+ lua-check lua-socket lua-posix lua-lpeg \
+ xxd procps unzip gosu \
+ clang-tidy clang-format \
+ g++-14-riscv64-linux-gnu=14.2.0-19cross1 \
+ gcc-riscv64-unknown-elf=14.2.0+19 && \
rm -rf /var/lib/apt/lists/*
-# Install clang 19
-RUN apt-get update && \
- DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
- wget software-properties-common gnupg && \
- wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc && \
- add-apt-repository -y 'deb http://apt.llvm.org/bookworm/ llvm-toolchain-bookworm-19 main' && \
- add-apt-repository -y 'deb http://apt.llvm.org/bookworm/ llvm-toolchain-bookworm-19 main' && \
- apt-get update && \
- DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y \
- clang-tidy-19 clang-format-19 && \
- update-alternatives --install /usr/bin/clang-format clang-format /usr/bin/clang-format-19 120 && \
- update-alternatives --install /usr/bin/clang-tidy clang-tidy /usr/bin/clang-tidy-19 120 && \
- rm -rf /var/lib/apt/lists/*
-
-# Install lua packages
-RUN luarocks install --lua-version=5.4 luasocket && \
- luarocks install --lua-version=5.4 luasec && \
- luarocks install --lua-version=5.4 luaposix && \
- luarocks install --lua-version=5.4 luacheck
-
# Install stylua
RUN cd /tmp && \
wget https://github.com/JohnnyMorganz/StyLua/releases/download/v0.20.0/stylua-linux-`uname -m`.zip && \
@@ -44,15 +26,6 @@ RUN cd /tmp && \
# Environment has the riscv64 toolchains
ENV DEV_ENV_HAS_TOOLCHAIN=yes
-# Install su-exec
-RUN cd /tmp && \
- git clone --branch v0.2 --depth 1 https://github.com/ncopa/su-exec.git && \
- cd su-exec && \
- if [ `git rev-parse --verify HEAD` != 'f85e5bde1afef399021fbc2a99c837cf851ceafa' ]; then exit 1; fi && \
- make && \
- cp su-exec /usr/local/bin/ && \
- rm -rf /tmp/su-exec
-
# Install workaround to run as current user
COPY tools/docker-entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
@@ -69,10 +42,11 @@ FROM toolchain AS builder
ARG GIT_COMMIT=""
ARG DEBUG=no
ARG COVERAGE=no
+ARG THREADS=yes
ARG SANITIZE=no
COPY . .
-RUN make -j$(nproc) git_commit=$GIT_COMMIT debug=$DEBUG coverage=$COVERAGE sanitize=$SANITIZE
+RUN make -j$(nproc) git_commit=$GIT_COMMIT debug=$DEBUG coverage=$COVERAGE threads=$THREADS sanitize=$SANITIZE
####################################################################################################
FROM builder AS debian-packager
@@ -80,21 +54,17 @@ FROM builder AS debian-packager
RUN make install-uarch debian-package DESTDIR=$PWD/_install
####################################################################################################
-FROM debian:bookworm-20250407-slim
+FROM debian:trixie-20250811-slim
ARG TARGETARCH
-COPY --from=debian-packager \
- /usr/src/emulator/machine-emulator_${TARGETARCH}.deb \
- machine-emulator.deb
-COPY --from=debian-packager /usr/local/lib/lua /usr/local/lib/lua
-COPY --from=debian-packager /usr/local/share/lua /usr/local/share/lua
+COPY --from=debian-packager /usr/src/emulator/machine-emulator_${TARGETARCH}.deb machine-emulator.deb
RUN apt-get update && \
apt-get install -y ./machine-emulator.deb && \
rm -rf /var/lib/apt/lists/* /var/cache/apt/* machine-emulator.deb
-RUN addgroup --system --gid 102 cartesi && \
- adduser --system --uid 102 --ingroup cartesi --disabled-login --no-create-home --home /nonexistent --gecos "cartesi user" --shell /bin/false cartesi
+RUN groupadd --system --gid 102 cartesi && \
+ useradd --system --uid 102 --gid 102 --no-create-home --home /nonexistent --comment "cartesi user" --shell /bin/false cartesi
WORKDIR /opt/cartesi
diff --git a/LICENSES.md b/LICENSES.md
index 0c12e43b3..0569ab0ba 100644
--- a/LICENSES.md
+++ b/LICENSES.md
@@ -8,11 +8,11 @@ This project includes several submodules and dependencies, each with its own lic
- `tests/machine`: Licensed under the Apache License 2.0. See the license terms in [tests/machine/LICENSE](tests/machine/LICENSE).
- `tests/uarch`: Licensed under the Apache License 2.0. Licensing details are available in [tests/uarch/LICENSE](tests/uarch/LICENSE).
+- `third-party/ankerl`: Licensed under the MIT License. The license can be found at [third-party/ankerl/LICENSE](third-party/ankerl/LICENSE).
- `third-party/llvm-flang-uint128`: Licensed under the Apache License 2.0 with LLVM exceptions. The license can be found at [third-party/llvm-flang-uint128/LICENSE](third-party/llvm-flang-uint128/LICENSE).
- `third-party/riscv-arch-test`: Source code licensed under the Apache 2.0 and BSD 3-Clause licenses. Documentation under `CC-BY-4.0`. License information is provided in README.md and other COPYING.* files like [third-party/riscv-arch-test/COPYING.APACHE](third-party/riscv-arch-test/COPYING.APACHE).
- `third-party/riscv-tests`: Licensed under the BSD 3-Clause "New" or "Revised" License. See [third-party/riscv-tests/LICENSE](third-party/riscv-tests/LICENSE) for license details.
- `third-party/riscv-tests/env`: Licensed under the BSD 3-Clause "New" or "Revised" License. License details are in [third-party/riscv-tests/env/LICENSE](third-party/riscv-tests/env/LICENSE).
-- `third-party/tiny_sha3`: Licensed under the MIT License. The license can be found at [third-party/tiny_sha3/LICENSE](third-party/tiny_sha3/LICENSE).
- `third-party/nlohmann-json`: Licensed under the MIT License. The license can be found at [third-party/nlohmann-json/LICENSE.MIT](third-party/nlohmann-json/LICENSE.MIT).
## Debian Packages
diff --git a/Makefile b/Makefile
index 91ee8e864..3ec08cf1d 100644
--- a/Makefile
+++ b/Makefile
@@ -73,17 +73,17 @@ INSTALL_DIR= cp -RP
SYMLINK= ln -sf
CHMOD_EXEC= chmod 0755
-EMU_TO_BIN= src/cartesi-jsonrpc-machine src/cartesi-merkle-tree-hash
+EMU_TO_BIN= src/cartesi-jsonrpc-machine src/cartesi-hash-tree-hash
EMU_TO_LIB= src/$(LIBCARTESI_SO) src/$(LIBCARTESI_SO_JSONRPC)
EMU_TO_LIB_A= src/libcartesi.a src/libcartesi_jsonrpc.a src/libluacartesi.a src/libluacartesi_jsonrpc.a
EMU_LUA_TO_BIN= src/cartesi-machine.lua src/cartesi-machine-stored-hash.lua
-EMU_TO_LUA_PATH= src/cartesi/util.lua src/cartesi/proof.lua src/cartesi/gdbstub.lua
+EMU_TO_LUA_PATH= src/cartesi/util.lua src/cartesi/gdbstub.lua
EMU_TO_LUA_CPATH= src/cartesi.so
EMU_TO_LUA_CARTESI_CPATH= src/cartesi/jsonrpc.so
EMU_TO_INC= $(addprefix src/,jsonrpc-machine-c-api.h machine-c-api.h machine-c-version.h)
UARCH_TO_SHARE= uarch-ram.bin
-TESTS_TO_BIN= tests/build/misc/test-merkle-tree-hash tests/build/misc/test-machine-c-api
+TESTS_TO_BIN= tests/build/misc/test-machine-c-api
TESTS_LUA_TO_LUA_PATH=tests/lua/cartesi
TESTS_LUA_TO_TEST_LUA_PATH=$(wildcard tests/lua/*.lua)
TESTS_SCRIPTS_TO_TEST_SCRIPTS_PATH=$(wildcard tests/scripts/*.sh)
@@ -96,6 +96,9 @@ TESTSDIR = $(abspath tests)
DOWNLOADDIR = $(DEPDIR)/downloads
SUBCLEAN = $(addsuffix .clean,$(SRCDIR) uarch tests)
+# Pass down received UARCH_DEFS to sub-makefiles
+export UARCH_DEFS
+
# Docker image tag
TAG ?= devel
DEBIAN_IMG ?= cartesi/machine-emulator:$(TAG).deb
@@ -206,10 +209,10 @@ doc:
bundle-boost: $(DEPDIR)/downloads/boost
$(DEPDIR)/downloads/boost:
mkdir -p $(DOWNLOADDIR)
- wget -O $(DEPDIR)/downloads/boost_1_81_0.tar.gz https://archives.boost.io/release/1.81.0/source/boost_1_81_0.tar.gz
- tar -C $(DEPDIR)/downloads -xzf $(DEPDIR)/downloads/boost_1_81_0.tar.gz boost_1_81_0/boost
- mv $(DEPDIR)/downloads/boost_1_81_0/boost $(DEPDIR)/downloads/boost
- rm -rf $(DEPDIR)/downloads/boost_1_81_0.tar.gz $(DEPDIR)/downloads/boost_1_81_0
+ wget -O $(DEPDIR)/downloads/boost_1_83_0.tar.gz https://archives.boost.io/release/1.83.0/source/boost_1_83_0.tar.gz
+ tar -C $(DEPDIR)/downloads -xzf $(DEPDIR)/downloads/boost_1_83_0.tar.gz boost_1_83_0/boost
+ mv $(DEPDIR)/downloads/boost_1_83_0/boost $(DEPDIR)/downloads/boost
+ rm -rf $(DEPDIR)/downloads/boost_1_83_0.tar.gz $(DEPDIR)/downloads/boost_1_83_0
submodules:
git submodule update --init --recursive
@@ -250,22 +253,22 @@ $(SRCDIR)/interpret-jump-table.h:
@eval $$($(MAKE) -s --no-print-directory env); $(MAKE) -C $(SRCDIR) interpret-jump-table.h
build-emulator-builder-image:
- docker build $(DOCKER_PLATFORM) --build-arg DEBUG=$(debug) --build-arg COVERAGE=$(coverage) --build-arg SANITIZE=$(sanitize) --target builder -t cartesi/machine-emulator:builder -f Dockerfile .
+ docker build $(DOCKER_PLATFORM) --build-arg DEBUG=$(debug) --build-arg COVERAGE=$(coverage) --build-arg THREADS=$(threads) --build-arg SANITIZE=$(sanitize) --target builder -t cartesi/machine-emulator:builder -f Dockerfile .
build-emulator-toolchain-image build-toolchain:
docker build $(DOCKER_PLATFORM) --target toolchain -t cartesi/machine-emulator:toolchain -f Dockerfile .
build-emulator-image:
- docker build $(DOCKER_PLATFORM) --build-arg DEBUG=$(debug) --build-arg COVERAGE=$(coverage) --build-arg SANITIZE=$(sanitize) -t cartesi/machine-emulator:$(TAG) -f Dockerfile .
+ docker build $(DOCKER_PLATFORM) --build-arg DEBUG=$(debug) --build-arg COVERAGE=$(coverage) --build-arg THREADS=$(threads) --build-arg SANITIZE=$(sanitize) -t cartesi/machine-emulator:$(TAG) -f Dockerfile .
build-emulator-tests-image: build-emulator-builder-image build-emulator-image
- docker build $(DOCKER_PLATFORM) --build-arg DEBUG=$(debug) --build-arg COVERAGE=$(coverage) --build-arg SANITIZE=$(sanitize) --build-arg TAG=$(TAG) -t cartesi/machine-emulator:tests -f tests/Dockerfile .
+ docker build $(DOCKER_PLATFORM) --build-arg DEBUG=$(debug) --build-arg COVERAGE=$(coverage) --build-arg THREADS=$(threads) --build-arg SANITIZE=$(sanitize) --build-arg TAG=$(TAG) -t cartesi/machine-emulator:tests -f tests/Dockerfile .
build-emulator-tests-builder-image: build-emulator-builder-image
- docker build $(DOCKER_PLATFORM) --target tests-builder --build-arg DEBUG=$(debug) --build-arg COVERAGE=$(coverage) --build-arg SANITIZE=$(sanitize) --build-arg TAG=$(TAG) -t cartesi/machine-emulator:tests-builder -f tests/Dockerfile .
+ docker build $(DOCKER_PLATFORM) --target tests-builder --build-arg DEBUG=$(debug) --build-arg COVERAGE=$(coverage) --build-arg THREADS=$(threads) --build-arg SANITIZE=$(sanitize) --build-arg TAG=$(TAG) -t cartesi/machine-emulator:tests-builder -f tests/Dockerfile .
build-debian-package:
- docker build $(DOCKER_PLATFORM) --target debian-packager --build-arg DEBUG=$(debug) --build-arg COVERAGE=$(coverage) --build-arg SANITIZE=$(sanitize) -t $(DEBIAN_IMG) -f Dockerfile .
+ docker build $(DOCKER_PLATFORM) --target debian-packager --build-arg DEBUG=$(debug) --build-arg COVERAGE=$(coverage) --build-arg THREADS=$(threads) --build-arg SANITIZE=$(sanitize) -t $(DEBIAN_IMG) -f Dockerfile .
build-tests-debian-packages: build-emulator-builder-image
docker build $(DOCKER_PLATFORM) --target tests-debian-packager --build-arg TAG=$(TAG) -t cartesi/machine-emulator:tests-debian-packager -f tests/Dockerfile .
@@ -306,11 +309,12 @@ toolchain-env: check-toolchain
cartesi/machine-emulator:toolchain /bin/bash
toolchain-exec: check-toolchain
- @docker run --hostname toolchain --rm \
+ docker run --hostname toolchain --rm \
-e USER=$$(id -u -n) \
-e GROUP=$$(id -g -n) \
-e UID=$$(id -u) \
-e GID=$$(id -g) \
+ -e UARCH_DEFS="$(UARCH_DEFS)" \
-v `pwd`:/opt/cartesi/machine-emulator \
-w /opt/cartesi/machine-emulator \
cartesi/machine-emulator:toolchain /bin/bash -c "$(CONTAINER_COMMAND)"
diff --git a/README.md b/README.md
index b6bc506cf..c2cf67d7b 100644
--- a/README.md
+++ b/README.md
@@ -80,7 +80,7 @@ sudo apt-get update
sudo apt-get install cartesi-machine
```
-The packages provided in this APT repository are known to work with **Debian 12** (Bookworm) and **Ubuntu 24.04** (Noble).
+The packages provided in this APT repository are known to work with **Debian 13** (Trixie) and **Ubuntu 24.04** (Noble).
#### Alpine Linux
@@ -116,28 +116,28 @@ brew install cartesi-machine
##### System Requirements
-- C++ Compiler with support for C++20 (tested with GCC >= 11.x and Clang >= 14.x).
+- C++ Compiler with support for C++23 (tested with GCC >= 14.x and Clang >= 19.x).
- GNU Make >= 3.81
-- Boost >= 1.81
-- Lua >= 5.4.4 (optional, required for scripting support and interactive terminal)
+- Boost >= 1.83
+- Lua >= 5.4.6 (optional, required for scripting support and interactive terminal)
- Libslirp >= 4.6.0 (optional, required for networking support)
###### Debian Requirements
```sh
-sudo apt-get install build-essential git wget libboost1.81-dev liblua5.4-dev libslirp-dev lua5.4
+sudo apt-get install build-essential git wget libomp-dev libboost1.83-dev liblua5.4-dev libslirp-dev lua5.4
```
###### MacPorts Requirements
```sh
-sudo port install clang boost181 wget pkgconfig lua54 libslirp
+sudo port install clang libomp boost187 wget pkgconfig lua54 libslirp
```
###### Homebrew Requirements
```sh
-brew install llvm boost wget pkg-config lua libslirp
+brew install llvm libomp boost wget pkg-config lua libslirp
```
#### Build
diff --git a/doc/Doxyfile b/doc/Doxyfile
index c7d38e32c..154b51eae 100644
--- a/doc/Doxyfile
+++ b/doc/Doxyfile
@@ -924,7 +924,7 @@ EXCLUDE_SYMLINKS = NO
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
-EXCLUDE_PATTERNS = *.pb.*
+EXCLUDE_PATTERNS =
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
diff --git a/src/.styluaignore b/src/.styluaignore
new file mode 100644
index 000000000..67d08e3a1
--- /dev/null
+++ b/src/.styluaignore
@@ -0,0 +1 @@
+cartesi/third-party/*
diff --git a/src/Makefile b/src/Makefile
index eff7a414c..595bca814 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -14,11 +14,11 @@
# with this program (see COPYING). If not, see .
#
-EMULATOR_MARCHID=19
+EMULATOR_MARCHID=20
# Every new emulator release should bump these constants
EMULATOR_VERSION_MAJOR=0
-EMULATOR_VERSION_MINOR=19
+EMULATOR_VERSION_MINOR=20
EMULATOR_VERSION_PATCH=0
EMULATOR_VERSION_LABEL=
@@ -61,8 +61,8 @@ PICCFLAGS=-fPIC
SOLDFLAGS=-dynamiclib -undefined dynamic_lookup
LIBLDFLAGS=-dynamiclib
EXELDFLAGS=
-PTHREAD_CFLAGS=
-PTHREAD_LDFLAGS=-lpthread
+PTHREAD_CFLAGS=-I/opt/local/include/libomp -fopenmp
+PTHREAD_LDFLAGS=-L/opt/local/lib/libomp -lgomp
CC=clang
CXX=clang++
AR=libtool -static -o
@@ -86,8 +86,8 @@ SLIRP_INC=-I$(BREW_PREFIX)/libslirp/include
# Macports installation
else ifneq (,$(PORT_PREFIX))
INSTALL_PREFIX=/opt/local
-BOOST_LIB_DIR=-L$(INSTALL_PREFIX)/libexec/boost/1.81/lib
-BOOST_INC=-I$(INSTALL_PREFIX)/libexec/boost/1.81/include
+BOOST_LIB_DIR=-L$(INSTALL_PREFIX)/libexec/boost/1.87/lib
+BOOST_INC=-I$(INSTALL_PREFIX)/libexec/boost/1.87/include
SLIRP_LIB=-L$(INSTALL_PREFIX)/lib -lslirp
SLIRP_INC=-I$(INSTALL_PREFIX)/include
@@ -97,7 +97,7 @@ endif
SO_EXT=dylib
LIBCARTESI_LDFLAGS=-install_name '@rpath/$(LIBCARTESI)'
-LIBCARTESI_MERKLE_TREE_LDFLAGS=-install_name '@rpath/$(LIBCARTESI_MERKLE_TREE)'
+LIBCARTESI_HASH_TREE_LDFLAGS=-install_name '@rpath/$(LIBCARTESI_HASH_TREE)'
LIBCARTESI_JSONRPC_LDFLAGS=-install_name '@rpath/$(LIBCARTESI_JSONRPC)' -Wl,-rpath,@loader_path
LUACARTESI_LDFLAGS=-install_name '@rpath/cartesi.so'
LUACARTESI_JSONRPC_LDFLAGS=-install_name '@rpath/cartesi/jsonrpc.so' -Wl,-rpath,@loader_path/..
@@ -111,8 +111,8 @@ PICCFLAGS=-fPIC
SOLDFLAGS=-shared $(PICCFLAGS) $(GCLDFLAGS)
LIBLDFLAGS=$(SOLDFLAGS) -Wl,--no-undefined
EXELDFLAGS=$(GCLDFLAGS) -Wl,--no-undefined
-PTHREAD_CFLAGS=-pthread
-PTHREAD_LDFLAGS=-pthread -lpthread
+PTHREAD_CFLAGS=-fopenmp
+PTHREAD_LDFLAGS=-fopenmp
CC=gcc
CXX=g++
AR=ar rcs
@@ -123,7 +123,7 @@ SLIRP_INC=
SLIRP_LIB=-lslirp
SO_EXT=so
LIBCARTESI_LDFLAGS=
-LIBCARTESI_MERKLE_TREE_LDFLAGS=
+LIBCARTESI_HASH_TREE_LDFLAGS=
LIBCARTESI_JSONRPC_LDFLAGS=-Wl,-rpath,'$$ORIGIN'
LUACARTESI_LDFLAGS=
LUACARTESI_JSONRPC_LDFLAGS=-Wl,-rpath,'$$ORIGIN/..'
@@ -132,15 +132,15 @@ PROFILE_DATA=
endif
LIBCARTESI=libcartesi-$(EMULATOR_VERSION_MAJOR).$(EMULATOR_VERSION_MINOR).$(SO_EXT)
-LIBCARTESI_MERKLE_TREE=libcartesi_merkle_tree-$(EMULATOR_VERSION_MAJOR).$(EMULATOR_VERSION_MINOR).$(SO_EXT)
+LIBCARTESI_HASH_TREE=libcartesi_hash_tree-$(EMULATOR_VERSION_MAJOR).$(EMULATOR_VERSION_MINOR).$(SO_EXT)
LIBCARTESI_JSONRPC=libcartesi_jsonrpc-$(EMULATOR_VERSION_MAJOR).$(EMULATOR_VERSION_MINOR).$(SO_EXT)
ifeq ($(slirp),yes)
# Workaround for building with macports lua-luarocks installation
-machine.o: INCS+=$(SLIRP_INC)
-machine.clang-tidy: INCS+=$(SLIRP_INC)
-virtio-net-carrier-slirp.o: INCS+=$(SLIRP_INC)
-virtio-net-carrier-slirp.clang-tidy: INCS+=$(SLIRP_INC)
+machine-address-ranges.o: INCS+=$(SLIRP_INC)
+machine-address-ranges.clang-tidy: INCS+=$(SLIRP_INC)
+virtio-net-user-address-range.o: INCS+=$(SLIRP_INC)
+virtio-net-user-address-range.clang-tidy: INCS+=$(SLIRP_INC)
#INCS+=$(SLIRP_INC)
LIBCARTESI_COMMON_LIBS+=$(SLIRP_LIB)
else
@@ -148,22 +148,23 @@ DEFS+=-DNO_SLIRP
endif
LIBCARTESI_LIBS=$(LIBCARTESI_COMMON_LIBS)
-LIBCARTESI_MERKLE_TREE_LIBS=
+LIBCARTESI_HASH_TREE_LIBS=
LIBCARTESI_JSONRPC_LIBS=
LUACARTESI_LIBS=$(LIBCARTESI_COMMON_LIBS)
LUACARTESI_JSONRPC_LIBS=
CARTESI_JSONRPC_MACHINE_LIBS=$(LIBCARTESI_COMMON_LIBS)
-CARTESI_MERKLE_TREE_HASH_LIBS=
+CARTESI_HASH_TREE_HASH_LIBS=
-#DEFS+= -DMT_ALL_DIRTY
+C_WARNS=-Wall -Wextra -Wpedantic
+# C_WARNS+=-Wshadow -Wconversion -Wsign-conversion -Wsign-promo -Wuseless-cast -Wformat=2
+CXX_WARNS=$(C_WARNS)
-WARNS=-Wall -Wextra -Wpedantic
-CLANG_TIDY_WARNS=-Wthread-safety -Wglobal-constructors
+CLANG_TIDY_WARNS=-Wthread-safety -Wglobal-constructors -Wundef -Wredundant-decls -Wextra-semi
# Place our include directories before the system's
INCS+= \
+ -I../third-party/ankerl \
-I../third-party/llvm-flang-uint128 \
- -I../third-party/tiny_sha3 \
-I../third-party/nlohmann-json \
-I../third-party/downloads \
$(BOOST_INC)
@@ -175,25 +176,35 @@ DEFS+=-D_FILE_OFFSET_BITS=64
DEFS+=-DJSON_HAS_FILESYSTEM=0
ifeq ($(dump),yes)
-#DEFS+=-DDUMP_ILLEGAL_INSN_EXCEPTIONS
-#DEFS+=-DDUMP_EXCEPTIONS
-#DEFS+=-DDUMP_INTERRUPTS
-DEFS+=-DDUMP_HIST
-#DEFS+=-DDUMP_MMU_EXCEPTIONS
-#DEFS+=-DDUMP_INVALID_MEM_ACCESS
-#DEFS+=-DDUMP_INVALID_CSR
-#DEFS+=-DDUMP_INSN
-#DEFS+=-DDUMP_REGS
-#DEFS+=-DDUMP_COUNTERS
+DUMP_DEFS+=-DDUMP_HASH_TREE_STATS
+DUMP_DEFS+=-DDUMP_ILLEGAL_INSN_EXCEPTIONS
+DUMP_DEFS+=-DDUMP_EXCEPTIONS
+DUMP_DEFS+=-DDUMP_INTERRUPTS
+DUMP_DEFS+=-DDUMP_MMU_EXCEPTIONS
+DUMP_DEFS+=-DDUMP_INVALID_CSR
+DUMP_DEFS+=-DDUMP_REGS
+DUMP_DEFS+=-DDUMP_INSN_HIST
+DUMP_DEFS+=-DDUMP_STATS
+DUMP_DEFS+=-DDUMP_INSN
+DUMP_DEFS+=-DDUMP_UARCH_INSN
+DUMP_DEFS+=-DDUMP_SCOPED_NOTE
+DUMP_DEFS+=-DDUMP_STATE_ACCESS
+DUMP_DEFS+=-DDUMP_UARCH_STATE_ACCESS
endif
+DEFS += $(DUMP_DEFS)
+# Pass down UARCH_DEFS to sub-makefiles
+export UARCH_DEFS += $(DUMP_DEFS)
# By default we compile in release with debug information,
# so the emulator is packaged correctly by default.
-ifeq (,$(filter yes,$(relwithdebinfo) $(release) $(debug) $(sanitize)))
+ifeq (,$(filter yes,$(relwithdebinfo) $(release) $(debug) $(coverage) $(sanitize)))
relwithdebinfo=yes
endif
-ifeq ($(relwithdebinfo),yes)
+ifeq ($(coverage),yes)
+OPTFLAGS+=-Og -g -fno-omit-frame-pointer -fno-dce -fno-inline
+DEFS+=-DCODE_COVERAGE
+else ifeq ($(relwithdebinfo),yes)
OPTFLAGS+=-O2 -g
INTERPRET_CXXFLAGS+=-DNDEBUG # disable asserts only for interpret.cpp
else ifeq ($(release),yes)
@@ -213,8 +224,8 @@ ifneq ($(git_commit),)
DEFS+=-DGIT_COMMIT='"$(git_commit)"'
endif
-# The SHA3 is third party library we always want to compile with O3
-SHA3_CFLAGS=-O3
+# Hashing libraries have special optimizations flags
+HASH_CFLAGS=-O3 -DNDEBUG -funroll-loops -fno-stack-protector
# Optimization flags for the interpreter
ifneq (,$(filter yes,$(relwithdebinfo) $(release)))
@@ -274,15 +285,13 @@ PGO_WORKLOAD=\
whetstone 25000
LINTER_IGNORE_SOURCES=
-LINTER_IGNORE_HEADERS=interpret-jump-table.h
LINTER_SOURCES=$(filter-out $(LINTER_IGNORE_SOURCES),$(strip $(wildcard *.cpp) $(wildcard *.c)))
-LINTER_HEADERS=$(filter-out $(LINTER_IGNORE_HEADERS),$(strip $(wildcard *.hpp) $(wildcard *.h)))
CLANG_TIDY=clang-tidy
CLANG_TIDY_TARGETS=$(patsubst %.cpp,%.clang-tidy,$(patsubst %.c,%.clang-tidy,$(LINTER_SOURCES)))
CLANG_FORMAT=clang-format
-CLANG_FORMAT_UARCH_FILES:=$(wildcard ../uarch/*.cpp)
+CLANG_FORMAT_UARCH_FILES:=$(wildcard ../uarch/*.cpp) $(wildcard ../uarch/*.h)
CLANG_FORMAT_UARCH_FILES:=$(filter-out %uarch-printf%,$(strip $(CLANG_FORMAT_UARCH_FILES)))
CLANG_FORMAT_FILES:=$(wildcard *.cpp) $(wildcard *.c) $(wildcard *.h) $(wildcard *.hpp) $(CLANG_FORMAT_UARCH_FILES)
CLANG_FORMAT_IGNORE_FILES:=interpret-jump-table.h
@@ -290,11 +299,10 @@ CLANG_FORMAT_FILES:=$(strip $(CLANG_FORMAT_FILES))
CLANG_FORMAT_FILES:=$(filter-out $(CLANG_FORMAT_IGNORE_FILES),$(strip $(CLANG_FORMAT_FILES)))
STYLUA=stylua
-STYLUA_FLAGS=--indent-type Spaces --collapse-simple-statement Always
+STYLUA_FLAGS=--indent-type Spaces --collapse-simple-statement Always --respect-ignores
EMPTY:=
SPACE:=$(EMPTY) $(EMPTY)
-CLANG_TIDY_HEADER_FILTER=$(CURDIR)/($(subst $(SPACE),|,$(LINTER_HEADERS)))
ifeq ($(threads),yes)
CFLAGS+=$(PTHREAD_CFLAGS)
@@ -302,22 +310,24 @@ CXXFLAGS+=$(PTHREAD_CFLAGS)
LDFLAGS+=$(PTHREAD_LDFLAGS)
else
DEFS+=-DNO_THREADS
+C_WARNS+=-Wno-unknown-pragmas
+CXX_WARNS+=-Wno-unknown-pragmas
endif
-CXXFLAGS+=$(OPTFLAGS) -std=gnu++20 -fvisibility=hidden -MMD $(PICCFLAGS) $(CC_MARCH) $(INCS) $(GCFLAGS) $(UBCFLAGS) $(DEFS) $(WARNS)
-CFLAGS+=$(OPTFLAGS) -std=gnu99 -fvisibility=hidden -MMD $(PICCFLAGS) $(CC_MARCH) $(INCS) $(GCFLAGS) $(UBCFLAGS) $(DEFS) $(WARNS)
+CXXFLAGS+=$(OPTFLAGS) -std=gnu++23 -fvisibility=hidden -MMD $(PICCFLAGS) $(CC_MARCH) $(INCS) $(GCFLAGS) $(UBCFLAGS) $(DEFS) $(CXX_WARNS)
+CFLAGS+=$(OPTFLAGS) -std=gnu99 -fvisibility=hidden -MMD $(PICCFLAGS) $(CC_MARCH) $(INCS) $(GCFLAGS) $(UBCFLAGS) $(DEFS) $(C_WARNS)
LDFLAGS+=$(UBLDFLAGS)
ifeq ($(coverage),yes)
ifeq ($(COVERAGE_TOOLCHAIN),gcc)
CC=gcc
CXX=g++
-CXXFLAGS+=-g -Og -fno-dce -fno-inline -DCODE_COVERAGE --coverage
+CXXFLAGS+=--coverage
LDFLAGS+=--coverage
else ifeq ($(COVERAGE_TOOLCHAIN),clang)
CC=clang
CXX=clang++
-CXXFLAGS+=-g -O0 -DCODE_COVERAGE -fprofile-instr-generate -fcoverage-mapping
+CXXFLAGS+=-fprofile-instr-generate -fcoverage-mapping
LDFLAGS+=-fprofile-instr-generate -fcoverage-mapping
else ifneq ($(COVERAGE_TOOLCHAIN),)
$(error invalid value for COVERAGE_TOOLCHAIN: $(COVERAGE_TOOLCHAIN))
@@ -331,84 +341,77 @@ SOLDFLAGS+=$(MYSOLDFLAGS)
LIBLDFLAGS+=$(MYLIBLDFLAGS)
EXELDFLAGS+=$(MYEXELDFLAGS)
-all: libcartesi libcartesi_merkle_tree libcartesi_jsonrpc c-api luacartesi cartesi-jsonrpc-machine hash
+all: libcartesi libcartesi_hash_tree libcartesi_jsonrpc c-api luacartesi cartesi-jsonrpc-machine hash
luacartesi: libluacartesi.a cartesi.so libluacartesi_jsonrpc.a cartesi/jsonrpc.so
jsonrpc: cartesi/jsonrpc.so cartesi-jsonrpc-machine
-hash: cartesi-merkle-tree-hash
+hash: cartesi-hash-tree-hash
-c-api: $(LIBCARTESI) $(LIBCARTESI_MERKLE_TREE) $(LIBCARTESI_JSONRPC)
+c-api: $(LIBCARTESI) $(LIBCARTESI_HASH_TREE) $(LIBCARTESI_JSONRPC)
.PHONY: all generate use clean lint format format-lua check-format check-format-lua luacartesi hash c-api compile_flags.txt
LIBCARTESI_OBJS:= \
- pma-driver.o \
- clint.o \
- clint-factory.o \
- plic.o \
- plic-factory.o \
- virtio-factory.o \
- virtio-device.o \
- virtio-console.o \
- virtio-p9fs.o \
- virtio-net.o \
- virtio-net-carrier-tuntap.o \
- virtio-net-carrier-slirp.o \
- dtb.o \
- os.o \
- htif.o \
- htif-factory.o \
- shadow-state.o \
- shadow-state-factory.o \
- shadow-pmas-factory.o \
- shadow-tlb.o \
- shadow-tlb-factory.o \
- shadow-uarch-state.o \
- shadow-uarch-state-factory.o \
- pma.o \
- machine.o \
- machine-config.o \
- json-util.o \
base64.o \
+ clint-address-range.o \
+ dtb.o \
+ hash-tree.o \
+ htif-address-range.o \
interpret.o \
- virtual-machine.o \
- uarch-machine.o \
- uarch-step.o \
- uarch-reset-state.o \
- sha3.o \
- machine-merkle-tree.o \
- pristine-merkle-tree.o \
- uarch-interpret.o \
+ json-util.o \
machine-c-api.o \
+ machine-config.o \
+ machine.o \
+ machine-address-ranges.o \
+ memory-address-range.o \
+ os.o \
+ os-mapped-memory.o \
+ os-filesystem.o \
+ plic-address-range.o \
+ back-merkle-tree.o \
+ replay-step-state-access-interop.o \
+ send-cmio-response.o \
+ keccak-256-hasher.o \
+ sha-256-hasher.o \
+ is-pristine.o \
+ uarch-pristine-hash.o \
uarch-pristine-ram.o \
uarch-pristine-state-hash.o \
- uarch-pristine-hash.o \
- send-cmio-response.o \
- replay-step-state-access-interop.o
+ uarch-reset-state.o \
+ uarch-step.o \
+ local-machine.o \
+ uarch-interpret.o \
+ virtio-address-range.o \
+ virtio-console-address-range.o \
+ virtio-p9fs-address-range.o \
+ virtio-net-address-range.o \
+ virtio-net-tuntap-address-range.o \
+ virtio-net-user-address-range.o
CARTESI_CLUA_OBJS:= \
clua.o \
- clua-i-virtual-machine.o
+ clua-i-machine.o \
+ uarch-pristine-ram.o \
+ uarch-pristine-state-hash.o \
+ uarch-pristine-hash.o
LUACARTESI_OBJS:= \
clua-cartesi.o \
$(CARTESI_CLUA_OBJS)
-LIBCARTESI_MERKLE_TREE_OBJS:= \
- sha3.o \
- machine-merkle-tree.o \
+LIBCARTESI_HASH_TREE_OBJS:= \
+ keccak-256-hasher.o \
+ sha-256-hasher.o \
+ is-pristine.o \
back-merkle-tree.o \
- pristine-merkle-tree.o \
- complete-merkle-tree.o \
- full-merkle-tree.o
-CARTESI_MERKLE_TREE_HASH_OBJS:= \
- merkle-tree-hash.o
+CARTESI_HASH_TREE_HASH_OBJS:= \
+ hash-tree-hash.o
LIBCARTESI_JSONRPC_OBJS:= \
- jsonrpc-virtual-machine.o \
+ jsonrpc-machine.o \
os.o \
jsonrpc-machine-c-api.o \
base64.o \
@@ -426,7 +429,7 @@ CARTESI_JSONRPC_MACHINE_OBJS:= \
ifeq ($(gperf),yes)
DEFS+=-DGPERF
LIBCARTESI_LIBS+=-lprofiler
-LIBCARTESI_MERKLE_TREE_LIBS+=-lprofiler
+LIBCARTESI_HASH_TREE_LIBS+=-lprofiler
LIBCARTESI_JSONRPC_LIBS+=-lprofiler
LUACARTESI_LIBS+=-lprofiler
LUACARTESI_JSONRPC_LIBS+=-lprofiler
@@ -439,10 +442,10 @@ so-version:
@echo $(EMULATOR_VERSION_MAJOR).$(EMULATOR_VERSION_MINOR)
libcartesi: libcartesi.a libcartesi.$(SO_EXT)
-libcartesi.$(SO_EXT): $(LIBCARTESI) $(LIBCARTESI_MERKLE_TREE)
+libcartesi.$(SO_EXT): $(LIBCARTESI) $(LIBCARTESI_HASH_TREE)
ln -sf $< $@
-libcartesi_merkle_tree: libcartesi_merkle_tree.a libcartesi_merkle_tree.$(SO_EXT)
-libcartesi_merkle_tree.$(SO_EXT): $(LIBCARTESI_MERKLE_TREE)
+libcartesi_hash_tree: libcartesi_hash_tree.a libcartesi_hash_tree.$(SO_EXT)
+libcartesi_hash_tree.$(SO_EXT): $(LIBCARTESI_HASH_TREE)
ln -sf $< $@
libcartesi_jsonrpc: libcartesi_jsonrpc.a libcartesi_jsonrpc.$(SO_EXT)
libcartesi_jsonrpc.$(SO_EXT): $(LIBCARTESI_JSONRPC)
@@ -451,7 +454,7 @@ libcartesi_jsonrpc.$(SO_EXT): $(LIBCARTESI_JSONRPC)
libcartesi.a: $(LIBCARTESI_OBJS)
$(AR) $@ $^
-libcartesi_merkle_tree.a: $(LIBCARTESI_MERKLE_TREE_OBJS)
+libcartesi_hash_tree.a: $(LIBCARTESI_HASH_TREE_OBJS)
$(AR) $@ $^
libcartesi_jsonrpc.a: $(LIBCARTESI_JSONRPC_OBJS)
@@ -463,8 +466,8 @@ libluacartesi.a: $(LUACARTESI_OBJS)
libluacartesi_jsonrpc.a: $(LUACARTESI_JSONRPC_OBJS)
$(AR) $@ $^
-$(LIBCARTESI_MERKLE_TREE): $(LIBCARTESI_MERKLE_TREE_OBJS)
- $(CXX) -o $@ $^ $(LIBCARTESI_MERKLE_TREE_LIBS) $(LDFLAGS) $(LIBCARTESI_MERKLE_TREE_LDFLAGS) $(LIBLDFLAGS)
+$(LIBCARTESI_HASH_TREE): $(LIBCARTESI_HASH_TREE_OBJS)
+ $(CXX) -o $@ $^ $(LIBCARTESI_HASH_TREE_LIBS) $(LDFLAGS) $(LIBCARTESI_HASH_TREE_LDFLAGS) $(LIBLDFLAGS)
$(LIBCARTESI): $(LIBCARTESI_OBJS)
$(CXX) -o $@ $^ $(LIBCARTESI_LIBS) $(LDFLAGS) $(LIBCARTESI_LDFLAGS) $(LIBLDFLAGS)
@@ -527,8 +530,8 @@ $(PROFILE_DATA):
llvm-profdata merge -output=default.profdata default*.profraw
endif
-cartesi-merkle-tree-hash: $(CARTESI_MERKLE_TREE_HASH_OBJS) libcartesi_merkle_tree.a
- $(CXX) -o $@ $^ $(CARTESI_MERKLE_TREE_HASH_LIBS) $(LDFLAGS) $(EXELDFLAGS)
+cartesi-hash-tree-hash: $(CARTESI_HASH_TREE_HASH_OBJS) libcartesi_hash_tree.a
+ $(CXX) -o $@ $^ $(CARTESI_HASH_TREE_HASH_LIBS) $(LDFLAGS) $(EXELDFLAGS)
cartesi-jsonrpc-machine: $(CARTESI_JSONRPC_MACHINE_OBJS) libcartesi_jsonrpc.a libcartesi.a
$(CXX) -o $@ $^ $(CARTESI_JSONRPC_MACHINE_LIBS) $(LDFLAGS) $(EXELDFLAGS)
@@ -547,17 +550,15 @@ jsonrpc-discover.cpp: jsonrpc-discover.json
echo '} // namespace cartesi' >> jsonrpc-discover.cpp
%.clang-tidy: %.cpp machine-c-version.h interpret-jump-table.h
- @$(CLANG_TIDY) --header-filter='$(CLANG_TIDY_HEADER_FILTER)' $(CLANG_TIDY_FLAGS) $< -- $(CXXFLAGS) $(CLANG_TIDY_WARNS) $(LUA_INC) -DCLANG_TIDY_LINT 2>/dev/null
+ @$(CLANG_TIDY) $(CLANG_TIDY_FLAGS) $< -- $(CXXFLAGS) $(CLANG_TIDY_WARNS) $(LUA_INC) -DCLANG_TIDY_LINT 2>/dev/null
@$(CXX) $(CXXFLAGS) $(LUA_INC) $< -MM -MT $@ -MF $@.d > /dev/null 2>&1
@touch $@
%.clang-tidy: %.c
- @$(CLANG_TIDY) --header-filter='$(CLANG_TIDY_HEADER_FILTER)' $(CLANG_TIDY_FLAGS) $< -- $(CFLAGS) $(CLANG_TIDY_WARNS) -DCLANG_TIDY_LINT 2>/dev/null
+ @$(CLANG_TIDY) $(CLANG_TIDY_FLAGS) $< -- $(CFLAGS) $(CLANG_TIDY_WARNS) -DCLANG_TIDY_LINT 2>/dev/null
@$(CC) $(CFLAGS) $< -MM -MT $@ -MF $@.d > /dev/null 2>&1
@touch $@
-sha3.o: ../third-party/tiny_sha3/sha3.c
- $(CC) $(CFLAGS) $(SHA3_CFLAGS) -c -o $@ $<
uarch-pristine-ram.o: $(UARCH_PRISTINE_RAM_C)
$(CC) $(CFLAGS) -c -o $@ $<
@@ -571,6 +572,15 @@ interpret-jump-table.h: ../tools/gen-interpret-jump-table.lua
interpret.o: interpret.cpp machine-c-version.h interpret-jump-table.h
$(CXX) $(CXXFLAGS) $(INTERPRET_CXXFLAGS) -c -o $@ $<
+keccak-256-hasher.o: keccak-256-hasher.cpp
+ $(CXX) $(CXXFLAGS) $(HASH_CFLAGS) -c -o $@ $<
+
+sha-256-hasher.o: sha-256-hasher.cpp
+ $(CXX) $(CXXFLAGS) $(HASH_CFLAGS) -c -o $@ $<
+
+is-pristine.o: is-pristine.cpp
+ $(CXX) $(CXXFLAGS) $(HASH_CFLAGS) -c -o $@ $<
+
%.o: %.cpp machine-c-version.h
$(CXX) $(CXXFLAGS) -c -o $@ $<
@@ -603,7 +613,7 @@ clean-libcartesi: clean-objs
@rm -f *.so *.a cartesi/*.so *.dylib
clean-executables:
- @rm -f cartesi-jsonrpc-machine cartesi-merkle-tree-hash compute-uarch-pristine-hash
+ @rm -f cartesi-jsonrpc-machine cartesi-hash-tree-hash compute-uarch-pristine-hash
clean-coverage:
@rm -f *.profdata *.profraw *.gcda *.gcov coverage.info coverage.txt
diff --git a/src/access-log.h b/src/access-log.h
index 97d439a77..0c1b126b8 100644
--- a/src/access-log.h
+++ b/src/access-log.h
@@ -21,9 +21,7 @@
/// \brief State access log implementation
#include
-#include
#include
-#include
#include
#include
#include
@@ -32,9 +30,12 @@
#include
+#include "assert-printf.h"
#include "bracket-note.h"
+#include "hash-tree-constants.h"
+#include "hash-tree.h"
#include "machine-c-api.h"
-#include "machine-merkle-tree.h"
+#include "machine-hash.h"
#include "strict-aliasing.h"
namespace cartesi {
@@ -54,12 +55,12 @@ static inline void set_word_access_data(uint64_t w, access_data &ad) {
ad.insert(ad.end(), p, p + sizeof(w));
}
-static inline void replace_word_access_data(uint64_t w, access_data &ad, int offset = 0) {
+static inline void replace_word_access_data(uint64_t w, access_data &ad, uint64_t offset = 0) {
assert(ad.size() >= offset + sizeof(uint64_t));
aliased_aligned_write(ad.data() + offset, w);
}
-static inline uint64_t get_word_access_data(const access_data &ad, int offset = 0) {
+static inline uint64_t get_word_access_data(const access_data &ad, uint64_t offset = 0) {
assert(ad.size() >= offset + sizeof(uint64_t));
return aliased_aligned_read<uint64_t>(ad.data() + offset);
}
@@ -67,12 +68,9 @@ static inline uint64_t get_word_access_data(const access_data &ad, int offset =
/// \brief Records an access to the machine state
class access {
- using hasher_type = machine_merkle_tree::hasher_type;
-
public:
- using hash_type = machine_merkle_tree::hash_type;
- using sibling_hashes_type = std::vector<hash_type>;
- using proof_type = machine_merkle_tree::proof_type;
+ using proof_type = hash_tree::proof_type;
+ using sibling_hashes_type = hash_tree::sibling_hashes_type;
void set_type(access_type type) {
m_type = type;
@@ -143,43 +141,43 @@ class access {
/// \brief Sets hash of data that was written at address after access.
/// \param hash Hash of new data at address.
- void set_written_hash(const hash_type &hash) {
+ void set_written_hash(const machine_hash &hash) {
m_written_hash = hash;
}
/// \brief Gets hash of data that was written at address after access.
/// \returns Hash of written data at address.
- const std::optional<hash_type> &get_written_hash() const {
+ const std::optional<machine_hash> &get_written_hash() const {
return m_written_hash;
}
- std::optional<hash_type> &get_written_hash() {
+ std::optional<machine_hash> &get_written_hash() {
return m_written_hash;
}
/// \brief Sets hash of data that can be read at address before access.
/// \param hash Hash of data at address.
- void set_read_hash(const hash_type &hash) {
+ void set_read_hash(const machine_hash &hash) {
m_read_hash = hash;
}
/// \brief Gets hash of data that can be read at address before access.
/// \returns Hash of data at address.
- const hash_type &get_read_hash() const {
+ const machine_hash &get_read_hash() const {
return m_read_hash;
}
- hash_type &get_read_hash() {
+ machine_hash &get_read_hash() {
return m_read_hash;
}
/// \brief Constructs a proof using this access' data and a given root hash.
/// \param root_hash Hash to be used as the root of the proof.
/// \return The corresponding proof
- proof_type make_proof(const hash_type root_hash) const {
- // the access can be of data smaller than the merkle tree word size
- // however, the proof must be at least as big as the merkle tree word size
- const int proof_log2_size = std::max(m_log2_size, machine_merkle_tree::get_log2_word_size());
- // the proof address is the access address aligned to the merkle tree word size
- const uint64_t proof_address = m_address & ~(machine_merkle_tree::get_word_size() - 1);
+ proof_type make_proof(const machine_hash root_hash) const {
+ // the access can be of data smaller than the hash tree word size
+ // however, the proof must be at least as big as the hash tree word size
+ const int proof_log2_size = std::max(m_log2_size, HASH_TREE_LOG2_WORD_SIZE);
+ // the proof address is the access address aligned to the hash tree word size
+ const uint64_t proof_address = m_address & ~(HASH_TREE_WORD_SIZE - 1);
if (!m_sibling_hashes.has_value()) {
throw std::runtime_error("can't make proof if access doesn't have sibling hashes");
}
@@ -218,9 +216,9 @@ class access {
uint64_t m_address{0}; ///< Address of access
int m_log2_size{0}; ///< Log2 of size of access
std::optional<access_data> m_read; ///< Data before access
- hash_type m_read_hash{}; ///< Hash of data before access
+ machine_hash m_read_hash{}; ///< Hash of data before access
std::optional<access_data> m_written; ///< Written data
- std::optional<hash_type> m_written_hash; ///< Hash of written data
+ std::optional<machine_hash> m_written_hash; ///< Hash of written data
std::optional<sibling_hashes_type> m_sibling_hashes; ///< Hashes of siblings in path from address to root
};
@@ -258,13 +256,14 @@ class access_log {
};
private:
- std::vector<access> m_accesses; ///< List of all accesses
- std::vector<bracket_note> m_brackets; ///< Begin/End annotations
- std::vector<std::string> m_notes; ///< Per-access annotations
- type m_log_type; ///< Log type
+ std::vector<access> m_accesses; ///< List of all accesses
+ std::vector<bracket_note> m_brackets; ///< Begin/End annotations
+ std::vector<std::string> m_notes; ///< Per-access annotations
+ type m_log_type; ///< Log type
+ std::vector<bracket_note>::size_type m_outstanding_ends; ///< Number of outstanding unmatched end brackets
public:
- explicit access_log(type log_type) : m_log_type(log_type) {
+ explicit access_log(type log_type) : m_log_type(log_type), m_outstanding_ends{0} {
;
}
@@ -273,8 +272,16 @@ class access_log {
m_accesses(std::forward(accesses)),
m_brackets(std::forward(brackets)),
m_notes(std::forward(notes)),
- m_log_type(log_type) {
- ;
+ m_log_type(log_type),
+ m_outstanding_ends(0) {
+ for (const auto &b : m_brackets) {
+ if (b.type == bracket_type::begin) {
+ ++m_outstanding_ends;
+ }
+ if (b.type == bracket_type::end && m_outstanding_ends > 0) {
+ --m_outstanding_ends;
+ }
+ };
}
/// \brief Clear the log
@@ -282,28 +289,43 @@ class access_log {
m_accesses.clear();
m_notes.clear();
m_brackets.clear();
+ m_outstanding_ends = 0;
}
/// \brief Adds a bracket annotation to the log (if the log type includes annotations)
/// \param type Bracket type
/// \param text Annotation contents
- void push_bracket(bracket_type type, const char *text) {
+ void push_begin_bracket(const char *text) {
if (m_log_type.has_annotations()) {
- if (type == bracket_type::begin) {
- // make sure we have room for end bracket as well. that way,
- // unless the user use unbalanced brackets, there is no way we
- // would throw an exception for lack of memory on and end bracket
- m_brackets.reserve(m_brackets.size() + 2);
+ // Increment number of outstanding end brackets we are expecting
+ ++m_outstanding_ends;
+ // Make sure we have room for the matching end bracket as well.
+ // That way, unless the user is messing with unbalanced brackets, there is no way we
+ // would throw an exception for lack of memory on the matching end bracket
+ m_brackets.push_back(bracket_note{.type = bracket_type::begin, .where = m_accesses.size(), .text = text});
+ m_brackets.reserve(m_brackets.size() + m_outstanding_ends);
+ }
+ }
+
+ void push_end_bracket(const char *text) noexcept {
+ if (m_log_type.has_annotations()) {
+ // If we failed to push, it was because the system is completely screwed anyway *and* the
+ // user is using unbalanced brackets. Therefore, it's OK to quietly ignore the error.
+ try {
+ m_brackets.push_back(bracket_note{.type = bracket_type::end, .where = m_accesses.size(), .text = text});
+ } catch (...) { // NOLINT(bugprone-empty-catch)
+ }
+ // Decrement number of outstanding end brackets we are expecting
+ if (m_outstanding_ends > 0) {
+ --m_outstanding_ends;
}
- m_brackets.push_back(bracket_note{.type = type, .where = m_accesses.size(), .text = text});
}
}
/// \brief Adds a new access to the log
/// \tparam A Type of access
/// \param a Access object
- /// \param text Annotation contents (added if the log
- /// type includes annotations, ignored otherwise)
+ /// \param text Annotation contents (added if the log type includes annotations, ignored otherwise)
template <typename A>
void push_access(A &&a, const char *text) {
m_accesses.push_back(std::forward<A>(a));
diff --git a/src/address-range-constants.h b/src/address-range-constants.h
new file mode 100644
index 000000000..f09a7d47c
--- /dev/null
+++ b/src/address-range-constants.h
@@ -0,0 +1,85 @@
+// Copyright Cartesi and individual authors (see AUTHORS)
+// SPDX-License-Identifier: LGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify it under
+// the terms of the GNU Lesser General Public License as published by the Free
+// Software Foundation, either version 3 of the License, or (at your option) any
+// later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+// PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License along
+// with this program (see COPYING). If not, see <https://www.gnu.org/licenses/>.
+//
+
+#ifndef ADDRESS_RANGE_CONSTANTS_H
+#define ADDRESS_RANGE_CONSTANTS_H
+
+#include <cstdint>
+
+#include "address-range-defines.h"
+
+namespace cartesi {
+
+/// \brief Fixed address ranges.
+enum AR_ranges : uint64_t {
+ AR_SHADOW_STATE_START = EXPAND_UINT64_C(AR_SHADOW_STATE_START_DEF), ///< Start of shadow state range
+ AR_SHADOW_STATE_LENGTH = EXPAND_UINT64_C(AR_SHADOW_STATE_LENGTH_DEF), ///< Length of shadow state range
+ AR_SHADOW_REGISTERS_START = EXPAND_UINT64_C(AR_SHADOW_REGISTERS_START_DEF), ///< Start of shadow registers range
+ AR_SHADOW_REGISTERS_LENGTH = EXPAND_UINT64_C(AR_SHADOW_REGISTERS_LENGTH_DEF), ///< Length of shadow registers range
+ AR_SHADOW_REVERT_ROOT_HASH_START =
+ EXPAND_UINT64_C(AR_SHADOW_REVERT_ROOT_HASH_START_DEF), ///< Start of revert root hash range
+ AR_SHADOW_TLB_START = EXPAND_UINT64_C(AR_SHADOW_TLB_START_DEF), ///< Start of shadow TLB range
+ AR_SHADOW_TLB_LENGTH = EXPAND_UINT64_C(AR_SHADOW_TLB_LENGTH_DEF), ///< Length of shadow TLB range
+ AR_PMAS_START = EXPAND_UINT64_C(AR_PMAS_START_DEF), ///< Start of PMAS list range
+ AR_PMAS_LENGTH = EXPAND_UINT64_C(AR_PMAS_LENGTH_DEF), ///< Length of PMAS list range
+ AR_DTB_START = EXPAND_UINT64_C(AR_DTB_START_DEF), ///< Start of DTB range
+ AR_DTB_LENGTH = EXPAND_UINT64_C(AR_DTB_LENGTH_DEF), ///< Length of DTB range
+ AR_SHADOW_UARCH_STATE_START =
+ EXPAND_UINT64_C(AR_SHADOW_UARCH_STATE_START_DEF), ///< Start of uarch shadow state range
+ AR_SHADOW_UARCH_STATE_LENGTH =
+ EXPAND_UINT64_C(AR_SHADOW_UARCH_STATE_LENGTH_DEF), ///< Length of uarch shadow state range
+ AR_CLINT_START = EXPAND_UINT64_C(AR_CLINT_START_DEF), ///< Start of CLINT range
+ AR_CLINT_LENGTH = EXPAND_UINT64_C(AR_CLINT_LENGTH_DEF), ///< Length of CLINT range
+ AR_PLIC_START = EXPAND_UINT64_C(AR_PLIC_START_DEF), ///< Start of PLIC range
+ AR_PLIC_LENGTH = EXPAND_UINT64_C(AR_PLIC_LENGTH_DEF), ///< Length of PLIC range
+ AR_HTIF_START = EXPAND_UINT64_C(AR_HTIF_START_DEF), ///< Start of HTIF range
+ AR_HTIF_LENGTH = EXPAND_UINT64_C(AR_HTIF_LENGTH_DEF), ///< Length of HTIF range
+ AR_UARCH_RAM_START = EXPAND_UINT64_C(AR_UARCH_RAM_START_DEF), ///< Start of uarch RAM range
+ AR_UARCH_RAM_LENGTH = EXPAND_UINT64_C(AR_UARCH_RAM_LENGTH_DEF), ///< Length of uarch RAM range
+ AR_CMIO_RX_BUFFER_START = EXPAND_UINT64_C(AR_CMIO_RX_BUFFER_START_DEF), ///< Start of CMIO RX buffer range
+ AR_CMIO_RX_BUFFER_LOG2_SIZE = EXPAND_UINT64_C(AR_CMIO_RX_BUFFER_LOG2_SIZE_DEF), ///< Log2 of CMIO RX buffer range
+ AR_CMIO_RX_BUFFER_LENGTH = (UINT64_C(1) << AR_CMIO_RX_BUFFER_LOG2_SIZE_DEF), ///< Length of CMIO RX buffer range
+ AR_CMIO_TX_BUFFER_START = EXPAND_UINT64_C(AR_CMIO_TX_BUFFER_START_DEF), ///< Start of CMIO TX buffer range
+ AR_CMIO_TX_BUFFER_LOG2_SIZE = EXPAND_UINT64_C(AR_CMIO_TX_BUFFER_LOG2_SIZE_DEF), ///< Log2 of CMIO TX buffer range
+ AR_CMIO_TX_BUFFER_LENGTH = (UINT64_C(1) << AR_CMIO_TX_BUFFER_LOG2_SIZE_DEF), ///< Length of CMIO TX buffer range
+ AR_DRIVE_START = EXPAND_UINT64_C(AR_DRIVE_START_DEF), ///< Start address for flash drive ranges
+ AR_DRIVE_OFFSET = EXPAND_UINT64_C(AR_DRIVE_OFFSET_DEF), ///< Offset for extra flash drive ranges
+
+ AR_FIRST_VIRTIO_START = EXPAND_UINT64_C(AR_FIRST_VIRTIO_START_DEF), ///< Start of first VIRTIO range
+ AR_VIRTIO_LENGTH = EXPAND_UINT64_C(AR_VIRTIO_LENGTH_DEF), ///< Length of each VIRTIO range
+ AR_LAST_VIRTIO_END = EXPAND_UINT64_C(AR_LAST_VIRTIO_END_DEF), ///< End of last VIRTIO range
+
+ AR_RAM_START = EXPAND_UINT64_C(AR_RAM_START_DEF), ///< Start of RAM range
+};
+
+static_assert(AR_SHADOW_STATE_LENGTH >= AR_SHADOW_REGISTERS_LENGTH + AR_SHADOW_TLB_LENGTH);
+static_assert(AR_SHADOW_TLB_START == AR_SHADOW_REGISTERS_START + AR_SHADOW_REGISTERS_LENGTH);
+static_assert(AR_SHADOW_STATE_START == AR_SHADOW_REGISTERS_START);
+
+/// \brief PMA constants.
+enum AR_constants : uint64_t {
+ AR_LOG2_PAGE_SIZE = EXPAND_UINT64_C(AR_LOG2_PAGE_SIZE_DEF), ///< Log2 of physical memory page size.
+ AR_PAGE_SIZE = (UINT64_C(1) << AR_LOG2_PAGE_SIZE_DEF), ///< Physical memory page size.
+};
+
+/// \brief PMA masks.
+enum AR_masks : uint64_t {
+ AR_ADDRESSABLE_MASK = ((UINT64_C(1) << 56) - 1) ///< Mask for addressable ranges.
+};
+
+} // namespace cartesi
+
+#endif
diff --git a/src/address-range-defines.h b/src/address-range-defines.h
new file mode 100644
index 000000000..c4366c278
--- /dev/null
+++ b/src/address-range-defines.h
@@ -0,0 +1,62 @@
+// Copyright Cartesi and individual authors (see AUTHORS)
+// SPDX-License-Identifier: LGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify it under
+// the terms of the GNU Lesser General Public License as published by the Free
+// Software Foundation, either version 3 of the License, or (at your option) any
+// later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+// PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License along
+// with this program (see COPYING). If not, see <https://www.gnu.org/licenses/>.
+//
+
+#ifndef AR_DEFINES_H
+#define AR_DEFINES_H
+
+// NOLINTBEGIN(cppcoreguidelines-macro-usage,cppcoreguidelines-macro-to-enum,modernize-macro-to-enum)
+#define AR_SHADOW_STATE_START_DEF 0x0 ///< Shadow start address
+#define AR_SHADOW_STATE_LENGTH_DEF 0x8000 ///< Shadow length in bytes
+#define AR_SHADOW_REGISTERS_START_DEF 0x0 ///< Shadow registers start address
+#define AR_SHADOW_REGISTERS_LENGTH_DEF 0x1000 ///< Shadow registers length in bytes
+#define AR_SHADOW_REVERT_ROOT_HASH_START_DEF 0xfe0 ///< Shadow revert root hash start address
+#define AR_SHADOW_TLB_START_DEF 0x1000 ///< Shadow TLB start address
+#define AR_SHADOW_TLB_LENGTH_DEF 0x6000 ///< Shadow TLB length in bytes
+#define AR_PMAS_START_DEF 0x10000 ///< PMA Array start address
+#define AR_PMAS_LENGTH_DEF 0x1000 ///< PMA Array length in bytes
+#define AR_SHADOW_UARCH_STATE_START_DEF 0x400000 ///< microarchitecture shadow state start address
+#define AR_SHADOW_UARCH_STATE_LENGTH_DEF 0x1000 ///< microarchitecture shadow state length
+#define AR_UARCH_RAM_START_DEF 0x600000 ///< microarchitecture RAM start address
+#define AR_UARCH_RAM_LENGTH_DEF 0x200000 ///< microarchitecture RAM length
+#define AR_CLINT_START_DEF 0x2000000 ///< CLINT start address
+#define AR_CLINT_LENGTH_DEF 0xC0000 ///< CLINT length in bytes
+#define AR_PLIC_START_DEF 0x40100000 ///< Start of PLIC range
+#define AR_PLIC_LENGTH_DEF 0x00400000 ///< Length of PLIC range
+#define AR_HTIF_START_DEF 0x40008000 ///< HTIF base address (to_host)
+#define AR_HTIF_LENGTH_DEF 0x1000 ///< HTIF length in bytes
+#define AR_FIRST_VIRTIO_START_DEF 0x40010000 ///< Start of first VIRTIO range
+#define AR_VIRTIO_LENGTH_DEF 0x1000 ///< Length of each VIRTIO range
+#define AR_LAST_VIRTIO_END_DEF 0x40020000 ///< End of last VIRTIO range
+#define AR_DTB_START_DEF 0x7ff00000 ///< DTB start address
+#define AR_DTB_LENGTH_DEF 0x100000 ///< DTB length in bytes
+#define AR_CMIO_RX_BUFFER_START_DEF 0x60000000 ///< CMIO RX buffer start address
+#define AR_CMIO_RX_BUFFER_LOG2_SIZE_DEF 21 ///< log2 of CMIO RX buffer length in bytes
+#define AR_CMIO_TX_BUFFER_START_DEF 0x60800000 ///< CMIO TX buffer start address
+#define AR_CMIO_TX_BUFFER_LOG2_SIZE_DEF 21 ///< log2 of CMIO TX buffer length in bytes
+#define AR_DRIVE_START_DEF 0x80000000000000 ///< Start PMA address for flash drives
+#define AR_DRIVE_OFFSET_DEF 0x10000000000000 ///< PMA offset for extra flash drives
+
+#define AR_RAM_START_DEF 0x80000000 ///< RAM start address
+
+#define AR_LOG2_PAGE_SIZE_DEF 12 ///< log2 of physical memory page size.
+
+// helper for using UINT64_C with defines
+#ifndef EXPAND_UINT64_C
+#define EXPAND_UINT64_C(a) UINT64_C(a)
+#endif
+
+// NOLINTEND(cppcoreguidelines-macro-usage,cppcoreguidelines-macro-to-enum,modernize-macro-to-enum)
+#endif /* end of include guard: AR_DEFINES_H */
diff --git a/src/machine-memory-range-descr.h b/src/address-range-description.h
similarity index 74%
rename from src/machine-memory-range-descr.h
rename to src/address-range-description.h
index 787275b9d..325425b98 100644
--- a/src/machine-memory-range-descr.h
+++ b/src/address-range-description.h
@@ -14,8 +14,8 @@
// with this program (see COPYING). If not, see <https://www.gnu.org/licenses/>.
//
-#ifndef MACHINE_MEMORY_RANGE_DESCR_H
-#define MACHINE_MEMORY_RANGE_DESCR_H
+#ifndef ADDRESS_RANGE_DESCRIPTION_H
+#define ADDRESS_RANGE_DESCRIPTION_H
#include
#include
@@ -23,15 +23,15 @@
namespace cartesi {
-/// \brief Description of memory range used for introspection (i.e., get_memory_ranges())
-struct machine_memory_range_descr {
+/// \brief Description of an address range used for introspection (i.e., get_address_ranges())
+struct address_range_description {
uint64_t start = 0; ///< Start of memory range
uint64_t length = 0; ///< Length of memory range
std::string description; ///< User-friendly description for memory range
};
-/// \brief List of memory range descriptions used for introspection (i.e., get_memory_ranges())
-using machine_memory_range_descrs = std::vector;
+/// \brief List of address range descriptions used for introspection (i.e., get_address_ranges())
+using address_range_descriptions = std::vector;
} // namespace cartesi
diff --git a/src/address-range.h b/src/address-range.h
new file mode 100644
index 000000000..b6c82cec7
--- /dev/null
+++ b/src/address-range.h
@@ -0,0 +1,394 @@
+// Copyright Cartesi and individual authors (see AUTHORS)
+// SPDX-License-Identifier: LGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify it under
+// the terms of the GNU Lesser General Public License as published by the Free
+// Software Foundation, either version 3 of the License, or (at your option) any
+// later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+// PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License along
+// with this program (see COPYING). If not, see <https://www.gnu.org/licenses/>.
+//
+
+#ifndef ADDRESS_RANGE_H
+#define ADDRESS_RANGE_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "address-range-constants.h"
+#include "assert-printf.h"
+#include "i-device-state-access.h"
+#include "interpret.h"
+#include "pmas-constants.h"
+#include "pmas.h"
+
+#ifndef MICROARCHITECTURE
+#include "i-dense-hash-tree.h"
+#include "i-dirty-page-tree.h"
+#endif
+
+namespace cartesi {
+
+/// \file
+/// \brief Physical address range
+
+/// \brief Physical Address Range.
+/// \details The target's physical address layout is described by an array of specializations of such ranges.
+class address_range {
+
+ std::array m_description; ///< Description of address range for use in error messages.
+ uint64_t m_start; ///< Target physical address where range starts.
+ uint64_t m_end; ///< Target physical address where range ends.
+ pmas_flags m_flags; ///< Physical memory attribute flags for range.
+
+public:
+ /// \brief Noexcept constexpr constructor for empty ranges with description
+ /// \detail Can be used to initialize a constexpr empty range
+ template <size_t N>
+ explicit constexpr address_range(const char (&description)[N]) noexcept :
+ m_description{},
+ m_start{0},
+ m_end{0},
+ m_flags{} {
+ for (unsigned i = 0; i < std::min(N, m_description.size() - 1); ++i) {
+ m_description[i] = description[i];
+ }
+ }
+
+ // NOLINTNEXTLINE(hicpp-use-equals-default,modernize-use-equals-default)
+ constexpr virtual ~address_range() {} // = default; // doesn't work due to bug in gcc
+
+ template <typename ABRT, size_t N, typename... ARGS>
+ [[noreturn]]
+ static void ABRTF(ABRT abrt, const char (&fmt)[N], ARGS... args) {
+ char buf[256]{};
+ std::ignore = snprintf(buf, std::size(buf), fmt, args...);
+ abrt(buf);
+ __builtin_trap();
+ }
+
+ /// \brief Constructor
+ /// \tparam ABRT type of function used to abort and report errors
+ /// \param description Description of address range for use in error messages (will be copied)
+ /// \param start Target physical address where range starts
+ /// \param length Length of range, in bytes
+ /// \param f Physical memory attribute flags for range
+ template <typename ABRT>
+ constexpr address_range(const char *description, uint64_t start, uint64_t length, const pmas_flags &flags,
+ ABRT abrt) :
+ m_description{},
+ m_start{start},
+ m_end{start + length},
+ m_flags{flags} {
+ // Non-empty description is mandatory
+ if (description == nullptr || *description == '\0') {
+ ABRTF(abrt, "address range 0x%" PRIx64 ":0x%" PRIx64 " has empty description", start, length);
+ }
+ for (unsigned i = 0; i < m_description.size() - 1 && description[i] != '\0'; ++i) {
+ m_description[i] = description[i];
+ }
+ // End = start + length cannot overflow
+ if (start >= UINT64_MAX - length) {
+ ABRTF(abrt, "0x%" PRIx64 ":0x%" PRIx64 " is out of bounds when initializing %s", start, length,
+ description);
+ }
+ // All address ranges must be page-aligned
+ if ((m_start & ~PMA_ISTART_START_MASK) != 0) {
+ ABRTF(abrt, "start of %s (0x%" PRIx64 ") must be aligned to page boundary (every %" PRId64 " bytes)",
+ description, start, AR_PAGE_SIZE);
+ }
+ if ((m_end & ~PMA_ISTART_START_MASK) != 0) {
+ ABRTF(abrt, "length of %s (0x%" PRIx64 ") must be multiple of page length (%" PRId64 " bytes)",
+ description, length, AR_PAGE_SIZE);
+ }
+ // Empty range must really be empty
+ if (length == 0) {
+ if (start != 0) {
+ ABRTF(abrt, "empty range with length 0 must start at 0 when initializing %s", description);
+ }
+ if (get_istart() != 0) {
+ ABRTF(abrt, "empty range must have clear flags when initializing %s", description);
+ }
+ }
+ }
+
+ address_range(const address_range &other) = default;
+ address_range &operator=(const address_range &other) = default;
+ address_range(address_range &&other) = default;
+ address_range &operator=(address_range &&other) = default;
+
+ /// \brief Checks if a range of addresses is entirely contained within this range
+ /// \param offset Start of range of interest, relative to start of this range
+ /// \param length Length of range of interest, in bytes
+ /// \returns True if and only if range of interest is entirely contained within this range
+ constexpr bool contains_relative(uint64_t offset, uint64_t length) const noexcept {
+ return get_length() >= length && offset <= get_length() - length;
+ }
+
+ /// \brief Checks if a range of addresses is entirely contained within this range
+ /// \param start Target physical address of start of range of interest
+ /// \param length Length of range of interest, in bytes
+ /// \returns True if and only if range of interest is entirely contained within this range
+ constexpr bool contains_absolute(uint64_t start, uint64_t length) const noexcept {
+ return start >= get_start() && contains_relative(start - get_start(), length);
+ }
+
+ /// \brief Returns PMA flags used during construction
+ /// \returns Flags
+ constexpr const pmas_flags &get_flags() const noexcept {
+ return m_flags;
+ }
+
+ /// \brief Returns description of address range for use in error messages.
+ /// \returns Description
+ constexpr const char *get_description() const noexcept {
+ return m_description.data();
+ }
+
+ /// \brief Returns target physical address where range starts.
+ /// \returns Start of range
+ constexpr uint64_t get_start() const noexcept {
+ return m_start;
+ }
+
+ /// \brief Returns target physical address right past end of range.
+ /// \returns End of range
+ constexpr uint64_t get_end() const noexcept {
+ return m_end;
+ }
+
+ /// \brief Returns length of range, in bytes.
+ /// \returns Length of range
+ constexpr uint64_t get_length() const noexcept {
+ return m_end - m_start;
+ }
+
+ /// \brief Test if address range is occupied by memory
+ /// \returns True if and only if range is occupied by memory
+ /// \details In this case, get_host_memory() is guaranteed not to return nullptr.
+ constexpr bool is_memory() const noexcept {
+ return m_flags.M;
+ }
+
+ /// \brief Test if address range is occupied by a device
+ /// \returns True if and only if range is occupied by a device
+ /// \details In this case, read_device() and write_device() are operational.
+ constexpr bool is_device() const noexcept {
+ return m_flags.IO;
+ }
+
+ /// \brief Test if address range is empty
+ /// \returns True if and only if range is empty
+ /// \details Empty ranges should be used only for sentinels.
+ constexpr bool is_empty() const noexcept {
+ return m_end == 0;
+ }
+
+ /// \brief Tests if range is readable
+ /// \returns True if and only if range is readable from within the machine.
+ constexpr bool is_readable() const noexcept {
+ return m_flags.R;
+ }
+
+ /// \brief Tests if range is writeable
+ /// \returns True if and only if range is writeable from within the machine.
+ constexpr bool is_writeable() const noexcept {
+ return m_flags.W;
+ }
+
+ /// \brief Tests if range is executable
+ /// \returns True if and only if range is executable from within the machine.
+ constexpr bool is_executable() const noexcept {
+ return m_flags.X;
+ }
+
+ /// \brief Tests if range is read-idempotent
+ /// \returns True if and only if what is read from range remains there until written to
+ constexpr bool is_read_idempotent() const noexcept {
+ return m_flags.IR;
+ }
+
+ /// \brief Tests if range is write-idempotent
+ /// \returns True if and only if what is written to range remains there and can be read until written to again
+ constexpr bool is_write_idempotent() const noexcept {
+ return m_flags.IW;
+ }
+
+ /// \brief Returns driver ID associated to range
+ /// \returns The driver ID
+ constexpr PMA_ISTART_DID get_driver_id() const noexcept {
+ return m_flags.DID;
+ }
+
+ /// \brief Returns packed address range istart field as per whitepaper
+ /// \returns Packed address range istart
+ uint64_t get_istart() const noexcept {
+ return pmas_pack_istart(m_flags, m_start);
+ }
+
+ /// \brief Returns encoded address range ilength field as per whitepaper
+ /// \returns Packed address range ilength
+ /// \details This currently contains only the length itself
+ uint64_t get_ilength() const noexcept {
+ return get_length();
+ }
+
+ /// \brief Returns number of levels in a tree where each leaf is a page
+ int get_level_count() const noexcept {
+ return get_level_count(get_length());
+ }
+
+#ifndef MICROARCHITECTURE
+ /// \brief Returns reference to dirty page tree.
+ i_dirty_page_tree &get_dirty_page_tree() noexcept {
+ return do_get_dirty_page_tree();
+ }
+
+ /// \brief Returns const reference to dirty page tree.
+ const i_dirty_page_tree &get_dirty_page_tree() const noexcept {
+ return do_get_dirty_page_tree();
+ }
+
+ /// \brief Returns reference to dense hash tree.
+ i_dense_hash_tree &get_dense_hash_tree() noexcept {
+ return do_get_dense_hash_tree();
+ }
+
+ /// \brief Returns const reference to dense hash tree.
+ const i_dense_hash_tree &get_dense_hash_tree() const noexcept {
+ return do_get_dense_hash_tree();
+ }
+#endif
+
+ // -----
+ // These are only for device ranges
+ // -----
+
+ /// \brief Reads a word from a device
+ /// \param da State access object through which the machine state can be accessed.
+ /// \param offset Where to start reading, relative to start of this range.
+ /// \param log2_size Log2 of size of value to read (0=uint8_t, 1=uint16_t, 2=uint32_t, 3=uint64_t).
+ /// \param pval Pointer to word where value will be stored.
+ /// \returns True if operation succeeded, false otherwise.
+ bool read_device(i_device_state_access *da, uint64_t offset, int log2_size, uint64_t *pval) const noexcept {
+ return do_read_device(da, offset, log2_size, pval);
+ }
+
+ /// \brief Writes a word to a device
+ /// \param da State access object through which the machine state can be accessed.
+ /// \param offset Where to start writing, relative to start of this range.
+ /// \param log2_size Log2 of size of value to write (0=uint8_t, 1=uint16_t, 2=uint32_t, 3=uint64_t).
+ /// \param val Value to write.
+ /// \returns execute::failure if operation failed, otherwise a success code if operation succeeded.
+ execute_status write_device(i_device_state_access *da, uint64_t offset, int log2_size, uint64_t val) noexcept {
+ return do_write_device(da, offset, log2_size, val);
+ }
+
+ // -----
+ // These are only for memory ranges
+ // -----
+
+ /// \brief Returns start of associated memory region in host
+ /// \returns Pointer to memory
+ const unsigned char *get_host_memory() const noexcept {
+ return do_get_host_memory();
+ }
+
+ /// \brief Returns start of associated memory region in host
+ /// \returns Pointer to memory
+ unsigned char *get_host_memory() noexcept {
+ return do_get_host_memory();
+ }
+
+ /// \brief Returns true if the mapped memory is read-only on the host
+ /// \returns True if the memory is read-only in the host
+ bool is_host_read_only() const noexcept {
+ return do_is_host_read_only();
+ }
+
+ /// \brief Returns true if the mapped memory is shared with a backing store in the host
+ /// \returns True if the memory is shared in the host
+ bool is_backing_store_shared() const noexcept {
+ return do_is_backing_store_shared();
+ }
+
+protected:
+ /// \brief Returns number of levels in a tree where each leaf is a page
+ /// \param length Length of range, in bytes
+ static constexpr int get_level_count(uint64_t length) noexcept {
+ auto page_count = length >> AR_LOG2_PAGE_SIZE;
+ if (page_count == 0) {
+ return 0;
+ }
+ // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
+ return std::bit_width(std::bit_ceil(page_count));
+ }
+
+private:
+ // Default implementation of read_device() for non-device ranges always fails
+ virtual bool do_read_device(i_device_state_access * /*a*/, uint64_t /*offset*/, int /*log2_size*/,
+ uint64_t * /*val*/) const noexcept {
+ return false;
+ }
+
+ // Default implementation of write_device() for non-device ranges always fails
+ virtual execute_status do_write_device(i_device_state_access * /*a*/, uint64_t /*offset*/, int /* log2_size */,
+ uint64_t /*val*/) noexcept {
+ return execute_status::failure;
+ }
+
+ // Default implementation of get_host_memory() for non-memory ranges returns nullptr
+ virtual const unsigned char *do_get_host_memory() const noexcept {
+ return nullptr;
+ }
+
+ virtual unsigned char *do_get_host_memory() noexcept {
+ return nullptr;
+ }
+
+ virtual bool do_is_host_read_only() const noexcept {
+ return false;
+ }
+
+ virtual bool do_is_backing_store_shared() const noexcept {
+ return false;
+ }
+
+#ifndef MICROARCHITECTURE
+ // Default implementation returns an always-dirty tree
+ virtual const i_dirty_page_tree &do_get_dirty_page_tree() const noexcept {
+ const static empty_dirty_page_tree no_dirty{};
+ return no_dirty;
+ }
+
+ virtual i_dirty_page_tree &do_get_dirty_page_tree() noexcept {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+ return const_cast<i_dirty_page_tree &>(std::as_const(*this).do_get_dirty_page_tree());
+ }
+
+ // Default implementation returns no hashes
+ virtual const i_dense_hash_tree &do_get_dense_hash_tree() const noexcept {
+ const static empty_dense_hash_tree no_hashes{};
+ return no_hashes;
+ }
+
+ virtual i_dense_hash_tree &do_get_dense_hash_tree() noexcept {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+ return const_cast<i_dense_hash_tree &>(std::as_const(*this).do_get_dense_hash_tree());
+ }
+#endif
+};
+
+} // namespace cartesi
+
+#endif // ADDRESS_RANGE_H
diff --git a/src/algorithm.h b/src/algorithm.h
new file mode 100644
index 000000000..60b7ccd0f
--- /dev/null
+++ b/src/algorithm.h
@@ -0,0 +1,58 @@
+// Copyright Cartesi and individual authors (see AUTHORS)
+// SPDX-License-Identifier: LGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify it under
+// the terms of the GNU Lesser General Public License as published by the Free
+// Software Foundation, either version 3 of the License, or (at your option) any
+// later version.
+//
+// This program is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+// PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License along
+// with this program (see COPYING). If not, see <https://www.gnu.org/licenses/>.
+//
+
+#ifndef ALGORITHM_H
+#define ALGORITHM_H
+
+#include <limits>
+#include <type_traits>
+#include <utility>
+
+#include "concepts.h"
+
+namespace cartesi {
+
+/// \brief Adds new entry to back of container, if not already there
+/// \tparam Container Container type
+/// \tparam T Value type
+/// \param container Container to push back into
+/// \param value Value to push back
+template <typename Container, typename T>
+ requires BackInsertableWith<Container, T>
+constexpr void try_push_back(Container &container, T &&value) {
+ if (container.empty() || container.back() != value) {
+ container.push_back(std::forward<T>(value));
+ }
+}
+
+/// \brief Performs saturating addition of two unsigned integers
+/// \tparam T Unsigned integer type
+/// \param a First addend
+/// \param b Second addend
+/// \param max Maximum value of T (default: std::numeric_limits<T>::max())
+/// \returns The sum of a and b, or the maximum value of T if overflow occurs
+template <typename T>
+ requires std::is_unsigned_v<T>
+static constexpr T saturating_add(T a, T b, T max = std::numeric_limits<T>::max()) noexcept {
+ if (b > max || a > max - b) [[unlikely]] {
+ return max;
+ }
+ return a + b;
+}
+
+} // namespace cartesi
+
+#endif
diff --git a/src/htif-factory.h b/src/array2d.h
similarity index 74%
rename from src/htif-factory.h
rename to src/array2d.h
index 9a6807682..91e67aad2 100644
--- a/src/htif-factory.h
+++ b/src/array2d.h
@@ -14,19 +14,18 @@
// with this program (see COPYING). If not, see .
//
-#ifndef HTIF_FACTORY_H
-#define HTIF_FACTORY_H
+#ifndef ARRAY2D_H
+#define ARRAY2D_H
-#include
-
-#include "machine-runtime-config.h"
-#include "pma.h"
+#include <array>
+#include <cstddef>
namespace cartesi {
-/// \brief Creates a PMA entry for the HTIF device
-pma_entry make_htif_pma_entry(uint64_t start, uint64_t length);
+//??(edubart): In future C++ standards we should switch to `std::mdarray` or `std::mdspan`
+template <typename T, size_t N, size_t M>
+using array2d = std::array<std::array<T, N>, M>;
} // namespace cartesi
-#endif
+#endif // ARRAY2D_H
diff --git a/src/shadow-pmas-factory.h b/src/assert-printf.h
similarity index 51%
rename from src/shadow-pmas-factory.h
rename to src/assert-printf.h
index 35514872f..1bf7bb85b 100644
--- a/src/shadow-pmas-factory.h
+++ b/src/assert-printf.h
@@ -14,32 +14,34 @@
// with this program (see COPYING). If not, see .
//
-#ifndef SHADOW_PMAS_FACTORY_H
-#define SHADOW_PMAS_FACTORY_H
-
-#include
+#ifndef ASSERT_PRINTF_H
+#define ASSERT_PRINTF_H
/// \file
-/// \brief Shadow device.
-
-#include "pma.h"
-#include "shadow-pmas.h"
+/// \brief Microarchitecture-dependent includes for printf and assert
-namespace cartesi {
+#ifdef MICROARCHITECTURE
+#include "../uarch/uarch-runtime.h" // IWYU pragma: export
+#else
+#include // IWYU pragma: export
+#include // IWYU pragma: export
+#endif
-pma_entry make_shadow_pmas_pma_entry(uint64_t start, uint64_t length);
+#include // IWYU pragma: export
+#include
+#include
-template
-void populate_shadow_pmas_state(const PMAS &pmas, shadow_pmas_state *shadow) {
- static_assert(PMA_SHADOW_PMAS_LENGTH >= sizeof(shadow_pmas_state), "shadow PMAs length is too small");
- unsigned index = 0;
- for (const auto &pma : pmas) {
- shadow->pmas[index].istart = pma.get_istart();
- shadow->pmas[index].ilength = pma.get_ilength();
- ++index;
- }
+static inline void d_vprintf(const char *fmt, va_list ap) {
+ std::ignore = vfprintf(stderr, fmt, ap);
}
-} // namespace cartesi
+// Better to use C-style variadic function that checks for format!
+// NOLINTNEXTLINE(cert-dcl50-cpp)
+__attribute__((__format__(__printf__, 1, 2))) static inline void d_printf(const char *fmt, ...) {
+ va_list ap{};
+ va_start(ap, fmt);
+ d_vprintf(fmt, ap);
+ va_end(ap);
+}
#endif
diff --git a/src/back-merkle-tree.cpp b/src/back-merkle-tree.cpp
index 027edccb7..1c3c36e6d 100644
--- a/src/back-merkle-tree.cpp
+++ b/src/back-merkle-tree.cpp
@@ -15,25 +15,23 @@
//
/// \file
-/// \brief Back Merkle tree implementation.
+/// \brief Back merkle tree implementation.
+
+#include "back-merkle-tree.h"
-#include
#include
+#include
#include
#include
#include
+#include
-#include "back-merkle-tree.h"
-#include "i-hasher.h"
+#include "machine-hash.h"
+#include "variant-hasher.h"
namespace cartesi {
-back_merkle_tree::back_merkle_tree(int log2_root_size, int log2_leaf_size, int log2_word_size) :
- m_log2_root_size{log2_root_size},
- m_log2_leaf_size{log2_leaf_size},
- m_max_leaves{address_type{1} << (log2_root_size - log2_leaf_size)},
- m_context(std::max(1, log2_root_size - log2_leaf_size + 1)),
- m_pristine_hashes{log2_root_size, log2_word_size} {
+int back_merkle_tree::validate_log2_max_leaves_size(int log2_root_size, int log2_leaf_size, int log2_word_size) {
if (log2_root_size < 0) {
throw std::out_of_range{"log2_root_size is negative"};
}
@@ -47,24 +45,26 @@ back_merkle_tree::back_merkle_tree(int log2_root_size, int log2_leaf_size, int l
throw std::out_of_range{"log2_leaf_size is greater than log2_root_size"};
}
if (log2_word_size > log2_leaf_size) {
- throw std::out_of_range{"log2_word_size is greater than log2_word_size"};
+ throw std::out_of_range{"log2_word_size is greater than log2_leaf_size"};
}
- if (log2_root_size - m_log2_leaf_size >= std::numeric_limits::digits) {
- throw std::out_of_range{"tree is too large for address type"};
+ if (log2_root_size - log2_leaf_size >= std::numeric_limits<uint64_t>::digits) {
+ throw std::out_of_range{"log2_root_size is too large"};
}
+ return log2_root_size - log2_leaf_size;
}
-void back_merkle_tree::push_back(const hash_type &new_leaf_hash) {
- hasher_type h;
- hash_type right = new_leaf_hash;
- if (m_leaf_count >= m_max_leaves) {
+void back_merkle_tree::push_back(const machine_hash &new_leaf_hash) {
+ variant_hasher h{m_hash_function};
+ machine_hash right = new_leaf_hash;
+ if (m_leaf_count >= get_max_leaves()) {
throw std::out_of_range{"too many leaves"};
}
- const int depth = m_log2_root_size - m_log2_leaf_size;
- for (int i = 0; i <= depth; ++i) {
- if ((m_leaf_count & (address_type{1} << i)) != 0) {
+ const size_t log2_max_leaves = get_log2_max_leaves();
+ for (size_t i = 0; i <= log2_max_leaves; ++i) {
+ const auto i_span = UINT64_C(1) << i;
+ if ((m_leaf_count & i_span) != 0) {
const auto &left = m_context[i];
- get_concat_hash(h, left, right, right);
+ h.concat_hash(left, right, right);
} else {
m_context[i] = right;
break;
@@ -73,99 +73,101 @@ void back_merkle_tree::push_back(const hash_type &new_leaf_hash) {
++m_leaf_count;
}
-void back_merkle_tree::pad_back(uint64_t new_leaf_count) {
- hasher_type h;
- if (new_leaf_count > m_max_leaves || m_leaf_count + new_leaf_count > m_max_leaves) {
- throw std::invalid_argument("too many leaves");
+void back_merkle_tree::pad_back(uint64_t new_leaf_count, const machine_hashes &pad_hashes) {
+ if (new_leaf_count == 0) {
+ return;
+ }
+ const size_t max_leaves = get_max_leaves();
+ const size_t log2_max_leaves = get_log2_max_leaves();
+
+ // Validate inputs
+ if (new_leaf_count > max_leaves || m_leaf_count + new_leaf_count > max_leaves) {
+ throw std::invalid_argument{"too many leaves"};
+ }
+ if (pad_hashes.size() != log2_max_leaves + 1) {
+ throw std::invalid_argument{"pad hashes does not have expected size"};
}
- const int depth = m_log2_root_size - m_log2_leaf_size;
- int j = 0;
- while (j <= depth) {
- const uint64_t j_span = address_type{1} << j;
+
+ variant_hasher h{m_hash_function};
+
+ // Process each bit position from LSB to MSB
+ for (size_t j = 0; j <= log2_max_leaves;) {
+ const uint64_t j_span = UINT64_C(1) << j;
+
+ // Stop once the span exceeds the remaining padding leaves
if (j_span > new_leaf_count) {
break;
}
- // is our smallest tree at depth j?
- if ((m_leaf_count & j_span) != 0) {
- // if so, we can add 2^j pristine leaves directly
- auto right = m_pristine_hashes.get_hash(m_log2_leaf_size + j);
- for (int i = j; i <= depth; ++i) {
- const uint64_t i_span = address_type{1} << i;
+
+ // Check if we have an existing subtree at this position
+ if ((m_leaf_count & j_span) != 0) { // Is our smallest tree at depth j?
+ // Combine existing subtree with padding and propagate upward
+ auto right = pad_hashes[j];
+ for (size_t i = j; i <= log2_max_leaves; ++i) {
+ const uint64_t i_span = UINT64_C(1) << i;
if ((m_leaf_count & i_span) != 0) {
const auto &left = m_context[i];
- get_concat_hash(h, left, right, right);
+ h.concat_hash(left, right, right);
} else {
m_context[i] = right;
- // outer loop continues where we left off
+ // Outer loop continues where we left off
j = i;
break;
}
}
- new_leaf_count = new_leaf_count - j_span;
- m_leaf_count = m_leaf_count + j_span;
+ m_leaf_count += j_span;
+ new_leaf_count -= j_span;
} else {
++j;
}
}
- // now add the rest of the padding directly to the context
- for (int i = 0; i <= depth; ++i) {
- const uint64_t i_span = address_type{1} << i;
+
+ // Add the rest of the padding directly to the context
+ for (size_t i = 0; i <= log2_max_leaves && new_leaf_count > 0; ++i) {
+ const uint64_t i_span = UINT64_C(1) << i;
+ // Check if we have to set the subtree at this position
if ((new_leaf_count & i_span) != 0) {
- m_context[i] = m_pristine_hashes.get_hash(m_log2_leaf_size + i);
- new_leaf_count = new_leaf_count - i_span;
- m_leaf_count = m_leaf_count + i_span;
+ m_context[i] = pad_hashes[i];
+ m_leaf_count += i_span;
+ new_leaf_count -= i_span;
}
}
+
+ assert(new_leaf_count == 0);
+ assert(m_leaf_count <= get_max_leaves());
}
-back_merkle_tree::hash_type back_merkle_tree::get_root_hash() const {
- hasher_type h;
- assert(m_leaf_count <= m_max_leaves);
- const int depth = m_log2_root_size - m_log2_leaf_size;
- if (m_leaf_count < m_max_leaves) {
- auto root = m_pristine_hashes.get_hash(m_log2_leaf_size);
- for (int i = 0; i < depth; ++i) {
- if ((m_leaf_count & (address_type{1} << i)) != 0) {
- const auto &left = m_context[i];
- get_concat_hash(h, left, root, root);
- } else {
- const auto &right = m_pristine_hashes.get_hash(m_log2_leaf_size + i);
- get_concat_hash(h, root, right, root);
- }
- }
- return root;
+machine_hashes back_merkle_tree::make_pad_hashes(const machine_hash &leaf_hash, int log2_max_leaves,
+ hash_function_type hash_function) {
+ assert(log2_max_leaves >= 0);
+ machine_hashes hashes;
+ hashes.resize(log2_max_leaves + 1);
+ hashes[0] = leaf_hash;
+ variant_hasher h{hash_function};
+ for (size_t i = 1; i < hashes.size(); ++i) {
+ h.concat_hash(hashes[i - 1], hashes[i - 1], hashes[i]);
}
- return m_context[depth];
+ return hashes;
}
-back_merkle_tree::proof_type back_merkle_tree::get_next_leaf_proof() const {
- const int depth = m_log2_root_size - m_log2_leaf_size;
- if (m_leaf_count >= m_max_leaves) {
- throw std::out_of_range{"tree is full"};
- }
- hasher_type h;
- proof_type proof{m_log2_root_size, m_log2_leaf_size};
- proof.set_target_address(m_leaf_count << m_log2_leaf_size);
- proof.set_target_hash(m_pristine_hashes.get_hash(m_log2_leaf_size));
- hash_type hash = m_pristine_hashes.get_hash(m_log2_leaf_size);
- for (int i = 0; i < depth; ++i) {
- if ((m_leaf_count & (address_type{1} << i)) != 0) {
- const auto &left = m_context[i];
- proof.set_sibling_hash(left, m_log2_leaf_size + i);
- get_concat_hash(h, left, hash, hash);
- } else {
- const auto &right = m_pristine_hashes.get_hash(m_log2_leaf_size + i);
- proof.set_sibling_hash(right, m_log2_leaf_size + i);
- get_concat_hash(h, hash, right, hash);
- }
- }
- proof.set_root_hash(hash);
-#ifndef NDEBUG
- if (!proof.verify(h)) {
- throw std::runtime_error{"produced invalid proof"};
+static machine_hash get_pristine_word_hash(int log2_word_size, hash_function_type hash_function) {
+ std::vector<unsigned char> word(UINT64_C(1) << log2_word_size, 0);
+ machine_hash hash{};
+ variant_hasher h{hash_function};
+ h.hash(word, hash);
+ return hash;
+}
+
+machine_hashes back_merkle_tree::make_pristine_pad_hashes(int log2_root_size, int log2_leaf_size, int log2_word_size,
+ hash_function_type hash_function) {
+ validate_log2_max_leaves_size(log2_root_size, log2_leaf_size, log2_word_size);
+ auto pristine_pad_hashes = make_pad_hashes(get_pristine_word_hash(log2_word_size, hash_function),
+ log2_root_size - log2_word_size, hash_function);
+ if (log2_leaf_size > log2_word_size) {
+ pristine_pad_hashes.erase(pristine_pad_hashes.begin(),
+ pristine_pad_hashes.begin() + (log2_leaf_size - log2_word_size));
}
-#endif
- return proof;
+ return pristine_pad_hashes;
}
} // namespace cartesi
diff --git a/src/back-merkle-tree.h b/src/back-merkle-tree.h
index fd1902b80..28b0c1763 100644
--- a/src/back-merkle-tree.h
+++ b/src/back-merkle-tree.h
@@ -17,135 +17,191 @@
#ifndef BACK_MERKLE_TREE_H
#define BACK_MERKLE_TREE_H
+#include
#include
-#include
+#include
+#include
+#include
-#include "keccak-256-hasher.h"
-#include "merkle-tree-proof.h"
-#include "pristine-merkle-tree.h"
+#include "machine-hash.h"
+#include "variant-hasher.h"
/// \file
-/// \brief Back Merkle tree interface.
+/// \brief Back merkle tree interface.
namespace cartesi {
-/// \brief Incremental way of maintaining a Merkle tree for a stream of
-/// leaf hashes
-/// \details This is surprisingly efficient in both time and space.
-/// Adding the next leaf takes O(log(n)) in the worst case, but is
-/// this is amortized to O(1) time when adding n leaves.
-/// Obtaining the proof for the current leaf takes theta(log(n)) time.
-/// Computing the tree root hash also takes theta(log(n)) time.
-/// The class only ever stores log(n) hashes (1 for each tree level).
+/// \brief Incremental hash tree that efficiently maintains hashes for a stream of leaves
+/// \details Space-efficient design stores only O(log n) hashes (one per tree level).
+/// Leaf insertion is O(log n) worst-case but amortizes to O(1) over n operations.
class back_merkle_tree {
public:
- /// \brief Hasher class.
- using hasher_type = keccak_256_hasher;
-
- /// \brief Storage for a hash.
- using hash_type = hasher_type::hash_type;
-
- /// \brief Storage for a hash.
- using address_type = uint64_t;
-
- /// \brief Storage for the proof of a word value.
- using proof_type = merkle_tree_proof;
-
- /// \brief Constructor
- /// \param log2_root_size Log2 of root node
- /// \param log2_leaf_size Log2 of leaf node
- /// \param log2_word_size Log2 of word node
- back_merkle_tree(int log2_root_size, int log2_leaf_size, int log2_word_size);
+ /// \brief Constructor from an existing leaves context
+ /// \param log2_max_leaves Log base 2 of maximum amount of leaves
+ /// \param hash_function Hash function to use
+ /// \param leaf_count Amount of leaves already added to the tree
+ /// \param context Context representing the leaves hashes
+ back_merkle_tree(int log2_max_leaves, hash_function_type hash_function, uint64_t leaf_count = 0,
+ machine_hashes context = {}) :
+ m_hash_function(hash_function),
+ m_leaf_count{leaf_count},
+ m_context(std::max(1, log2_max_leaves + 1)) {
+ if (log2_max_leaves < 0) {
+ throw std::out_of_range{"log2_max_leaves is negative"};
+ }
+ if (log2_max_leaves >= std::numeric_limits<uint64_t>::digits) {
+ throw std::out_of_range{"log2_max_leaves is too large"};
+ }
+ if (leaf_count >= get_max_leaves()) {
+ throw std::out_of_range{"leaf count is greater than or equal to max leaves"};
+ }
+ // Unpack context
+ size_t j = 0;
+ if (leaf_count > 0) {
+ for (int i = 0; i <= log2_max_leaves; ++i) {
+ const auto i_span = UINT64_C(1) << i;
+ if ((leaf_count & i_span) != 0) {
+ if (j >= context.size()) {
+ throw std::out_of_range{"leaves context is incompatible"};
+ }
+ m_context[i] = context[j++];
+ }
+ }
+ }
+ if (j != context.size()) {
+ throw std::out_of_range{"leaves context is incompatible"};
+ }
+ }
+ /// \brief Constructor from known root, leaf and word sizes
+ /// \param log2_root_size Log base 2 of root node
+ /// \param log2_leaf_size Log base 2 of leaf node
+ /// \param log2_word_size Log base 2 of word node
+ /// \param hash_function Hash function to use
+ back_merkle_tree(int log2_root_size, int log2_leaf_size, int log2_word_size, hash_function_type hash_function) :
+ back_merkle_tree(validate_log2_max_leaves_size(log2_root_size, log2_leaf_size, log2_word_size), hash_function) {
+ }
/// \brief Appends a new hash to the tree
/// \param new_leaf_hash Hash of new leaf data
/// \details
- /// Consider the tree down to the leaf level.
- /// The tree is only complete after 2^(log2_root_size-log2_leaf_size)
- /// leaves have been added.
- /// Before that, when leaf_count leaves have been added, we assume the rest
- /// of the leaves are filled with zeros (i.e., they are pristine).
- /// The trick is that we do not need to store the hashes of all leaf_count
- /// leaves already added to the stream.
- /// This is because, whenever a subtree is complete, all we need is its
- /// root hash.
- /// The complete subtrees are disjoint, abutting, and appear in decreasing
- /// size.
- /// In fact, there is exactly one complete subtree for each bit set in
- /// leaf_count.
- /// We only need log2_root_size-log2_leaf_size+1 bits to represent
- /// leaf_count.
- /// So our context is a vector with log2_root_size-log2_leaf_size+1 entries,
- /// where entry i contains the hash for a complete subtree of
- /// size 2^i leaves.
- /// We will only use the entries i if the corresponding bit is set
- /// in leaf_count.
- /// Adding a new leaf hash exactly like adding 1 to leaf_count.
- /// We scan from least to most significant bit in leaf_count.
- /// We start with the right = leaf_hash and i = 0.
- /// If the bit i is set in leaf_count, we replace
- /// context[i] = hash(context[i], right) and move up a bit.
- /// If the bit is not set, we simply store context[i] = right and break
- /// In other words, we can update the context in
- /// log time (log2_root_size-log2_leaf_size)
- void push_back(const hash_type &new_leaf_hash);
+ /// The algorithm efficiently maintains only the root hashes of complete subtrees.
+ /// Each bit set in leaf_count corresponds to a complete subtree of size 2^i,
+ /// with its hash stored in context[i].
+ ///
+ /// Adding a leaf is equivalent to binary addition: scan bits from LSB to MSB.
+ /// For each set bit i in leaf_count, combine context[i] with the new hash
+ /// and propagate upward. Store the result at the first unset bit position.
+ /// This achieves O(log n) worst-case, O(1) amortized time complexity.
+ void push_back(const machine_hash &new_leaf_hash);
/// \brief Appends a number of padding hashes to the tree
- /// \param leaf_count Number of padding hashes to append
+ /// \param new_leaf_count Number of padding hashes to append
+ /// \param pad_hashes Array containing the padding hashes
/// \details
- /// Recall that a bit i set in leaf_count represents a complete subtree
- /// of size 2^i for which we have a hash in context[i].
- /// The remaining entries in the context are unused.
- /// The base case is when the least significant bit set in leaf_count is
- /// bigger than new_leaf_count.
- /// We can simply add to context[j] a pristine subtree of size 2^j
- /// for each bit j set in new_leaf_count.
- /// No used used entry in the context will be overwritten.
- /// We can then simply add new_leaf_count to leaf_count and we are done.
- /// In the general case, the least significant bit set i in leaf_count is
- /// less than or equal to new_leaf_count.
- /// Here, we add a pristine subtree of size 2^i to the context and
- /// bubble up.
- /// We add 2^i to leaf_count and subtract 2^i from new_leaf_count.
- /// Then we repeat this process until we reach the base case.
- void pad_back(uint64_t new_leaf_count);
+ /// Uses binary representation of leaf counts to efficiently add padding.
+ /// Each set bit i in leaf_count represents a complete subtree of size 2^i.
+ ///
+ /// Base case: When the least significant set bit in leaf_count exceeds new_leaf_count,
+ /// directly place pad subtrees at positions corresponding to bits set in new_leaf_count.
+ ///
+ /// General case: When overlap exists, combine the smallest existing subtree with
+ /// a matching pad subtree, bubble up the result, and repeat until base case is reached.
+ void pad_back(uint64_t new_leaf_count, const machine_hashes &pad_hashes);
/// \brief Returns the root tree hash
/// \returns Root tree hash
- /// \details
- /// We can produce the tree root hash from the context at any time, also
- /// in log time
- /// Ostensibly, we add pristine leaves until the leaf_count
- /// hits 2^(log2_root_size-log2_leaf_size)
- /// To do this in log time, we start by precomputing the hashes for all
- /// completely pristine subtree sizes
- /// If leaf_count is already 2^(log2_root_size-log2_leaf_size), we
- /// return context[i]
- /// Otherwise, we start with i = 0 and root = pristine[i+log2_leaf_size]
- /// (i.e., the invariant is that root contains the hash of the rightmost
- /// subtree whose log size is i + log2_leaf_size)
- /// If bit i is set, we set root = hash(context[i], root) and move up a bit
- /// (i.e., the subtree we are growing is to the right of what is
- /// in the context)
- /// If bit i is not set, we set
- /// root = hash(root, pristine[i+log2_leaf_size]) and move up a bit
- /// (i.e., to grow our subtree, we need to pad it on the right with
- /// a pristine subtree of the same size)
- hash_type get_root_hash() const;
-
- /// \brief Returns proof for the next pristine leaf
- /// \returns Proof for leaf at given index, or throws exception
- /// \details This is basically the same algorithm as
- /// back_merkle_tree::get_root_hash.
- proof_type get_next_leaf_proof() const;
+ /// \details The tree must be complete, otherwise an exception is thrown
+ machine_hash get_root_hash() const {
+ if (!full()) {
+ throw std::runtime_error{"attempt to get root hash of an incomplete back tree"};
+ }
+ return m_context.back();
+ }
+
+ /// \brief Clears the tree, making it empty (as if no leaves were added)
+ void clear() noexcept {
+ m_leaf_count = 0;
+ }
+
+ /// \brief Returns true if the tree is complete (reached maximum amount of leaves)
+ bool full() const noexcept {
+ return m_leaf_count >= get_max_leaves();
+ }
+
+ /// \brief Returns true if the tree is empty (no leaves were added)
+ bool empty() const noexcept {
+ return m_leaf_count == 0;
+ }
+
+ /// \brief Returns log base 2 of maximum amount of leaves that can be held by the tree
+ int get_log2_max_leaves() const noexcept {
+ return static_cast<int>(m_context.size()) - 1;
+ }
+
+ /// \brief Returns maximum amount of leaves that can be held by the tree
+ uint64_t get_max_leaves() const noexcept {
+ return static_cast<uint64_t>(1) << get_log2_max_leaves();
+ }
+
+ /// \brief Returns the hash function used by the tree
+ hash_function_type get_hash_function() const noexcept {
+ return m_hash_function;
+ }
+
+ /// \brief Returns amount of leaves already added to the tree
+ uint64_t get_leaf_count() const noexcept {
+ return m_leaf_count;
+ }
+
+ /// \brief Returns amount of leaves that can yet be added to the tree
+ uint64_t get_remaining_leaf_count() const noexcept {
+ return get_max_leaves() - m_leaf_count;
+ }
+
+ /// \brief Returns the leaves context
+ machine_hashes get_context() const {
+ // Pack context
+ machine_hashes context;
+ if (m_leaf_count > 0) {
+ const int log2_max_leaves = get_log2_max_leaves();
+ for (int i = 0; i <= log2_max_leaves; ++i) {
+ const auto i_span = UINT64_C(1) << i;
+ if ((m_leaf_count & i_span) != 0) {
+ context.push_back(m_context[i]);
+ }
+ }
+ }
+ return context;
+ }
+
+ /// \brief Creates an array of pad hashes to be used with pad_back()
+ /// \param leaf_hash Hash of the leaf node
+ /// \param log2_max_leaves Log base 2 of maximum amount of leaves
+ /// \param hash_function Hash function to use
+ /// \returns Array of pad hashes
+ static machine_hashes make_pad_hashes(const machine_hash &leaf_hash, int log2_max_leaves,
+ hash_function_type hash_function);
+
+ /// \brief Creates an array of pristine pad hashes to be used with pad_back()
+ /// \param log2_root_size Log base 2 of root node
+ /// \param log2_leaf_size Log base 2 of leaf node
+ /// \param log2_word_size Log base 2 of word node
+ /// \param hash_function Hash function to use
+ /// \returns Array of pad hashes
+ static machine_hashes make_pristine_pad_hashes(int log2_root_size, int log2_leaf_size, int log2_word_size,
+ hash_function_type hash_function);
+
+ /// \brief Validates and computes log2_max_leaves from root, leaf and word sizes
+ /// \param log2_root_size Log base 2 of root node
+ /// \param log2_leaf_size Log base 2 of leaf node
+ /// \param log2_word_size Log base 2 of word node
+ /// \returns Log base 2 of maximum amount of leaves
+ static int validate_log2_max_leaves_size(int log2_root_size, int log2_leaf_size, int log2_word_size);
private:
- int m_log2_root_size; ///< Log2 of tree size
- int m_log2_leaf_size; ///< Log2 of leaf size
- address_type m_leaf_count{0}; ///< Number of leaves already added
- address_type m_max_leaves; ///< Maximum number of leaves
- std::vector m_context; ///< Hashes of bits set in leaf_count
- pristine_merkle_tree m_pristine_hashes; ///< Hash of pristine subtrees of all sizes
+ hash_function_type m_hash_function; ///< Hash function
+ uint64_t m_leaf_count; ///< Number of leaves already added
+ machine_hashes m_context; ///< Hashes of bits set in leaf_count
};
} // namespace cartesi
diff --git a/src/base64.cpp b/src/base64.cpp
index 1fb10354d..0fcb4aec2 100644
--- a/src/base64.cpp
+++ b/src/base64.cpp
@@ -14,16 +14,15 @@
// with this program (see COPYING). If not, see .
//
+#include "base64.h"
+
#include
#include
#include
#include
#include
-#include
-#include "base64.h"
-
-namespace cartesi {
+namespace cartesi::detail {
// Base64 globals
static constexpr uint8_t b64base[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
@@ -43,7 +42,7 @@ static constexpr uint8_t b64unbase[] = {255, 255, 255, 255, 255, 255, 255, 255,
// Accumulates bytes in input buffer until 3 bytes are available.
// Translate the 3 bytes into Base64 form and append to buffer.
// Returns new number of bytes in buffer.
-static size_t b64encode(uint8_t c, uint8_t *input, size_t size, std::ostringstream &sout) {
+size_t b64encode(uint8_t c, uint8_t *input, size_t size, std::ostringstream &sout) {
input[size++] = c;
if (size == 3) {
uint8_t code[4];
@@ -70,7 +69,7 @@ static size_t b64encode(uint8_t c, uint8_t *input, size_t size, std::ostringstre
// Encodes the Base64 last 1 or 2 bytes and adds padding '='
// Result, if any, is appended to buffer.
// Returns 0.
-static size_t b64pad(const uint8_t *input, size_t size, std::ostringstream &sout) {
+size_t b64pad(const uint8_t *input, size_t size, std::ostringstream &sout) {
uint64_t value = 0;
uint8_t code[4] = {'=', '=', '=', '='};
switch (size) {
@@ -104,7 +103,7 @@ static size_t b64pad(const uint8_t *input, size_t size, std::ostringstream &sout
// Accumulates bytes in input buffer until 4 bytes are available.
// Translate the 4 bytes from Base64 form and append to buffer.
// Returns new number of bytes in buffer.
-static size_t b64decode(uint8_t c, uint8_t *input, size_t size, std::ostringstream &sout) {
+size_t b64decode(uint8_t c, uint8_t *input, size_t size, std::ostringstream &sout) {
if (b64unbase[c] > 64) {
if (c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r') { // ignore whitespace characters
return size;
@@ -145,25 +144,4 @@ static size_t b64decode(uint8_t c, uint8_t *input, size_t size, std::ostringstre
return size;
}
-std::string encode_base64(const std::string_view &input) {
- std::ostringstream sout;
- uint8_t ctx[4]{};
- size_t ctxlen = 0;
- for (const char b : input) {
- ctxlen = b64encode(static_cast<uint8_t>(b), ctx, ctxlen, sout);
- }
- b64pad(ctx, ctxlen, sout);
- return sout.str();
-}
-
-std::string decode_base64(const std::string_view &input) {
- std::ostringstream sout;
- uint8_t ctx[4]{};
- size_t ctxlen = 0;
- for (const char b : input) {
- ctxlen = b64decode(static_cast<uint8_t>(b), ctx, ctxlen, sout);
- }
- return sout.str();
-}
-
-} // namespace cartesi
+} // namespace cartesi::detail
diff --git a/src/base64.h b/src/base64.h
index b2ebe8d20..6b7cf7e9b 100644
--- a/src/base64.h
+++ b/src/base64.h
@@ -17,14 +17,52 @@
#ifndef BASE64_H
#define BASE64_H
+#include
+#include
+#include
#include
-#include
+
+#include "concepts.h"
namespace cartesi {
-std::string encode_base64(const std::string_view &input);
+namespace detail {
+
+size_t b64encode(uint8_t c, uint8_t *input, size_t size, std::ostringstream &sout);
+size_t b64pad(const uint8_t *input, size_t size, std::ostringstream &sout);
+size_t b64decode(uint8_t c, uint8_t *input, size_t size, std::ostringstream &sout);
+
+} // namespace detail
+
+/// \brief Encodes binary data into base64
+/// \param data Input data range
+/// \returns String with encoded data
+template
+std::string encode_base64(R &&data) { // NOLINT(cppcoreguidelines-missing-std-forward)
+ //??D we could make this faster by avoiding ostringstream altogether...
+ std::ostringstream sout;
+ uint8_t ctx[4]{};
+ size_t ctxlen = 0;
+ for (auto b : data) {
+ ctxlen = detail::b64encode(static_cast<uint8_t>(b), ctx, ctxlen, sout);
+ }
+ detail::b64pad(ctx, ctxlen, sout);
+ return sout.str();
+}
-std::string decode_base64(const std::string_view &input);
+/// \brief Decodes binary data from base64
+/// \param data Input data range
+/// \returns String with decoded data
+template
+std::string decode_base64(R &&data) { // NOLINT(cppcoreguidelines-missing-std-forward)
+ std::ostringstream sout;
+ uint8_t ctx[4]{};
+ size_t ctxlen = 0;
+ for (auto b : data) {
+ ctxlen = detail::b64decode(static_cast<uint8_t>(b), ctx, ctxlen, sout);
+ }
+ return sout.str();
+}
} // namespace cartesi
diff --git a/src/cartesi-machine-stored-hash.lua b/src/cartesi-machine-stored-hash.lua
index 25beca28f..04f9a672b 100755
--- a/src/cartesi-machine-stored-hash.lua
+++ b/src/cartesi-machine-stored-hash.lua
@@ -16,12 +16,11 @@
-- with this program (see COPYING). If not, see <https://www.gnu.org/licenses/>.
--
+local cartesi = require("cartesi")
local util = require("cartesi.util")
-local f = assert(
- io.open(assert(arg[1], "missing machine name") .. "/hash", "rb"),
- string.format("unable to open machine '%s'", tostring(arg[1]))
-)
-local h = assert(f:read("a"), "unable to read hash")
-f:close()
-print(util.hexhash(h))
+local dir = assert(arg[1], "missing machine directory")
+local machine = cartesi.machine(dir)
+local root_hash = machine:get_root_hash()
+local hex_root_hash = util.hexhash(root_hash)
+print(hex_root_hash)
diff --git a/src/cartesi-machine.lua b/src/cartesi-machine.lua
index ff5cfbc66..6b598c742 100755
--- a/src/cartesi-machine.lua
+++ b/src/cartesi-machine.lua
@@ -87,10 +87,6 @@ where options are:
--ram-length=
set RAM length.
- --dtb-image=
- name of file containing DTB image
- (default: auto generated flattened device tree).
-
--no-bootargs
clear default bootargs.
@@ -106,19 +102,22 @@ where options are:
: is one of
label: