diff --git a/.env.example b/.env.example new file mode 100644 index 000000000..40c6c3720 --- /dev/null +++ b/.env.example @@ -0,0 +1,5 @@ +LOCAL_DATA="$PWD/.local" +PUBLIC_IP="127.0.0.1" +BASED_ENV=dev + +BASED_OP_GETH_PORT=8645 diff --git a/.github/assets/wallets/batcher/address b/.github/assets/wallets/batcher/address new file mode 100644 index 000000000..15f3fc634 --- /dev/null +++ b/.github/assets/wallets/batcher/address @@ -0,0 +1 @@ +0x43d97E13a80444436517C178575D34cB364134F2 diff --git a/.github/assets/wallets/batcher/key b/.github/assets/wallets/batcher/key new file mode 100644 index 000000000..85798966f --- /dev/null +++ b/.github/assets/wallets/batcher/key @@ -0,0 +1 @@ +0xd9170dabfdd56a71fe35b3d20ce0d2e560038c2cc4484c1cd6b91ef9ca62025b diff --git a/.github/assets/wallets/proposer/address b/.github/assets/wallets/proposer/address new file mode 100644 index 000000000..dc8b9acff --- /dev/null +++ b/.github/assets/wallets/proposer/address @@ -0,0 +1 @@ +0xc8a32259c26b5bE53C99f62202c664CA7c94fFb1 diff --git a/.github/assets/wallets/proposer/key b/.github/assets/wallets/proposer/key new file mode 100644 index 000000000..22d2ff94e --- /dev/null +++ b/.github/assets/wallets/proposer/key @@ -0,0 +1 @@ +0xac7f4cdcdfb1fb4c421a4c572180aac52569bd0a407881e22ba03efd73ca2fd2 diff --git a/.github/assets/wallets/sequencer/address b/.github/assets/wallets/sequencer/address new file mode 100644 index 000000000..15f3fc634 --- /dev/null +++ b/.github/assets/wallets/sequencer/address @@ -0,0 +1 @@ +0x43d97E13a80444436517C178575D34cB364134F2 diff --git a/.github/assets/wallets/sequencer/key b/.github/assets/wallets/sequencer/key new file mode 100644 index 000000000..85798966f --- /dev/null +++ b/.github/assets/wallets/sequencer/key @@ -0,0 +1 @@ +0xd9170dabfdd56a71fe35b3d20ce0d2e560038c2cc4484c1cd6b91ef9ca62025b diff --git a/.github/workflows/integration-trigger.yml b/.github/workflows/integration-trigger.yml new file mode 100644 index 000000000..c7a3ccdf7 --- /dev/null +++ 
b/.github/workflows/integration-trigger.yml @@ -0,0 +1,92 @@ +on: + workflow_dispatch: + inputs: + pr_number: + description: "PR number to check" + required: true + type: string + + issue_comment: + types: [created] + +env: + TRIGGER_COMMAND: '/run-integration-tests' + +jobs: + # Parse comment and determine if we should run + parse-trigger: + if: | + github.event_name == 'workflow_dispatch' || + (github.event_name == 'issue_comment' && + github.event.issue.pull_request) + + runs-on: ubuntu-latest + outputs: + should_run: ${{ steps.decision.outputs.should_run }} + sha: ${{ steps.get-ref.outputs.sha }} + + steps: + - name: React to comment (if comment trigger) + if: github.event_name == 'issue_comment' + uses: peter-evans/create-or-update-comment@v3 + with: + comment-id: ${{ github.event.comment.id }} + reactions: rocket + + - name: Determine trigger type and decision + id: decision + run: | + if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + echo "should_run=true" >> $GITHUB_OUTPUT + elif [[ "${{ github.event_name }}" == "issue_comment" ]]; then + if [[ "${{ contains(github.event.comment.body, env.TRIGGER_COMMAND) }}" == "true" ]]; then + echo "should_run=true" >> $GITHUB_OUTPUT + else + echo "should_run=false" >> $GITHUB_OUTPUT + fi + else + echo "should_run=false" >> $GITHUB_OUTPUT + fi + + - name: Get PR reference information + id: get-ref + if: steps.decision.outputs.should_run == 'true' + uses: actions/github-script@v7 + with: + script: | + let prNumber, sha; + + if (context.eventName === 'pull_request') { + // Direct PR event + prNumber = context.payload.pull_request.number; + sha = context.payload.pull_request.head.sha; + } else if (context.eventName === 'workflow_dispatch') { + // Manual dispatch with PR number + prNumber = context.payload.inputs.pr_number; + const { data: pr } = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: parseInt(prNumber) + }); + sha = pr.head.sha; + } else if
(context.eventName === 'issue_comment') { + // Comment on PR + prNumber = context.payload.issue.number; + const { data: pr } = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber + }); + sha = pr.head.sha; + } + + core.setOutput('sha', sha); + + # Call the reusable workflow + integration-tests: + needs: parse-trigger + if: needs.parse-trigger.outputs.should_run == 'true' + uses: ./.github/workflows/integration.yml + with: + sha: ${{ needs.parse-trigger.outputs.sha }} + diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml new file mode 100644 index 000000000..3db3be5b9 --- /dev/null +++ b/.github/workflows/integration.yml @@ -0,0 +1,110 @@ +name: Project integration tests + +on: + workflow_call: + inputs: + sha: + description: "git sha to checkout" + required: true + type: string + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +env: + BASED_ENV: dev + PORTAL_TIMEOUT: 180 + STARTUP_DELAY: 15 + GITHUB_REPOSITORY: ${{ github.repository }} + +jobs: + # Build images and publish as artifacts + build: + name: Build images + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ inputs.sha }} + fetch-depth: 0 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Log in to Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - uses: taiki-e/install-action@just + + - name: Build Docker images + run: | + just ci build + + - name: Save Docker images to tarballs + run: | + mkdir -p artifacts + just ci export-images $(realpath ./artifacts) + + - name: Upload images artifacts + uses: actions/upload-artifact@v4 + with: + name: based-op-images + path: artifacts + + # Use published artifacts and run nodes + test: + name: Configure nodes & run tests + runs-on: ubuntu-latest + 
needs: build + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ inputs.sha }} + fetch-depth: 0 + + - name: Install Just + uses: taiki-e/install-action@just + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Download images artifacts + uses: actions/download-artifact@v4 + with: + name: based-op-images + path: /tmp/artifacts/images + + - name: Load Docker images + run: | + find /tmp/artifacts/images -type f -exec docker load -i {} \; + + - name: Prepare configuration + run: | + cp .env.example .env + just ci prepare + + # - name: Publish configuration artifact + # uses: actions/upload-artifact@v4 + # if: always() + # with: + # name: config + # include-hidden-files: true + # path: | + # .local + # !.local/**/data + + - name: Start services and run tests + run: | + just ci run + sleep $STARTUP_DELAY + just ci test + + - name: Teardown + if: always() + run: | + just ci stop diff --git a/.gitignore b/.gitignore index c3a72d17f..14e15f1b3 100644 --- a/.gitignore +++ b/.gitignore @@ -45,4 +45,8 @@ based/temp_registry.json temp webcontrol data -*.py \ No newline at end of file + +*.py + +/dist +/.local diff --git a/Justfile b/Justfile new file mode 100644 index 000000000..3d7d01aef --- /dev/null +++ b/Justfile @@ -0,0 +1,138 @@ +set dotenv-load + +export LOCAL_DATA := canonicalize(env("LOCAL_DATA", shell('mkdir -p .local && realpath .local'))) + +# dev = use locally built images +# prod = use releases +export BASED_ENV := env("BASED_ENV", "prod") + +self := "just -f " + justfile() +deps := "just -f " + join(justfile_directory(), "deps", "Justfile") + +# Verifies that system dependencies are present +@check: + echo "jq: {{require('jq')}}" + echo "docker: {{require('docker')}}" + echo "cast: {{require('cast')}}" + echo "rustup: {{require('rustup')}}" + echo "python: {{require('python')}}" + +# Prepare the local environment: fetch deps, build them, setup toolchains...
+@prepare: + {{deps}} fetch + cd docs && npm i + cd based && rustup toolchain install + +# 🏗️ Build +build: + #!/usr/bin/env bash + set -euo pipefail + + {{deps}} build & + just -f based/docker/Justfile all & + + wait + +# 📚 Serve local docs +docs: + just -f docs/Justfile serve + +# Build and link rabby in the configured output folder +rabby out="./dist": + just -f deps/rabby.just build + ln -s deps/rabby/dist {{out}}/rabby + ln -s deps/rabby/dist-mv2 {{out}}/rabby-mv2 + +## Component access (component verb) + +# Run recipes from scripts/spamoor.just +spamoor *args=("start ./spamoor-config.yml"): + just -f scripts/spamoor.just {{args}} + +# Run recipes from based/overseer.just +overseer *args=("start"): + just -f based/overseer.just {{args}} + +# Run recipes from based/portal.just +portal *args: + just -f based/portal.just {{args}} + +# Run recipes from based/registry.just +registry *args: + just -f based/registry.just {{args}} + +# Run recipes from based/main-node.just +main-node *args: + just -f based/main-node.just {{args}} + +# Run recipes from based/follower-node.just +follower-node *args: + just -f based/follower-node.just {{args}} + +# Run recipes from scripts/monitoring.just +monitoring *args: + just -f scripts/monitoring.just {{args}} + +## Action access (verb component) + +# View logs for the given service +logs name: + just -f scripts/logs.just {{name}} + +# Start the given service +start name: + {{self}} {{name}} start + +# Stop the given service +stop name: + {{self}} {{name}} stop + +# Run a test recipe described in scripts/test.just +test name: + just -f scripts/test.just {{name}} + +# Cleanup all the local state of the project +reset: + {{self}} main-node reset + {{self}} follower-node reset + rm -rf $LOCAL_DATA + +# Run a recipe from scripts/ci.just +ci *args: + just -f scripts/ci.just {{args}} + +# TODO: consider some sort of interactive config if needed +quick-start: + {{self}} main-node config-with-deploy + {{self}} main-node start + {{self}}
follower-node create-config + {{self}} follower-node start-dev + + echo "Waiting for 10 seconds before starting the overseer" && sleep 10 + {{self}} overseer start + + +# Cleanup all local state +reset-and-start-full-stack-local: + #!/usr/bin/env bash + set -euo pipefail + + export PUBLIC_IP=127.0.0.1 + + echo "Ensuring required environment variables are available..." + echo 'OP_BATCHER_KEY={{env("OP_BATCHER_KEY")}}' + echo 'OP_PROPOSER_KEY={{env("OP_PROPOSER_KEY")}}' + echo 'OP_SEQUENCER_KEY={{env("OP_SEQUENCER_KEY")}}' + + echo "Resetting configuration and deploying new L2 from scratch" + {{self}} reset || true + + {{self}} main-node config-with-deploy + {{self}} main-node start + {{self}} follower-node create-config + {{self}} follower-node start-dev + + echo "Waiting for 15 seconds before triggering peering and starting the overseer" && sleep 15 + + python peering.py + {{self}} start overseer diff --git a/Makefile b/Makefile index cd2ab2dd8..4b972b207 100644 --- a/Makefile +++ b/Makefile @@ -30,9 +30,9 @@ L2_CHAIN_ID?=$(shell \ L2_CHAIN_ID_HEX:=$(shell printf "0x%064x" $(L2_CHAIN_ID)) PORTAL?=http://0.0.0.0:8080 TXPROXY?=http://0.0.0.0:8090 -L1_RPC_URL?=http://3.84.162.42:8545 -L1_BEACON_RPC_URL?=http://3.84.162.42:5051 -PUBLIC_IP?=$(shell curl ifconfig.me) +L1_RPC_URL?=https://ethereum-sepolia-rpc.publicnode.com +L1_BEACON_RPC_URL?=https://ethereum-sepolia-beacon-api.publicnode.com +PUBLIC_IP?=$(shell curl -4 ifconfig.me) # if GATEWAY_SEQUENCING_KEY is set, use that one, otherwise key_to_address will generate a new one GATEWAY_SEQUENCING_KEY ?= $(shell \ [ -f .local_gateway_and_follower/.env ] && \ @@ -365,7 +365,7 @@ start-main-node: create-network $(MAKE) fix-compose; \ echo "Initializing all components of a main sequencing node in ./.local_main_node ..."; \ { \ - echo "DISPUTE_GAME_FACTORY_ADDRESS=$$(docker run -v $$(pwd)/.local_main_node/config:/config -i --rm imega/jq -r '.opChainDeployments[0].disputeGameFactoryProxyAddress' /config/state.json)"; \ + 
echo "DISPUTE_GAME_FACTORY_ADDRESS=$$(docker run -v $$(pwd)/.local_main_node/config:/config -i --rm imega/jq -r '.opChainDeployments[0].DisputeGameFactoryProxy' /config/state.json)"; \ echo "NETWORK_ID=$$(docker run -v $$(pwd)/.local_main_node/config:/config -i --rm imega/jq -r '.l2_chain_id' /config/rollup.json)"; \ echo "L1_RPC_URL=$(L1_RPC_URL)"; \ echo "L1_BEACON_RPC_URL=$(L1_BEACON_RPC_URL)"; \ diff --git a/README.md b/README.md index a666fc08b..e80bc0655 100644 --- a/README.md +++ b/README.md @@ -7,26 +7,26 @@ The following steps have been tested on Sepolia, with a previously deployed L2 chain 1. locate your `rollup.json`, `genesis.json` and `state.json` files -2. run `make config-main-node OP_NODE_DATA_DIR= OP_GETH_DATA_DIR= ROLLUP_JSON= GENESIS_JSON= STATE_JSON=` -3. there should be some files set up in `.local_main_node` -4. start sequencing the main chain with `make start-main-node OP_BATCHER_KEY= OP_PROPOSER_KEY= MAIN_KEY=` +2. run `just main-node create-config /path/to/folder/with/{rollup.json, genesis.json, state.json}` +3. there should be some files set up in `.local/local_main_node` +4. start sequencing the main chain with `just start main-node`, ensuring `OP_PROPOSER_KEY`, `OP_BATCHER_KEY` and `OP_SEQUENCER_KEY` environment variables are set with the private key of the corresponding entity for the chain 5. Normally you should see some logs starting 6. `blockscout` should be up and running at `http://0.0.0.0:4000` - 7a. `make stop-main-node` to stop all the sequencing services - 7b. `make stop-monitoring` to stop all monitoring services (Grafana, Prometheus, etc) - 7c. `make logs-main-node` to output logs of all the main services + 7a. `just stop main-node` to stop all the sequencing services + 7b. `just stop monitoring` to stop all monitoring services (Grafana, Prometheus, etc) + 7c. `just logs main-node` to output logs of all the main services ### Deploy a new l2 chain on Sepolia 1. 
To deploy a new chain on l2, make sure to have an address on Sepolia with some funds. This will be used as the `MAIN`/`vault` address. -2. create 2 more accounts, deposit ~0.2 eth in them. One will be used for the `op-batcher` one for the `op-proposer. -3. run `make deploy-chain OP_BATCHER_KEY= OP_PROPOSER_KEY= MAIN_KEY=` -4. start sequencing the main chain with `make start-main-node OP_BATCHER_KEY= OP_PROPOSER_KEY= MAIN_KEY= L1_RPC_URL= L1_BEACON_RPC_URL=` +2. create 2 more accounts, deposit ~0.2 eth in them. One will be used for the `op-batcher` one for the `op-proposer`. +3. run `just main-node config-with-deploy`, ensuring `OP_BATCHER_KEY`, `OP_PROPOSER_KEY` and `OP_SEQUENCER_KEY` environment variables are set with the private key of the corresponding entity for your new chain +4. start sequencing the main chain with `just main-node start`, ensuring the same environment variables as set 5. Normally you should see some logs starting 6. `blockscout` should be up and running at `http://0.0.0.0:4000` - 7a. `make stop-main-node` to stop all the sequencing services - 7b. `make stop-monitoring` to stop all monitoring services (Grafana, Prometheus, etc) - 7c. `make logs-main-node` to output logs of all the main services + 7a. `just stop main-node` to stop all the sequencing services + 7b. `just stop monitoring` to stop all monitoring services (Grafana, Prometheus, etc) + 7c. `just logs main-node` to output logs of all the main services ### Run a Based Gateway @@ -35,7 +35,7 @@ With that default config, a new `private-key` and `wallet` combo will be generat The `wallet` is communicated back to the `Portal` to be gossiped around to the rest of the network for signature verification. 
The following single command sets up everything and will start your `Gateway`, `Based OP-node`, `Based OP-geth`: -`git clone https://github.com/gattaca-com/based-op && cd based-op && make start-gateway` +`git clone https://github.com/gattaca-com/based-op && cd based-op && just start follower-node` If everything went well, you should see a terminal UI appear, called the `Overseer`: @@ -44,29 +44,29 @@ If everything went well, you should see a terminal UI appear, called the `Overse This shows you the status of your local `Gateway` and a general overview of the chain. You can press left and right keys to cycle between the different tabs and explore all the information! -The configuration that was generated can be found in `based-op/.local_gateway_and_follower`, mainly the `.env` and `compose.yml` files. +The configuration that was generated can be found in `based-op/.local/gateway_and_follower`, mainly the `.env` and `compose.yml` files. When you [spam some transactions with `based-bmf`](https://based-bmf.gattaca.com), you should see them appear in the `Transaction Pool` of your `Gateway`. A couple of commands tend to come in handy (from the top `based-op` directory): -- `make stop-gateway` -- `make start-gateway` -- `make start-overseer` -- `make logs-gateway` -- `make logs-based-op-node` -- `make logs-based-op-geth` +- `just stop follower-node` +- `just start follower-node` +- `just start overseer` +- `just logs follower-node` +- `just logs based-op-node` +- `just logs based-op-geth` ### Add/Update based-gateways to Registry -When a based-gateway is started with `make start-gateway`, it will register itself to the Registry behind the `PORTAL`. For now, the Registry is kept in a simple json file in `.local_main_node/config/registry.json`. You can add/update/remove gateways there, the Registry and Portal will pick up on the changes every minute. 
+When a based-gateway is started with `just follower-node start`, it will register itself to the Registry behind the `PORTAL`. For now, the Registry is kept in a simple json file in `.local_main_node/config/registry.json`. You can add/update/remove gateways there, the Registry and Portal will pick up on the changes every minute. If you have started both the main sequencing node and the gateway on the same machine, you might need to change the ip to `0.0.0.0`, by default `curl ifconfig.me` is used to populate your url. ### Send your first tx -You can now test sending a transaction with `make test-tx`. +You can now test sending a transaction with `just test tx`. The transaction will be sent to the Portal, and forwarded to the gateway, which will sequence the transaction in a new Frag, and broacast it via p2p to follower nodes. > [!IMPORTANT] @@ -78,10 +78,10 @@ The transaction will be sent to the Portal, and forwarded to the gateway, which Wallets commonly use a high polling interval for the transaction receipt. To be able to see the preconfirmation speed, we modify Rabby to speed up that interval. You can test it compiling it: ```sh -make build-rabby-chrom +just rabby ``` -And importing it to your browser locally (see [Firefox](https://extensionworkshop.com/documentation/develop/temporary-installation-in-firefox/) or [Chrome](https://developer.chrome.com/docs/extensions/get-started/tutorial/hello-world?hl=es-419#load-unpacked) references). The compiled extension directory is `rabby/dist` for Google Chrome, and `rabby/dist-mv2` for Mozilla Firefox. +And importing it to your browser locally (see [Firefox](https://extensionworkshop.com/documentation/develop/temporary-installation-in-firefox/) or [Chrome](https://developer.chrome.com/docs/extensions/get-started/tutorial/hello-world?hl=es-419#load-unpacked) references). The compiled extension directory is `dist/rabby` for Google Chrome, and `dist/rabby-mv2` for Mozilla Firefox. 
### Connecting your local wallet to your local follower node diff --git a/based/docker/Justfile b/based/docker/Justfile new file mode 100644 index 000000000..d23b5427a --- /dev/null +++ b/based/docker/Justfile @@ -0,0 +1,39 @@ +set quiet + +suppress_output := env("QUIET", "true") +quiet := if suppress_output == "true" { "--quiet > /dev/null 2>&1 " } else { "" } + +# Inject caching into docker builds +caching := env("DOCKER_CACHING", "") + +# Build the docker image from the corresponding `.Dockerfile` +build image *args: + #!/usr/bin/env bash + set -euo pipefail + + IMAGE_NAME="{{image}}" + + echo "Building {{image}}..." + docker buildx build -t local_based_{{image}} -f ./{{image}}.Dockerfile ../ {{args}} \ + {{caching}} \ + {{quiet}} + +save image dest: + echo "Exporting {{image}}..." + docker save local_based_{{image}} | gzip > {{dest}}/based-{{image}}.tar.gz + +build-portal: (build "portal") +build-registry: (build "registry") +build-gateway: (build "gateway") +build-txproxy: (build "txproxy") +build-metrics-exporter: (build "metrics-exporter" "--load") + +[parallel] +all: build-portal build-registry build-gateway build-txproxy build-metrics-exporter + +[parallel] +export-all dest: (save "portal" dest) (save "registry" dest) (save "gateway" dest) (save "txproxy" dest) (save "metrics-exporter" dest) + +# Creates the docker network `name` if it doesn't already exist +create-network name="based_op_net": + docker network inspect {{name}} > /dev/null 2>&1 || docker network create {{name}} diff --git a/based/gateway.Dockerfile b/based/docker/gateway.Dockerfile similarity index 100% rename from based/gateway.Dockerfile rename to based/docker/gateway.Dockerfile diff --git a/based/key_to_address.Dockerfile b/based/docker/key_to_address.Dockerfile similarity index 100% rename from based/key_to_address.Dockerfile rename to based/docker/key_to_address.Dockerfile diff --git a/based/metrics-exporter.Dockerfile b/based/docker/metrics-exporter.Dockerfile similarity index 67% 
rename from based/metrics-exporter.Dockerfile rename to based/docker/metrics-exporter.Dockerfile index a00a7808c..01dea91a1 100644 --- a/based/metrics-exporter.Dockerfile +++ b/based/docker/metrics-exporter.Dockerfile @@ -7,15 +7,15 @@ RUN apt-get update && apt-get install -y clang FROM chef AS planner COPY . . -RUN --mount=from=reth,target=/reth cargo chef prepare --recipe-path recipe.json +RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder COPY --from=planner /app/recipe.json recipe.json -RUN --mount=from=reth,target=/reth cargo chef cook --release --recipe-path recipe.json +RUN cargo chef cook --release --recipe-path recipe.json COPY . . -RUN --mount=from=reth,target=/reth cargo build --release --bin bop-metrics-exporter +RUN cargo build --release --bin bop-metrics-exporter FROM debian:stable-slim AS runtime diff --git a/based/portal.Dockerfile b/based/docker/portal.Dockerfile similarity index 100% rename from based/portal.Dockerfile rename to based/docker/portal.Dockerfile diff --git a/based/registry.Dockerfile b/based/docker/registry.Dockerfile similarity index 100% rename from based/registry.Dockerfile rename to based/docker/registry.Dockerfile diff --git a/based/txproxy.Dockerfile b/based/docker/txproxy.Dockerfile similarity index 100% rename from based/txproxy.Dockerfile rename to based/docker/txproxy.Dockerfile diff --git a/based/follower-node.just b/based/follower-node.just new file mode 100644 index 000000000..562724923 --- /dev/null +++ b/based/follower-node.just @@ -0,0 +1,232 @@ +import "./scripts/consts.just" + +mod docker + +# Override where the local data for the gateway will be stored +export DATA_FOLDER := env("FOLLOWER_NODE_DATA_FOLDER", "gateway_and_follower") + +FOLLOWER_NODE_DATA := join(LOCAL_DATA, DATA_FOLDER) + +# Override these to change where geth, the main node and the gateway store their data +export BASED_OP_DATA_DIR := join(FOLLOWER_NODE_DATA, "data") +export BASED_OP_GETH_DATA_DIR := env("BASED_OP_GETH_DATA_DIR", 
join(BASED_OP_DATA_DIR, "geth")) +export BASED_OP_NODE_DATA_DIR := env("BASED_OP_NODE_DATA_DIR", join(BASED_OP_DATA_DIR, "node")) +export BASED_GATEWAY_DATA_DIR := env("BASED_GATEWAY_DATA_DIR", join(BASED_OP_DATA_DIR, "gateway")) + +# Override follower node source dir +export FOLLOWER_NODE_DIR := join(justfile_directory(), "..", "follower_node") + +# External environment configuration +export PORTAL := shell("echo ${PORTAL:-$(" + portal + " address)}") + +# Force config creation +export FORCE := env("FORCE", "") + +# Use local or release images +export BASED_ENV := env("BASED_ENV", "prod") + +WALLET_NAME := env("FOLLOWER_NODE_WALLET", "gateway") +PORTAL_TIMEOUT := env("PORTAL_TIMEOUT", "30") + +# Invocation utils +self := "just -f " + justfile() +parent := "just -f " + join(justfile_directory(), "..", "Justfile") +portal := "just -f " + join(justfile_directory(), "portal.just") +wallet := "just -f " + join(justfile_directory(), "scripts", "wallet.just") +link := "just -f " + join(justfile_directory(), "scripts", "link.just") + +_generate-dotenv gossip enr enode $dotenv=join(FOLLOWER_NODE_DATA, ".env"): + #!/usr/bin/env bash + set -euo pipefail + + cp $FOLLOWER_NODE_DIR/env_example $dotenv + cp $FOLLOWER_NODE_DIR/compose.{{BASED_ENV}}.yml $(dirname $dotenv)/compose.yml + + l2_chain_id=$({{portal}} l2_chain_id) + + echo "PORTAL=$PORTAL" >> $dotenv + echo "OP_NODE_GOSSIP_IP=$PUBLIC_IP" >> $dotenv + echo "GATEWAY_SEQUENCING_KEY=$GATEWAY_SEQUENCING_KEY" >> $dotenv + echo "MAIN_OP_NODE_GOSSIP_STATIC={{gossip}}" >> $dotenv + echo "MAIN_OP_NODE_ENR={{enr}}" >> $dotenv + echo "MAIN_OP_GETH_ENODE={{enode}}" >> $dotenv + echo "NETWORK_ID=$l2_chain_id" >> $dotenv + +_replace-env-var var new_value target_file: + #!/usr/bin/env bash + set -euo pipefail + + target_file={{canonicalize(target_file)}} + + awk -v new_value="{{new_value}}" ''' + BEGIN { found = 0 } + /^{{var}}=/ { + print "{{var}}=" new_value + found = 1 + next + } + { print } + END { + if (!found) print "{{var}}=" 
new_value + }''' \ + {{target_file}} > "replacement.tmp" && mv "replacement.tmp" {{target_file}} + +_update-dotenv gossip enr enode $dotenv=join(FOLLOWER_NODE_DATA, ".env"): + #!/usr/bin/env bash + set -euo pipefail + + {{self}} _replace-env-var "MAIN_OP_NODE_GOSSIP_STATIC" "{{gossip}}" "$dotenv" + {{self}} _replace-env-var "MAIN_OP_NODE_ENR" "{{enr}}" "$dotenv" + {{self}} _replace-env-var "MAIN_OP_GETH_ENODE" "{{enode}}" "$dotenv" + +@_dump-chain-config dest=join(FOLLOWER_NODE_DATA, "config"): + mkdir -p {{dest}} + # TODO: check for invalid json files (empty, ...) + {{portal}} rollup > {{dest}}/rollup.json + {{portal}} genesis > {{dest}}/genesis.json + +@_link-data dest=BASED_OP_DATA_DIR: + {{link}} link-dir-if-exists-elsewhere BASED_OP_GETH_DATA_DIR "{{dest}}/geth" + {{link}} link-dir-if-exists-elsewhere BASED_OP_NODE_DATA_DIR "{{dest}}/node" + {{link}} link-dir-if-exists-elsewhere BASED_GATEWAY_DATA_DIR "{{dest}}/gateway" + +_retry cmd timeout=PORTAL_TIMEOUT: + #!/usr/bin/env bash + start_time=$(date +%s) + deadline=$((start_time + {{timeout}})) + + while true; do + # We only print the output of the command that succeeds + if output=$({{cmd}} 2>/dev/null); then + printf '%s\n' "$output" # Print exactly what the command output + break + fi + + now=$(date +%s) + if (( now >= deadline )); then + echo "Deadline reached, exiting." + exit 1 + fi + + sleep 1 + done + +# Create Gateway configuration +# +# Will take care of creating a private key for the gateway and setting up its .env +create-config $dotenv=join(FOLLOWER_NODE_DATA, ".env"): + #!/usr/bin/env bash + set -euo pipefail + + FOLLOWER_NODE_DATA=$(dirname $dotenv) + echo "*** Setting up gateway in $FOLLOWER_NODE_DATA" + + # If FORCE is set, reset the config + if [ -n "$FORCE" ]; then + echo "FORCE set, removing existing config..." 
+ {{self}} stop || true + rm -rf $FOLLOWER_NODE_DATA || true + {{wallet}} remove {{WALLET_NAME}} + fi + + mkdir -p $FOLLOWER_NODE_DATA + mkdir -p $FOLLOWER_NODE_DATA/config + + # Generate JWT if missing + if [ ! -f $FOLLOWER_NODE_DATA/config/jwt ]; then + openssl rand -hex 32 | tr -d '\n' | sed 's/^/0x/' > $FOLLOWER_NODE_DATA/config/jwt + fi + + # Ensure wallet is ready + echo "Retrieving {{WALLET_NAME}} wallet" + export GATEWAY_SEQUENCING_ADDRESS=$({{wallet}} ensure {{WALLET_NAME}}) + export GATEWAY_SEQUENCING_KEY=$({{wallet}} key {{WALLET_NAME}}) + + echo "Retrieving OP Node bootstrap environment" + gossip=$({{self}} _retry "{{portal}} node_gossip_static") + enr=$({{self}} _retry "{{portal}} node_enr") + enode=$({{self}} _retry "{{portal}} geth_enode") + + # Generate .env if missing + if [ ! -f $dotenv ]; then + echo "Initializing gateway and follower op-node in $FOLLOWER_NODE_DATA..." + echo "Gateway Sequencing Private Key: $GATEWAY_SEQUENCING_KEY" + echo "Gateway Sequencing Wallet: $GATEWAY_SEQUENCING_ADDRESS" + + {{self}} _generate-dotenv "$gossip" "$enr" "$enode" "$dotenv" + {{self}} _dump-chain-config "$FOLLOWER_NODE_DATA/config" + {{self}} _link-data "$FOLLOWER_NODE_DATA/data" + else + # Replace values of existing config + {{self}} _update-dotenv "$gossip" "$enr" "$enode" "$dotenv" + fi + + echo "*** Gateway setup completed!" + +_is-configured $cfg=FOLLOWER_NODE_DATA: + #!/usr/bin/env bash + set -euo pipefail + + if [ ! -d $cfg ]; then + echo "❌ Gateway and Follower Node does not seem to have been configured at $cfg. 
run \`create-config\` to configure" + exit 1 + fi + + echo $cfg + +# Start the gateway service +# +# Will ensure the gateway is configured correctly before starting the service +start-dev $dotenv=join(FOLLOWER_NODE_DATA, ".env") network="based_op_node": (docker::create-network network) (create-config dotenv) + #!/usr/bin/env bash + set -euo pipefail + + FOLLOWER_NODE_DATA=$(dirname $dotenv) + + # Retrieve IP and port from config + ip=$(grep -m2 '^OP_NODE_GOSSIP_IP[[:space:]]*=' $dotenv | tail -n1 | cut -d= -f2) + port=$(grep -m1 '^GATEWAY_PORT[[:space:]]*=' $dotenv | cut -d= -f2) + + address=$({{wallet}} address {{WALLET_NAME}}) + jwt=$(cat $FOLLOWER_NODE_DATA/config/jwt) + + echo "Registering gateway" + {{portal}} register-gateway "http://$ip:$port" "$address" "$jwt" + + # Start services + echo "Starting gateway and monitoring services..." + docker compose -f $FOLLOWER_NODE_DATA/compose.yml up -d + + echo "*** To start the overseer run \`start overseer\`" + +# Start the gateway service as well as the monitoring services +start $dotenv=join(FOLLOWER_NODE_DATA, ".env") network="based_op_node": (start-dev dotenv network) + {{parent}} monitoring start + +stop $dotenv=join(FOLLOWER_NODE_DATA, ".env"): + #!/usr/bin/env bash + set -euo pipefail + + FOLLOWER_NODE_DATA=$(dirname $dotenv) + {{self}} _is-configured $FOLLOWER_NODE_DATA + + docker compose -f $FOLLOWER_NODE_DATA/compose.yml down + +logs-gateway: + docker logs based-gateway --tail 100 -f + +logs $dotenv=join(FOLLOWER_NODE_DATA, ".env"): + docker compose -f $(dirname $dotenv)/compose.yml logs --tail 100 -f + +proxy network="kt-based-op" port="8545": + docker run --rm --network {{network}} -p {{port}}:8545 cars10/simprox simprox --skip-ssl-verify=true -l 127.0.0.1:8545 -t op-el-2-op-geth-op-node-op-kurtosis:8545 + +# Remove all config from follower-node and cleanup containers +reset $cfg=join(FOLLOWER_NODE_DATA, "config"): + #!/usr/bin/env bash + + {{self}} _is-configured $cfg || exit 0 + + FOLLOWER_NODE_DATA=$(dirname
$cfg) + docker compose -f $FOLLOWER_NODE_DATA/compose.yml rm -s -f + rm -rf $FOLLOWER_NODE_DATA diff --git a/based/justfile b/based/justfile index 18934dd30..4107bf7da 100644 --- a/based/justfile +++ b/based/justfile @@ -1,13 +1,14 @@ toolchain := "nightly" -fmt: - rustup toolchain install {{toolchain}} > /dev/null 2>&1 && \ +toolchain: + rustup toolchain install {{toolchain}} > /dev/null 2>&1 + +fmt: toolchain cargo +{{toolchain}} fmt -fmt-check: - rustup toolchain install {{toolchain}} > /dev/null 2>&1 && \ +fmt-check: toolchain cargo +{{toolchain}} fmt --check -clippy: - rustup toolchain install {{toolchain}} > /dev/null 2>&1 && \ +clippy: toolchain cargo +{{toolchain}} clippy --all-features --no-deps -- -D warnings + diff --git a/based/main-node.just b/based/main-node.just new file mode 100644 index 000000000..0915e749b --- /dev/null +++ b/based/main-node.just @@ -0,0 +1,205 @@ +set dotenv-load + +import './scripts/consts.just' + +# Override where the local data for the main node will be stored +export DATA_FOLDER := env("MAIN_NODE_DATA_FOLDER", "local_main_node") + +MAIN_NODE_DATA := join(LOCAL_DATA, DATA_FOLDER) + +# Override these to change where geth and the main node store their data +export BASED_MAIN_NODE_DATA_DIR := join(MAIN_NODE_DATA, "data") +export BASED_OP_GETH_DATA_DIR := env("BASED_OP_GETH_DATA_DIR", join(BASED_MAIN_NODE_DATA_DIR, "geth")) +export BASED_OP_NODE_DATA_DIR := env("BASED_OP_NODE_DATA_DIR", join(BASED_MAIN_NODE_DATA_DIR, "node")) + +# Override main node source dir +export MAIN_NODE_DIR := join(justfile_directory(), "..", "main_node") + +# External environment configuration +export OP_PROPOSER_KEY := shell("echo ${OP_PROPOSER_KEY:-$(" + wallet + " key proposer)} | grep . || exit 1") +export OP_BATCHER_KEY := shell("echo ${OP_BATCHER_KEY:-$(" + wallet + " key batcher)} | grep . || exit 1") +export OP_SEQUENCER_KEY := shell("echo ${OP_SEQUENCER_KEY:-$(" + wallet + " key sequencer)} | grep . 
|| exit 1") + +# Force config creation +export FORCE := env("FORCE", "") +# +# Use local or release images +export BASED_ENV := env("BASED_ENV", "prod") + +# Invocation utils +self := "just -f " + justfile() +link := "just -f " + join(justfile_directory(), "scripts", "link.just") +wallet := "just -f " + join(justfile_directory(), "scripts", "wallet.just") + +@_link-data dest=join(BASED_MAIN_NODE_DATA_DIR, "data"): + {{link}} link-dir-if-exists-elsewhere BASED_OP_GETH_DATA_DIR "{{dest}}/geth" + {{link}} link-dir-if-exists-elsewhere BASED_OP_NODE_DATA_DIR "{{dest}}/node" + +_populate-chain-cfg $src $dest: + #!/usr/bin/env bash + set -euo pipefail + + function nc_copy() { + # Copy without overriding destination + if [ ! -f $dest/$1 ]; then + cp $src/$1 $dest/$1 + fi + } + nc_copy rollup.json + nc_copy state.json + nc_copy genesis.json + +_generate-dotenv $cfg=join(MAIN_NODE_DATA, "config") $dotenv=join(shell("dirname " + cfg), ".env"): + #!/usr/bin/env bash + set -euo pipefail + + MAIN_NODE_DATA=$(dirname $cfg) + + cp $MAIN_NODE_DIR/env_example $dotenv + cp $MAIN_NODE_DIR/compose.{{BASED_ENV}}.yml $MAIN_NODE_DATA/compose.yml + cp $MAIN_NODE_DIR/tx_receivers_example.json $cfg/tx_receivers.json + + # TODO: $(MAKE) fix-compose # Unable to find it + + game_factory=$(jq -r '.opChainDeployments[0].DisputeGameFactoryProxy' $cfg/state.json) + l2_chain_id=$(jq -r '.l2_chain_id' $cfg/rollup.json) + + echo "DISPUTE_GAME_FACTORY_ADDRESS=$game_factory" >> $dotenv + echo "NETWORK_ID=$l2_chain_id" >> $dotenv + echo "L1_RPC_URL=$L1_RPC_URL" >> $dotenv + echo "L1_BEACON_RPC_URL=$L1_BEACON_RPC_URL" >> $dotenv + echo "OP_NODE_SEQUENCER_KEY=$OP_SEQUENCER_KEY" >> $dotenv + echo "OP_NODE_GOSSIP_IP=$PUBLIC_IP" >> $dotenv + echo "OP_BATCHER_PRIVATE_KEY=$OP_BATCHER_KEY" >> $dotenv + echo "OP_PROPOSER_PRIVATE_KEY=$OP_PROPOSER_KEY" >> $dotenv + +# Finalize configuration, assuming that chain config is present at $cfg +_finalize-config $cfg=join(MAIN_NODE_DATA, "config"): + #!/usr/bin/env bash + 
set -euo pipefail + + MAIN_NODE_DATA=$(dirname $cfg) + + # Generate JWT if missing + if [ ! -f $cfg/jwt ]; then + openssl rand -hex 32 | tr -d '\n' | sed 's/^/0x/' > $cfg/jwt + fi + + # Generate .env if missing + dotenv=$MAIN_NODE_DATA/.env + if [ ! -f $dotenv ]; then + {{self}} _generate-dotenv + fi + + # Initialize registry if missing + registry=$cfg/registry.json + if [ ! -f $registry ]; then + echo "[]" > $registry + fi + + {{self}} _link-data "$MAIN_NODE_DATA/data" + echo "*** Done initializing $MAIN_NODE_DATA" + +# Create a backup of the chain config at `destination` +backup-config $dest $cfg=join(MAIN_NODE_DATA, "config"): + #!/usr/bin/env bash + set -euo pipefail + + cp $cfg/rollup.json $dest + cp $cfg/state.json $dest + cp $cfg/genesis.json $dest + +# Create the Main Node configuration +# +# Arguments: +# * `chain_cfg`: folder containing rollup.json, genesis.json and state.json +create-config chain_cfg $cfg=join(MAIN_NODE_DATA, "config"): + #!/usr/bin/env bash + set -euo pipefail + + MAIN_NODE_DATA=$(dirname $cfg) + echo "*** Setting up main node in $MAIN_NODE_DATA" + + # If FORCE is set, reset the config + if [ -n "$FORCE" ]; then + echo "FORCE set, removing existing config..." + rm -rf $MAIN_NODE_DATA || true + fi + + if [ -d $cfg ]; then + echo "❌ Seems like the main node was already configured (see $cfg)." + exit 1 + fi + + mkdir -p $cfg + + # Copy chain config files + {{self}} _populate-chain-cfg {{chain_cfg}} $cfg + + {{self}} _finalize-config $cfg + +# Deploy a new chain and create the appropriate configuration +config-with-deploy $cfg=join(MAIN_NODE_DATA, "config"): + just -f ./scripts/deploy.just deploy-chain $cfg + {{self}} _finalize-config $cfg + +_is-configured $cfg=join(MAIN_NODE_DATA, "config"): + #!/usr/bin/env bash + set -euo pipefail + + if [ ! -d $cfg ]; then + echo "❌ main-node does not seem to have been configured at $cfg. run \`create-config\` to configure" + exit 1 + fi + + if [ ! 
-d $(dirname $cfg)/data ]; then + echo "❌ main-node configuration seems to not have been finalized correctly at $cfg. Ensure that the chain state is present under $cfg/config and run `_finalize-config $cfg` to finalize." + exit 1 + fi + + echo $cfg + +# Start the Main Node service +start $cfg=join(MAIN_NODE_DATA, "config"): + #!/usr/bin/env bash + set -euo pipefail + + # Check Node running + if docker ps --format '{{{{.Names}}' | grep -wq op-node ; then + echo "❌ Main node already running." + exit 1 + fi + + {{self}} _is-configured $cfg + + MAIN_NODE_DATA=$(dirname $cfg) + + docker compose -f $MAIN_NODE_DATA/compose.yml up -d + +stop $cfg=join(MAIN_NODE_DATA, "config"): + #!/usr/bin/env bash + set -euo pipefail + + {{self}} _is-configured $cfg + + MAIN_NODE_DATA=$(dirname $cfg) + docker compose -f $MAIN_NODE_DATA/compose.yml down + +logs $cfg=join(MAIN_NODE_DATA, "config"): + #!/usr/bin/env bash + set -euo pipefail + + {{self}} _is-configured $cfg + + MAIN_NODE_DATA=$(dirname $cfg) + docker compose -f $MAIN_NODE_DATA/compose.yml logs --tail 100 -f + +# Remove all config from main-node and cleanup containers +reset $cfg=join(MAIN_NODE_DATA, "config"): + #!/usr/bin/env bash + + {{self}} _is-configured $cfg || exit 0 + + MAIN_NODE_DATA=$(dirname $cfg) + docker compose -f $MAIN_NODE_DATA/compose.yml rm -s -f + rm -rf $MAIN_NODE_DATA diff --git a/based/overseer.just b/based/overseer.just new file mode 100644 index 000000000..bdc297458 --- /dev/null +++ b/based/overseer.just @@ -0,0 +1,9 @@ +import './scripts/consts.just' + +parent := "just -f " + join(justfile_directory(), "..", "Justfile") + +# Start overseer +@start: + docker exec -it based-gateway overseer \ + --portal-url $({{parent}} portal address) \ + --rich-wallet-key {{DUMMY_RICH_WALLET_PRIVATE_KEY}} diff --git a/based/portal.just b/based/portal.just new file mode 100644 index 000000000..f67e1cae3 --- /dev/null +++ b/based/portal.just @@ -0,0 +1,58 @@ +export PORTAL_PORT := env("PORTAL_PORT", "8080") +export 
PORTAL := env("PORTAL", "http://0.0.0.0:" + PORTAL_PORT) + +# Normally JQ doesn't return error if its return was empty or null. -e fixes that +export JQ_ARGS := env("JQ_ARGS", "-e") + +service-name := "based-portal" + +# Invocation utils +main_node := "just -f " + justfile_directory() + "/main-node.just" +self := "just -f " + justfile() + +@address: + echo $PORTAL + +@_rpc method params="[]": + curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc": "2.0", "method": "{{method}}", "params": {{params}}, "id": 1}' \ + {{PORTAL}} | jq -r $JQ_ARGS '.result' + +node_gossip_static: (_rpc "portal_opNodeGossipStatic") +node_enr: (_rpc "portal_opNodeBootnodeEnr") +geth_enode: (_rpc "portal_opGethBootnodeEnode") +l2_chain_id: (_rpc "portal_l2ChainId") + +rollup: (_rpc "portal_fileRollup") +genesis: (_rpc "portal_fileGenesis") + +@register-gateway url address jwt: + # The portal returns null on success so we remove the -e flag + JQ_ARGS="" {{self}} _rpc "registry_registerGateway" \ + "[[\"{{url}}\", \"{{address}}\", \"{{jwt}}\"]]" + +# Start the portal service and view logs (for main sequencing node) +start: + #!/usr/bin/env bash + set -euo pipefail + + cfg=$({{main_node}} _is-configured) + data=$(dirname $cfg) + + # TODO: $(MAKE) fix-compose # Unable to find it + docker compose -f $data/compose.yml up -d {{service-name}} + {{self}} logs + +# View the portal service logs +logs: + docker logs {{service-name}} --tail 100 -f + +# Stop the portal service +stop: + #!/usr/bin/env bash + set -euo pipefail + + data=$(dirname $({{main_node}} _is-configured)) + + docker compose -f $data/compose.yml down {{service-name}} + diff --git a/based/registry.just b/based/registry.just new file mode 100644 index 000000000..e780263f1 --- /dev/null +++ b/based/registry.just @@ -0,0 +1,29 @@ + +# Invocation utils +main_node := "just -f " + justfile_directory() + "/main-node.just" +self := "just -f " + justfile() + +service-name := "based-registry" + +# Start the registry service 
and view logs (for main sequencing node) +start: + #!/usr/bin/env bash + set -euo pipefail + + data=$(dirname $({{main_node}} _is-configured)) + + docker compose -f $data/compose.yml up -d {{service-name}} + {{self}} logs + +# View the registry service logs +logs: + docker logs {{service-name}} --tail 100 -f + +# Stop the registry service +stop: + #!/usr/bin/env bash + set -euo pipefail + + data=$(dirname $({{main_node}} _is-configured)) + + docker compose -f $data/compose.yml down {{service-name}} diff --git a/based/scripts/consts.just b/based/scripts/consts.just new file mode 100644 index 000000000..a6ca08cef --- /dev/null +++ b/based/scripts/consts.just @@ -0,0 +1,15 @@ +dummy_rich_wallet_default := "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + +export DUMMY_RICH_WALLET_PRIVATE_KEY := env("DUMMY_RICH_WALLET_PRIVATE_KEY", dummy_rich_wallet_default) + +export L1_RPC_URL := env("L1_RPC_URL", "https://ethereum-sepolia-rpc.publicnode.com") +export L1_BEACON_RPC_URL := env("L1_BEACON_RPC_URL", "https://ethereum-sepolia-beacon-api.publicnode.com") + +export BASED_OP_GETH_PORT := env("BASED_OP_GETH_PORT", "8645") +export FOLLOWER_NODE_HOST := env("FOLLOWER_NODE_HOST", "http://localhost") + +export PUBLIC_IP := shell("echo ${PUBLIC_IP:-$(curl -4s ifconfig.me)}") + +default_local_data := join(canonicalize(justfile_directory()), "..", "..", ".local") +prepare_local_data := "mkdir -p " + default_local_data + " | echo " + default_local_data +export LOCAL_DATA := canonicalize(shell("echo ${LOCAL_DATA:-$(" + prepare_local_data + ")}")) diff --git a/based/scripts/deploy.just b/based/scripts/deploy.just new file mode 100644 index 000000000..d5d6e5f3e --- /dev/null +++ b/based/scripts/deploy.just @@ -0,0 +1,123 @@ +set dotenv-load + +import './consts.just' + +export DATA_FOLDER := env("MAIN_NODE_DATA_FOLDER", "local_main_node") + +MAIN_NODE_DATA := join(LOCAL_DATA, DATA_FOLDER) + +# Override main node source dir +export MAIN_NODE_DIR := 
join(justfile_directory(), "..", "..", "main_node") + +# External environment configuration +export L1_CHAIN_ID := env("L1_CHAIN_ID", "11155111") # Default to Sepolia +export L2_CHAIN_ID := env("L2_CHAIN_ID", shell(gen-l2-chain-id)) + +export OP_PROPOSER_KEY := shell("echo ${OP_PROPOSER_KEY:-$(" + wallet + " key proposer)} | grep . || exit 1") +export OP_BATCHER_KEY := shell("echo ${OP_BATCHER_KEY:-$(" + wallet + " key batcher)} | grep . || exit 1") +export OP_SEQUENCER_KEY := shell("echo ${OP_SEQUENCER_KEY:-$(" + wallet + " key sequencer)} | grep . || exit 1") +# +# Force config creation +export FORCE := env("FORCE", "") + +BATCHER_WALLET_NAME := env("BATCHER_WALLET", "batcher") +PROPOSER_WALLET_NAME := env("PROPOSER_WALLET", "proposer") +SEQUENCER_WALLET_NAME := env("SEQUENCER_WALLET", "sequencer") + +# Override OP deployer image and cache dir +op_deployer_image := env("IMAGE_OP_DEPLOYER", "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-deployer:v0.4.5") +export DEPLOYER_CACHE_DIR:= env("DEPLOYER_CACHE_DIR", "/tmp/op-deployer-cache") + +export L2_CHAIN_ID_HEX := shell('printf "0x%064x" ' + L2_CHAIN_ID) +# NOTE: Having this as s Just recipe would lead to loops +gen-l2-chain-id := " +RAW=$(od -An -N2 -tu2 /dev/urandom | tr -d ' ') +echo $(($RAW % 50000 + 1)) +" + +# Invocation utils +self := "just -f " + justfile() +wallet := "just -f " + join(justfile_directory(), "wallet.just") + +_op-deployer $cfg *runtime-args: + docker run --rm -v $cfg:/config -e DEPLOYER_CACHE_DIR=$DEPLOYER_CACHE_DIR \ + {{op_deployer_image}} {{runtime-args}} + +_fix-permissions $cfg: + docker run --rm -v $cfg:/config --entrypoint sh {{op_deployer_image}} -c \ + "find /config -mindepth 1 -exec chmod 666 {} \; && chmod 755 /config" + +_populate-intent $cfg=join(MAIN_NODE_DATA, "config"): + #!/usr/bin/env bash + set -euo pipefail + + sequencer=$({{wallet}} ensure {{SEQUENCER_WALLET_NAME}} $OP_SEQUENCER_KEY) + proposer=$({{wallet}} ensure {{PROPOSER_WALLET_NAME}} $OP_PROPOSER_KEY) + 
batcher=$({{wallet}} ensure {{BATCHER_WALLET_NAME}} $OP_SEQUENCER_KEY) + + sed -E \ + -e "s@L1_CHAIN_ID@$L1_CHAIN_ID@g" \ + -e "s@L2_CHAIN_ID@$L2_CHAIN_ID_HEX@g" \ + -e "s@VAULT_WALLET@$sequencer@g" \ + -e "s@OP_BATCHER_WALLET@$batcher@g" \ + -e "s@OP_PROPOSER_WALLET@$proposer@g" \ + {{MAIN_NODE_DIR}}/intent.template.toml \ + > $cfg/intent.toml + +# Create a new chain configuration +create-config $cfg=join(MAIN_NODE_DATA, "config"): + #!/usr/bin/env bash + set -euo pipefail + + MAIN_NODE_DATA=$(dirname $cfg) + + if [ -n "$FORCE" ]; then + echo "FORCE set, removing existing config at $MAIN_NODE_DATA..." + rm -rf $MAIN_NODE_DATA || true + fi + + if [ -d $cfg ]; then + echo "❌ Seems like information of a previous chain is already present. Please remove $cfg to deploy a new one." + exit 1 + fi + + # Ensure directory exists and has write permissions + mkdir -p $cfg + chmod 755 $cfg || true + + {{self}} _op-deployer $cfg op-deployer init --l1-chain-id $L1_CHAIN_ID --l2-chain-ids $L2_CHAIN_ID --workdir /config + {{self}} _fix-permissions $cfg + {{self}} _populate-intent + {{self}} _fix-permissions $cfg + + exit 0 + +@_dump-chain-config $dest=join(MAIN_NODE_DATA, "config"): + mkdir -p {{dest}} + + {{self}} _op-deployer $cfg op-deployer inspect genesis --workdir /config $L2_CHAIN_ID_HEX > $dest/genesis.json + {{self}} _op-deployer $cfg op-deployer inspect rollup --workdir /config $L2_CHAIN_ID_HEX > $dest/rollup.json + +# Setup and deploy a new chain on the configured L1 +deploy-chain $cfg=join(MAIN_NODE_DATA, "config"): + #!/usr/bin/env bash + set -euo pipefail + + echo "*** Setting up new Chain with id: $L2_CHAIN_ID" + {{self}} create-config $cfg + + echo "*** Deploying new chain..." + sequencer=$({{wallet}} key {{SEQUENCER_WALLET_NAME}}) + + {{self}} _op-deployer $cfg op-deployer apply --workdir /config --l1-rpc-url $L1_RPC_URL --private-key $sequencer + + echo "*** Retrieving newly created chain data..." 
+ {{self}} _dump-chain-config $cfg + + # World accessible permissions + {{self}} _fix-permissions $cfg + + echo "*** Deployment complete! See chain config in $cfg" + + exit 0 + diff --git a/based/scripts/link.just b/based/scripts/link.just new file mode 100644 index 000000000..e4b6daf33 --- /dev/null +++ b/based/scripts/link.just @@ -0,0 +1,22 @@ +set quiet + +export FORCE := env("FORCE", "") + +# Links `src` to `dest`, if `src` is not `dest` and `src` exists, otherwise, created `src` +# +# `src` is the result of evaluating `srcEnv` environment. +link-dir-if-exists-elsewhere srcEnv $dest: + #!/usr/bin/env bash + set -euo pipefail + + src=${{srcEnv}} + + if [ -n "$FORCE" ]; then + rm -rf $dest || true + fi + + if [[ "$src" != "$dest" && -d "$src" && ! -d "$dest" ]]; then + ln -s $src $dest + else + mkdir -p $dest + fi diff --git a/based/scripts/wallet.just b/based/scripts/wallet.just new file mode 100644 index 000000000..869271390 --- /dev/null +++ b/based/scripts/wallet.just @@ -0,0 +1,78 @@ +# Override where the wallet data will be stored +# +# The resulting path for a given wallet will be $LOCAL_DATA/$WALLET_DATA_FOLDER/{{name}} +export LOCAL_DATA := env("LOCAL_DATA") +export DATA_FOLDER := join(LOCAL_DATA, env("WALLET_DATA_FOLDER", ".wallet")) + +export FORCE := env("FORCE", "") + +tool_image := env("WALLET_TOOL_IMAGE", "ghcr.io/gattaca-com/based-op/key-to-address:latest") + +self := "just -f " + justfile() + +[no-exit-message] +needs_create name: + #!/usr/bin/env bash + set -euo pipefail + + if [[ -e "{{join(DATA_FOLDER, name)}}" && -z "$FORCE" ]]; then + echo "Wallet {{name}} already exists, set \$FORCE to override" >&2 + exit 1 + fi + + exit 0 + +# Creates the wallet `name` at `$DATA_FOLDER/{{name}}` +# +# Will create 2 files, key and address, holding the private key and the address of the wallet +# +# Outputs the address of the generated wallet +create name override="": + #!/usr/bin/env bash + set -euo pipefail + + wallet=$(docker run --rm -i {{tool_image}} 
{{override}}) + key=$(echo "$wallet" | head -n1) + address=$(echo "$wallet" | head -n2 | tail -n1) + + mkdir -p {{join(DATA_FOLDER, name)}} + echo "$key" > {{join(DATA_FOLDER, name, "key")}} + echo "$address" > {{join(DATA_FOLDER, name, "address")}} + echo "$address" + +# Remove the wallet `name` +[no-exit-message] +@remove name: + rm -rf {{join(DATA_FOLDER, "name")}} + +# Ensures the wallet `name` exists, and returns the corresponding address +ensure name override="": + #!/usr/bin/env bash + set -euo pipefail + + mkdir -p $DATA_FOLDER + + {{self}} needs_create {{name}} || { + {{self}} address {{name}} + # No need to create the wallet + exit 0 + } + + # Cleanup existing copy (only possible if FORCE is set) + {{self}} remove {{name}} || true + + # Create the wallet + {{self}} create {{name}} {{override}} + +# Returns the key of the wallet `name` +@key name: + cat {{join(DATA_FOLDER, name, "key")}} + +# Returns the address of the wallet `name` +@address name: + cat {{join(DATA_FOLDER, name, "address")}} + +# Backup the named wallet at `dest` +backup name dest: + mkdir -p {{join(invocation_directory(), dest, name)}} + cp -r {{join(DATA_FOLDER, name)}}/* {{join(invocation_directory(), dest, name)}} diff --git a/deps/.gitignore b/deps/.gitignore new file mode 100644 index 000000000..1c93dbb80 --- /dev/null +++ b/deps/.gitignore @@ -0,0 +1,7 @@ +.DS_Store +.idea +*.log +tmp/ + +/geth +/optimism diff --git a/deps/Justfile b/deps/Justfile new file mode 100644 index 000000000..ad09129d9 --- /dev/null +++ b/deps/Justfile @@ -0,0 +1,20 @@ +set dotenv-load +set quiet + +geth := "just -f " + join(justfile_directory(), "geth.just") +optimism := "just -f " + join(justfile_directory(), "optimism.just") + +_fetch dep: + just -f {{join(justfile_directory(), dep + ".just")}} fetch + +[parallel] +fetch: (_fetch "geth") (_fetch "optimism") + +_build dep: (_fetch dep) + just -f {{join(justfile_directory(), dep + ".just")}} build + +[parallel] +build: (_build "geth") (_build "optimism") 
+ +hash dep: + git -C {{join(justfile_directory(), dep)}} rev-parse --verify --short HEAD diff --git a/deps/geth.just b/deps/geth.just new file mode 100644 index 000000000..0a42588b2 --- /dev/null +++ b/deps/geth.just @@ -0,0 +1,22 @@ +repo_default := "https://github.com/gattaca-com/based-op-geth" +ref_default := "based/develop" + +repo := env("BASED_OP_GETH_REPO", repo_default) +ref := env("BASED_OP_GETH_REF", ref_default) + +suppress_output := env("QUIET", "true") +quiet := if suppress_output == "true" { "--quiet > /dev/null 2>&1 " } else { "" } + +set quiet + +@fetch repo=repo ref=ref: + REPO={{repo}} REF={{ref}} just -f source.just ensure geth + +@link source: + LOCAL={{source}} just -f source.just ensure geth + +# 🏗️ Build OP geth from based-op-geth +[working-directory: 'geth'] +build: + echo "Building based-op-geth..." + docker build -t local_based_op_geth . {{quiet}} diff --git a/deps/optimism.just b/deps/optimism.just new file mode 100644 index 000000000..9774ca09b --- /dev/null +++ b/deps/optimism.just @@ -0,0 +1,35 @@ +repo_default := "https://github.com/gattaca-com/based-optimism" +ref_default := "based/develop" + +repo := env("BASED_OP_GETH_REPO", repo_default) +ref := env("BASED_OP_GETH_REF", ref_default) + +suppress_output := env("QUIET", "true") +quiet := if suppress_output == "true" { "> /dev/null 2>&1" } else { "" } + +export IMAGE_TAGS := "develop" +set quiet + +@fetch repo=repo ref=ref: + REPO={{repo}} REF={{ref}} just -f source.just ensure optimism + +@link source: + LOCAL={{source}} just -f source.just ensure optimism + +# 🏗️ Build OP node from based-optimism +[working-directory: 'optimism'] +build-op-node: + echo "Building based-op-node..." + docker buildx bake -f docker-bake.hcl \ + --set op-node.tags=local_based_op_node \ + --load op-node {{quiet}} + +# 🏗️ Build OP deployer from based-optimism +[working-directory: 'optimism'] +build-op-deployer: + echo "Building based-op-deployer..." 
+ docker buildx bake -f docker-bake.hcl \ + --set op-deployer.tags=local_based_op_deployer \ + --load op-deployer {{quiet}} + +build: build-op-node build-op-deployer diff --git a/deps/rabby.just b/deps/rabby.just new file mode 100644 index 000000000..b3847f7f0 --- /dev/null +++ b/deps/rabby.just @@ -0,0 +1,5 @@ +# 🏗️ Build modified Rabby wallet for Google Chrome and Firefox +build-rabby-chrome: + cd rabby && yarn && \ + yarn build:pro && \ + yarn build:pro:mv2 diff --git a/deps/source.just b/deps/source.just new file mode 100644 index 000000000..db791d8ad --- /dev/null +++ b/deps/source.just @@ -0,0 +1,61 @@ +set quiet := true + +# Set these to control where deps are fetched from. +repo := env("REPO") +ref := env("REF", "main") + +# Set these to instead link the dependency +local := env("LOCAL", "") +uses_local := if trim(local) != "" { "true" } else { "" } + +force := env("FORCE", "") + +# Fetch the given dependency from the configured remote +fetch $folder: + #!/usr/bin/env bash + set -euo pipefail + + echo "Fetching $folder from {{repo}} @{{ref}}" + git clone --branch "{{ref}}" --depth 1 "{{repo}}" "$folder" + +# Determine if the dependency needs sourcing +# +# If the folder is not present, or if force is set, the dependency will be sourced +[no-exit-message] +needs_sourcing $folder: + #!/usr/bin/env bash + set -euo pipefail + + if [[ -e "./$folder" && -z "{{force}}" ]]; then + echo "$folder already exists, set \$FORCE to override" >&2 + exit 1 + fi + + exit 0 + +# Fetch or link the configured dependency in `folder` +ensure $folder: + #!/usr/bin/env bash + set -euo pipefail + + echo "Checking $folder..." 
+ + just -f {{justfile()}} needs_sourcing $folder || { + # No need to source the dependency + exit 0 + } + + # Cleanup existing copy (only possible if FORCE is set) + rm -rf $folder || true + + # Link local copy, if configured + if [ -n "{{uses_local}}" ]; then + local_path="{{local}}" + echo "Linking local path $local_path -> $folder" + ln -s "$local_path" "./$folder" + exit 0 + fi + + # Fetch from source + echo "Fetching $folder from source" + just -f {{justfile()}} fetch {{folder}} diff --git a/docs/Justfile b/docs/Justfile new file mode 100644 index 000000000..dc24a74b0 --- /dev/null +++ b/docs/Justfile @@ -0,0 +1,15 @@ +set quiet + +suppress_output := env("QUIET", "true") +quiet := if suppress_output == "true" { "> /dev/null 2>&1 " } else { "" } + +prepare: + npm i {{quiet}} + +build: prepare + echo "Building docs..." + npm run build {{quiet}} + +serve: build + echo "Serving docs:" + npm run start diff --git a/follower_node/compose.dev.yml b/follower_node/compose.dev.yml new file mode 100644 index 000000000..069b4a293 --- /dev/null +++ b/follower_node/compose.dev.yml @@ -0,0 +1,118 @@ +services: + based-op-geth: + image: local_based_op_geth:latest + container_name: based-op-geth + entrypoint: ["/bin/sh", "-c"] + command: + - | + # 1) if this directory lacks the geth chaindata, initialize it + if [ ! 
-d /data/geth/geth/chaindata ]; then + echo ">>> initializing geth datadir…" + geth init \ + --state.scheme=hash \ + --datadir=/data/geth \ + /config/genesis.json + fi + # 2) then exec the real geth with all your flags + exec geth \ + --networkid=$NETWORK_ID \ + --datadir=/data/geth \ + --gcmode=archive \ + --state.scheme=hash \ + --http \ + --http.addr=0.0.0.0 \ + --http.port=$OP_GETH_RPC_PORT \ + --http.vhosts="*" \ + --http.corsdomain="*" \ + --http.api="admin,engine,net,eth,web3,debug,miner,txpool" \ + --ws \ + --ws.addr=0.0.0.0 \ + --ws.port=$OP_GETH_WS_PORT \ + --ws.api="admin,engine,net,eth,web3,debug,miner,txpool" \ + --ws.origins="*" \ + --authrpc.port=$OP_GETH_ENGINE_RPC_PORT \ + --authrpc.addr=0.0.0.0 \ + --authrpc.vhosts="*" \ + --authrpc.jwtsecret=/config/jwt \ + --syncmode=full \ + --rpc.allow-unprotected-txs \ + --allow-insecure-unlock \ + --discovery.port=$OP_GETH_GOSSIP_PORT \ + --port=$OP_GETH_GOSSIP_PORT \ + --bootnodes=$MAIN_OP_GETH_ENODE \ + --rollup.sequencerhttp=$TXPROXY \ + --metrics=true \ + --metrics.addr=0.0.0.0 \ + --metrics.port=8010 + labels: + - logs=enabled + - service=based-op-geth + volumes: + - ./data/geth:/data/geth + - ./config:/config + restart: unless-stopped + network_mode: "host" + + based-op-node: + image: local_based_op_node:latest + container_name: based-op-node + network_mode: "host" + command: + - op-node + - --l2=http://0.0.0.0:$OP_GETH_ENGINE_RPC_PORT + - --l2.jwt-secret=/config/jwt + - --verifier.l1-confs=1 + - --rollup.config=/config/rollup.json + - --rpc.addr=0.0.0.0 + - --rpc.port=8547 + - --rpc.enable-admin + - --l1=$L1_RPC_URL + - --l1.rpckind=standard + - --l1.beacon=$L1_BEACON_RPC_URL + - --p2p.advertise.ip=$OP_NODE_GOSSIP_IP + - --p2p.advertise.tcp=$OP_NODE_GOSSIP_PORT + - --p2p.advertise.udp=$OP_NODE_GOSSIP_PORT + - --p2p.listen.ip=0.0.0.0 + - --p2p.listen.tcp=$OP_NODE_GOSSIP_PORT + - --p2p.listen.udp=$OP_NODE_GOSSIP_PORT + - --safedb.path=/data/op-node/op-node-beacon-data + - 
--p2p.static=$MAIN_OP_NODE_GOSSIP_STATIC + - --rpc.enable-based + - --registry=$PORTAL + - --p2p.bootnodes=$MAIN_OP_NODE_ENR + - --syncmode=execution-layer + - --metrics.enabled=true + - --metrics.addr=0.0.0.0 + - --metrics.port=8011 + volumes: + - ./data/node:/data/op-node + - ./config:/config + depends_on: + - based-op-geth + restart: unless-stopped + labels: + - logs=enabled + - service=based-op-node + + based-gateway: + image: local_based_gateway:latest + container_name: based-gateway + command: + - --rpc.port=$GATEWAY_PORT + - --rpc.port_no_auth=$GATEWAY_PORT_NO_AUTH + - --rpc.host=0.0.0.0 + - --chain=/config/genesis.json + - --db.datadir=/data/gateway + - --eth_client.url=$PORTAL + - --rpc.jwt=/config/jwt + - --gossip.signer_private_key=$GATEWAY_SEQUENCING_KEY + - --gossip.root_peer_url=http://0.0.0.0:8547 + - --log.dir=/var/log/app + volumes: + - ./config:/config + - ./data/gateway:/data/gateway + - /tmp:/tmp + - /dev/shm:/dev/shm + - /var/log/containers/based-op/based-gateway:/var/log/app + network_mode: "host" + restart: unless-stopped diff --git a/follower_node/compose.yml b/follower_node/compose.prod.yml similarity index 98% rename from follower_node/compose.yml rename to follower_node/compose.prod.yml index 534c6aa50..f3505a7a2 100644 --- a/follower_node/compose.yml +++ b/follower_node/compose.prod.yml @@ -102,6 +102,7 @@ services: container_name: based-gateway command: - --rpc.port=$GATEWAY_PORT + - --rpc.port_no_auth=$GATEWAY_PORT_NO_AUTH - --rpc.host=0.0.0.0 - --chain=/config/genesis.json - --db.datadir=/data/gateway diff --git a/follower_node/env_example b/follower_node/env_example index 3b9bf6993..b9830b619 100644 --- a/follower_node/env_example +++ b/follower_node/env_example @@ -7,4 +7,5 @@ OP_GETH_ENGINE_RPC_PORT=8651 OP_GETH_RPC_PORT=8645 OP_GETH_WS_PORT=8646 GATEWAY_PORT=9997 +GATEWAY_PORT_NO_AUTH=9994 TXPROXY=http://localhost:8090 diff --git a/main_node/compose.dev.yml b/main_node/compose.dev.yml new file mode 100644 index 
000000000..c0fb69193 --- /dev/null +++ b/main_node/compose.dev.yml @@ -0,0 +1,259 @@ +services: + op-geth: + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101603.5 + container_name: op-geth + network_mode: "host" + entrypoint: ["/bin/sh", "-c"] + command: + - | + # 1) if this directory lacks the geth chaindata, initialize it + if [ ! -d /data/geth/geth/chaindata ]; then + echo ">>> initializing geth datadir…" + geth init \ + --state.scheme=hash \ + --datadir=/data/geth \ + /config/genesis.json + fi + # 2) then exec the real geth with all your flags + exec geth \ + --networkid=$NETWORK_ID \ + --datadir=/data/geth \ + --gcmode=archive \ + --state.scheme=hash \ + --http \ + --http.addr=0.0.0.0 \ + --http.vhosts="*" \ + --http.corsdomain="*" \ + --http.api="admin,engine,net,eth,web3,debug,miner,txpool" \ + --ws \ + --ws.addr=0.0.0.0 \ + --ws.port=$OP_GETH_WS_PORT \ + --ws.api="admin,engine,net,eth,web3,debug,miner,txpool" \ + --ws.origins="*" \ + --authrpc.port=$OP_GETH_ENGINE_RPC_PORT \ + --authrpc.addr=0.0.0.0 \ + --authrpc.vhosts="*" \ + --authrpc.jwtsecret=/config/jwt \ + --syncmode=full \ + --rpc.allow-unprotected-txs \ + --discovery.port=$OP_GETH_GOSSIP_PORT \ + --port=$OP_GETH_GOSSIP_PORT + volumes: + - ./data/geth:/data/geth + - ./config:/config + restart: unless-stopped + labels: + - logs=enabled + - service=op-geth + + op-node: + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:v1.16.2 + container_name: op-node + entrypoint: + - sh + - -c + - | + sleep "${OP_NODE_DELAY:-5}" && + exec op-node "$@" + network_mode: "host" + # Override inter-container URLs for bridge networking + command: + - op-node + - --l2=http://0.0.0.0:${PORTAL_PORT} + - --l2.jwt-secret=/config/jwt + - --verifier.l1-confs=1 + - --rollup.config=/config/rollup.json + - --rpc.addr=0.0.0.0 + - --rpc.port=${OP_NODE_RPC_PORT} + - --rpc.enable-admin + - --l1=${L1_RPC_URL} + - --l1.rpckind=standard + - --l1.beacon=${L1_BEACON_RPC_URL} + - 
--p2p.advertise.ip=${OP_NODE_GOSSIP_IP} + - --p2p.advertise.tcp=${OP_NODE_GOSSIP_PORT} + - --p2p.advertise.udp=${OP_NODE_GOSSIP_PORT} + - --p2p.listen.ip=0.0.0.0 + - --p2p.listen.tcp=${OP_NODE_GOSSIP_PORT} + - --p2p.listen.udp=${OP_NODE_GOSSIP_PORT} + - --p2p.sequencer.key=${OP_NODE_SEQUENCER_KEY} + - --safedb.path=/data/op-node/op-node-beacon-data + - --sequencer.enabled + volumes: + - ./data/node:/data/op-node + - ./config:/config + depends_on: + - op-geth + - based-portal + restart: unless-stopped + labels: + - logs=enabled + - service=op-node + + op-batcher: + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:v1.13.2 + container_name: op-batcher + network_mode: "host" + + # Override inter-container URLs for bridge networking + command: + - op-batcher + - --l2-eth-rpc=http://0.0.0.0:${PORTAL_PORT} + - --rollup-rpc=http://0.0.0.0:${OP_NODE_RPC_PORT} + - --sub-safety-margin=6 + - --num-confirmations=1 + - --safe-abort-nonce-too-low-count=3 + - --resubmission-timeout=30s + - --rpc.addr=0.0.0.0 + - --rpc.port=8548 + - --rpc.enable-admin + - --max-channel-duration=50 + - --l1-eth-rpc=${L1_RPC_URL} + - --private-key=${OP_BATCHER_PRIVATE_KEY} + - --max-blocks-per-span-batch=10 + - --data-availability-type=blobs + - --metrics.enabled=true + - --metrics.addr=0.0.0.0 + - --metrics.port=8008 + depends_on: + - op-node + restart: unless-stopped + labels: + - logs=enabled + - service=op-batcher + + op-proposer: + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-proposer:v1.10.0 + container_name: op-proposer + network_mode: "host" + command: + - op-proposer + - --poll-interval=12s + - --rpc.port=8560 + - --rollup-rpc=http://0.0.0.0:${OP_NODE_RPC_PORT} + - --game-factory-address=${DISPUTE_GAME_FACTORY_ADDRESS} + - --game-type=${DISPUTE_GAME_TYPE} + - --private-key=${OP_PROPOSER_PRIVATE_KEY} + - --l1-eth-rpc=${L1_RPC_URL} + - --proposal-interval=2s + - --metrics.enabled=true + - --metrics.addr=0.0.0.0 + - --metrics.port=8009 + depends_on: + - op-node + 
restart: unless-stopped + labels: + - logs=enabled + - service=op-proposer + + based-portal: + image: local_based_portal:latest + container_name: based-portal + network_mode: "host" + # Override inter-container URLs for bridge networking + command: + - --fallback.eth_url=http://0.0.0.0:${OP_GETH_RPC_PORT} + - --fallback.engine_url=http://0.0.0.0:${OP_GETH_ENGINE_RPC_PORT} + - --op_node.url=http://0.0.0.0:${OP_NODE_RPC_PORT} + - --registry.url=http://0.0.0.0:${REGISTRY_PORT} + - --gateway.timeout_ms=200 + - --log.dir=/var/log/app + - --port=${PORTAL_PORT} + volumes: + - ./config:/config + - /tmp:/tmp + - /dev/shm:/dev/shm + - /var/log/containers/based-op/based-portal:/var/log/app + restart: unless-stopped + + based-registry: + image: local_based_registry:latest + container_name: based-registry + network_mode: "host" + # Override inter-container URLs for bridge networking + command: + - --registry.path=/config/registry.json + - --eth_rpc_url=http://0.0.0.0:${OP_GETH_RPC_PORT} + - --port=${REGISTRY_PORT} + - --log.dir=/var/log/app + volumes: + - ./config:/config + - /tmp:/tmp + - /dev/shm:/dev/shm + - /var/log/containers/based-op/based-registry:/var/log/app + restart: unless-stopped + + restarter: + image: docker:cli + volumes: ["/var/run/docker.sock:/var/run/docker.sock"] + command: + [ + "/bin/sh", + "-c", + "while true; do sleep 3600; docker restart op-batcher; done", + ] + restart: unless-stopped + network_mode: "host" + + based-txproxy: + image: local_based_txproxy:latest + container_name: based-txproxy + network_mode: "host" + command: + - --tx_receivers.path=/config/tx_receivers.json + - --port=${TXPROXY_PORT} + - --log.dir=/var/log/app + volumes: + - ./config:/config + - /tmp:/tmp + - /dev/shm:/dev/shm + - /var/log/containers/based-op/based-txproxy:/var/log/app + restart: unless-stopped +# ################################################################################ +# # Postgres for Blockscout +# 
################################################################################ +# blockscout-postgres: +# image: postgres:14-alpine +# container_name: blockscout-postgres +# restart: unless-stopped +# environment: +# # these credentials will be baked into the Blockscout DATABASE_URL below +# POSTGRES_USER: blockscout +# POSTGRES_PASSWORD: blockscout +# POSTGRES_DB: blockscout +# volumes: +# - ./data/blockscout-postgres:/var/lib/postgresql/data +# shm_size: '50gb' +# network_mode: "host" + +# ################################################################################ +# # Blockscout explorer +# ################################################################################ +# blockscout: +# image: blockscout/blockscout:6.8.0 +# container_name: blockscout +# command: +# - /bin/sh +# - -c +# - bin/blockscout eval "Elixir.Explorer.ReleaseTasks.create_and_migrate()" && bin/blockscout start + +# network_mode: "host" +# depends_on: +# - op-node # need the L2 node up to serve RPC +# - blockscout-postgres # need the DB up first +# environment: +# DATABASE_URL: "postgresql://blockscout:blockscout@blockscout-postgres:5432/blockscout" +# # point to your L2 JSON-RPC: +# ETHEREUM_JSONRPC_HTTP_URL: "http://0.0.0.0:${OP_GETH_RPC_PORT}" +# ETHEREUM_JSONRPC_WS_URL: "ws://0.0.0.0:${OP_GETH_WS_PORT}" +# ETHEREUM_JSONRPC_TRACE_URL: "http://0.0.0.0:${OP_GETH_RPC_PORT}" +# ETHEREUM_JSONRPC_VARIANT: "geth" +# SECRET_KEY_BASE: "56NtB48ear7+wMSf0IQuWDAAazhpb31qyc7GiyspBP2vh7t5zlCsF5QDv76chXeN" +# # A “network name” for Blockscout’s UI +# NETWORK: "local" +# API_V2_ENABLED: "true" +# # disable Subgraph support if you don’t have one +# ECTO_USE_SSL: "false" +# SSG_ENABLED: "false" +# PORT: "${BLOCKSCOUT_PORT}" +# # no ports: host-mode means Blockscout binds to 4000 on the host automatically + diff --git a/main_node/compose.yml b/main_node/compose.prod.yml similarity index 98% rename from main_node/compose.yml rename to main_node/compose.prod.yml index 560890c8a..3f83181e2 100644 --- 
a/main_node/compose.yml +++ b/main_node/compose.prod.yml @@ -1,6 +1,6 @@ services: op-geth: - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101503.4-rc.1 + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101603.5 container_name: op-geth network_mode: "host" entrypoint: ["/bin/sh", "-c"] @@ -47,13 +47,13 @@ services: - service=op-geth op-node: - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:v1.13.2 + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:v1.16.2 container_name: op-node entrypoint: - sh - -c - | - sleep "${OP_NODE_DELAY:-10}" && + sleep "${OP_NODE_DELAY:-5}" && exec op-node "$@" network_mode: "host" # Override inter-container URLs for bridge networking @@ -158,6 +158,7 @@ services: - --registry.url=http://0.0.0.0:${REGISTRY_PORT} - --gateway.timeout_ms=200 - --log.dir=/var/log/app + - --port=${PORTAL_PORT} volumes: - ./config:/config - /tmp:/tmp diff --git a/main_node/tx_receivers_example.json b/main_node/tx_receivers_example.json index 8746c27d5..4a9395afe 100644 --- a/main_node/tx_receivers_example.json +++ b/main_node/tx_receivers_example.json @@ -1,4 +1,4 @@ [ "http://localhost:8545/", - "http://localhost:9998/" + "http://localhost:9994/" ] diff --git a/peering.py b/peering.py index 0bb741d93..667755365 100644 --- a/peering.py +++ b/peering.py @@ -1,5 +1,6 @@ import requests from itertools import combinations +import re class NodePeering: def __init__(self, node_urls, geth_urls): @@ -37,19 +38,22 @@ def p2p_setup(self): for url in self.node_urls: self_info = self.node_opp2p_self(url) node_addresses = self_info.get('addresses', []) - multi_addresses[url] = node_addresses + node_local = [re.sub(r'\/ip4\/[0-9\.]*\/', '/ip4/127.0.0.1/', addr) for addr in node_addresses] + multi_addresses[url] = node_local print(f"Node at {url} has addresses: {node_addresses}") for (url1, addrs1), (url2, addrs2) in combinations(multi_addresses.items(), 2): if addrs1: - print(f"Connecting Node at {url2} to 
Node at {url1} ({addrs1[0]})") - self.node_opp2p_connect_peer(url2, addrs1[0]) + for addr in addrs1: + print(f"Connecting Node at {url2} to Node at {url1} ({addr})") + self.node_opp2p_connect_peer(url2, addr) else: print(f"Could not get multiaddress for Node at {url1}.") if addrs2: - print(f"Connecting Node at {url1} to Node at {url2} ({addrs2[0]})") - self.node_opp2p_connect_peer(url1, addrs2[0]) + for addr in addrs2: + print(f"Connecting Node at {url1} to Node at {url2} ({addr})") + self.node_opp2p_connect_peer(url1, addr) else: print(f"Could not get multiaddress for Node at {url2}.") @@ -58,8 +62,9 @@ def p2p_setup(self): for url in self.geth_urls: node_info = self.geth_node_info(url) enode = node_info.get('enode') - enodes[url] = enode - print(f"Geth Node at {url} has enode: {enode}") + enode_local = re.sub(r'@.+:', '@127.0.0.1:', enode) + enodes[url] = enode_local + print(f"Geth Node at {url} has enode: {enode_local}") for (url1, enode1), (url2, enode2) in combinations(enodes.items(), 2): if enode1: @@ -88,4 +93,4 @@ def p2p_setup(self): ] p2p = NodePeering(node_urls, geth_urls) - p2p.p2p_setup() \ No newline at end of file + p2p.p2p_setup() diff --git a/scripts/ci.just b/scripts/ci.just new file mode 100644 index 000000000..2dfead590 --- /dev/null +++ b/scripts/ci.just @@ -0,0 +1,75 @@ +root := join(justfile_directory(), "..") +assets := canonicalize(join(root, ".github", "assets")) + +parent := "just -f " + join(root, "Justfile") +based := "just -f " + join(root, "based", "docker", "Justfile") +deps := "just -f " + join(root, "deps", "Justfile") + +export DOCKER_CACHING := ''' +--cache-from type=registry,ref=ghcr.io/$GITHUB_REPOSITORY/$IMAGE_NAME:cache \ +--cache-to type=registry,ref=ghcr.io/$GITHUB_REPOSITORY/$IMAGE_NAME:cache,mode=max''' + +@build: + {{parent}} build + +# Upgrade the stored chain config with the one currently in use +_upgrade: + #!/usr/bin/env bash + set -euo pipefail + + {{parent}} main-node _is-configured || { + echo "*** No active chain 
configuration!" + exit 1 + } + + {{parent}} main-node backup-config {{assets}} + +# Prepare chain config +# +# Will deploy a new chain each time, using a known wallet +prepare: + #!/usr/bin/env bash + set -euo pipefail + + # Ensure LOCAL_DATA folder is available + mkdir -p $LOCAL_DATA + + # Populate wallets from backup + cp -r --update=none {{join(assets, "wallets")}} {{join("$LOCAL_DATA", ".wallet")}} || true + + {{parent}} main-node _is-configured || { + echo "*** Using chain config at {{assets}} to configure Main Node..." + {{parent}} main-node config-with-deploy + } + + {{parent}} follower-node _is-configured || { + echo "*** Starting Main Node to configure Follower Node..." + {{parent}} start main-node + {{parent}} follower-node create-config + {{parent}} stop main-node + } + + echo "*** Main Node and Follower Node configured! Use \`run\` to start both" + +# Run the configured chain +@run: (prepare) + {{parent}} main-node start + {{parent}} follower-node start-dev + +# Run tests on the configured chain +test: + {{parent}} test tx + +# Stop the configured chain +@stop: + {{parent}} main-node stop + {{parent}} follower-node stop + +# Save the built images of the used services +export-images dest: + docker save local_based_op_node | gzip > {{dest}}/based-op-node.tar.gz + docker save local_based_op_deployer | gzip > {{dest}}/based-op-deployer.tar.gz + docker save local_based_op_geth | gzip > {{dest}}/based-op-geth.tar.gz + {{based}} export-all {{dest}} + + diff --git a/scripts/logs.just b/scripts/logs.just new file mode 100644 index 000000000..2b2b76157 --- /dev/null +++ b/scripts/logs.just @@ -0,0 +1,42 @@ +set quiet + +# Invocation utils +self := "just -f " + justfile() +parent := "just -f " + join(justfile_directory(), "..", "Justfile") + +logs := self + " docker-logs" + +docker-logs container: + docker logs {{container}} --tail 100 -f + +based-op-node: + {{logs}} based-op-node + +based-op-geth: + {{logs}} based-op-geth + +op-node: + {{logs}} op-node + 
+op-geth: + {{logs}} op-geth + +batcher: + {{logs}} op-batcher + +proposer: + {{logs}} op-proposer + +## The following are here to provide a consistent interface + +follower-node: + {{parent}} follower-node logs + +gateway: + {{parent}} follower-node logs-gateway + +main-node: + {{parent}} main-node logs + +portal: + {{parent}} portal logs diff --git a/scripts/monitoring.just b/scripts/monitoring.just new file mode 100644 index 000000000..92a466f45 --- /dev/null +++ b/scripts/monitoring.just @@ -0,0 +1,13 @@ + +# Invocation utils +self := "just -f " + justfile() + +compose-file := canonicalize(join(justfile_directory(), "..", "monitoring", "compose.yml")) + +# Start the monitoring service +start: + docker compose -f {{compose-file}} up -d + +# Stop the monitoring service +stop: + docker compose -f {{compose-file}} down diff --git a/scripts/peering.just b/scripts/peering.just new file mode 100644 index 000000000..8ad67e2cc --- /dev/null +++ b/scripts/peering.just @@ -0,0 +1,46 @@ +export LOCAL_DATA := env("LOCAL_DATA", join(invocation_directory(), ".local")) +export DATA_FOLDER := join(LOCAL_DATA, env("PEERING_DATA_FOLDER", ".peering")) + +pyenv := join(DATA_FOLDER, "bin", "activate") + +export OP_GETH_RPC := env("OP_GETH_RPC", "http://localhost:8545") +export BOP_GETH_RPC := env("BOP_GETH_RPC", "http://localhost:8645") + +self := "just -f " + justfile() + +@block-number rpc: + curl -s -X POST -H "Content-Type: application/json" \ + --data '{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}' {{rpc}} + +_check-block-number: + #!/usr/bin/env bash + set -euo pipefail + + bop=$({{self}} block-number $BOP_GETH_RPC | jq -re '.result') + op=$({{self}} block-number $OP_GETH_RPC | jq -re '.result') + + if [ "$bop" == "$op" ]; then + exit 0 + fi + + echo "Based OP Geth and OP Geth not synced!" 
+ exit 1 + +_ensure-pyenv: + #!/usr/bin/env bash + set -euo pipefail + + python3 -m venv {{DATA_FOLDER}} + source {{pyenv}} + + pip install requests + +peering: + #!/usr/bin/env bash + set -euo pipefail + + {{self}} _check-block-number || { + {{self}} _ensure-pyenv + source {{pyenv}} + python3 {{join(justfile_directory(), "..", "peering.py")}} + } diff --git a/scripts/spamoor.just b/scripts/spamoor.just new file mode 100644 index 000000000..a83f9d3ea --- /dev/null +++ b/scripts/spamoor.just @@ -0,0 +1,21 @@ +import 'based/scripts/consts.just' + +tool_image := env("SPAMOOR_TOOL_IMAGE", "ghcr.io/chainbound/spamoor-op-geth") + +port := env("SPAMOOR_DAEMON_UI_PORT", "8075") + +# Start spamoor as a foreground process +start config: + docker run --pull always --network host \ + -v {{canonicalize(config)}}:/etc/spamoor-config.yml \ + {{tool_image}} \ + run /etc/spamoor-config.yml \ + --privkey {{DUMMY_RICH_WALLET_PRIVATE_KEY}} \ + --rpchost http://localhost:{{BASED_OP_GETH_PORT}} + +# Start spamoor as a daemon with UI on port `SPAMOOR_DAEMON_UI_PORT` (default 8075) +daemon config="../spamoor-daemon": #TODO: where is this folder? 
+ docker run --rm --network host --entrypoint {{config}} \ + {{tool_image}} --privkey {{DUMMY_RICH_WALLET_PRIVATE_KEY}} \ + --rpchost http://localhost:{{BASED_OP_GETH_PORT}} \ + --db /tmp/spamoor.db --port {{port}} diff --git a/scripts/test.just b/scripts/test.just new file mode 100644 index 000000000..d18987dfc --- /dev/null +++ b/scripts/test.just @@ -0,0 +1,110 @@ +import '../based/scripts/consts.just' + +export BOP_EL_PORT := env("BOP_EL_PORT", shell("grep '^OP_GETH_ENGINE_RPC_PORT=' $(" + parent + " follower-node _is-configured)/.env | cut -d'=' -f2")) +export OP_EL_PORT := env("OP_EL_PORT", shell("grep '^OP_GETH_ENGINE_RPC_PORT=' $(" + parent + " main-node _is-configured)/../.env | cut -d'=' -f2")) +export BOP_NODE_PORT := env("BOP_NODE_PORT", shell("grep '^OP_NODE_GOSSIP_PORT=' $(" + parent + " main-node _is-configured)/../.env | cut -d'=' -f2")) + +# Invocation utils +self := "just -f " + justfile() +parent := "just -f " + join(justfile_directory(), "..", "Justfile") + +tx: + cast publish --rpc-url http://0.0.0.0:$BASED_OP_GETH_PORT {{shell(self + " _dummy-tx")}} + +_block-number: + cast block-number --rpc-url http://localhost:$BOP_EL_PORT + +_dummy-tx: + cast mktx --rpc-url http://0.0.0.0:$BASED_OP_GETH_PORT --private-key $DUMMY_RICH_WALLET_PRIVATE_KEY \ + --value 1 0x7DDcC7c49D562997A68C98ae7Bb62eD1E8E4488a + +@_rpc method params="[]": + curl -X POST --url $FOLLOWER_NODE_HOST:$BASED_OP_GETH_PORT -H "Content-Type: application/json" \ + --data '{"jsonrpc": "2.0", "method": "{{method}}", "params": {{params}}, "id": 1}' + +frag seq: + {{self}} _rpc based_newFrag \ + [{ \ + "signature": "0xa47da12abd5563f45332e637d1de946c3576902a245511d86826743c8af1f1e2093d4f5efd5b9630c0acc5f2bb23f236b4f7bdbe0d21d281b2bd2ff60c6cf1861b", \ + "message": { \ + "blockNumber": {{shell(self + " _block-number")}}, \ + "seq": {{seq}}, \ + "isLast": true, \ + "txs": [\"{{shell(self + " _dummy-tx | xxd -r -p | base64")}}\"], \ + "version": 0 \ + } \ + }] + +seal: + {{self}} _rpc
based_sealFrag \ + [{ \ + "signature": "0x4fc733cc2f0b680e15452db40b9453412ccb25507582b192c1ea4fc4deaf709845002ab44af42327ed4b8b12943412810a8d9984ea1609dfc6f77338f8c395b41c", \ + "message": { \ + "totalFrags": 8, \ + "blockNumber": {{shell(self + " _block-number")}}, \ + "gasUsed": 43806, \ + "gasLimit": 60000000, \ + "parentHash": "0x3d0f61f441af7d1640cb15cd7250bae72d8b334e27245ea44b536407892ec57c", \ + "transactionsRoot": "0x783425e75723ac77ea7f0f47fb4a7858f63deceb80137a0e53fa09703f477cc0", \ + "receiptsRoot": "0x6ff8f783179faedd1aef7e55889a1017ec700504ba6bedffd826a28a47b1a5a2", \ + "stateRoot": "0xc6a987cccdd0665f4d38c730dc05fb8b69497d45094b2b3615954686ff765f87", \ + "blockHash": "0xf3b170b6aee95faa665f77ad1ed0efe7bd29553aa2402e35de7ba3ce55d6974f" \ + } \ + }] + + +env: + {{self}} _rpc based_env \ + [{ \ + "signature": "0x4fc733cc2f0b680e15452db40b9453412ccb25507582b192c1ea4fc4deaf709845002ab44af42327ed4b8b12943412810a8d9984ea1609dfc6f77338f8c395b41c", \ + "message": { \ + "totalFrags": 2, \ + "number": {{shell(self + " _block-number")}}, \ + "beneficiary": "0x1234567890123456789012345678901234567890", \ + "timestamp": 2739281173, \ + "gasLimit": 3, \ + "baseFee": 4, \ + "difficulty": "0x5", \ + "prevrandao": "0xe75fae0065403d4091f3d6549c4219db69c96d9de761cfc75fe9792b6166c758", \ + "parentHash": "0xe75fae0065403d4091f3d6549c4219db69c96d9de761cfc75fe9792b6166c758", \ + "parentBeaconRoot": "0xe75fae0065403d4091f3d6549c4219db69c96d9de761cfc75fe9792b6166c758", \ + "extraData": "0x010203" \ + } \ + }] + +based-manifest := canonicalize(join(justfile_directory(), "..", "based", "Cargo.toml")) + +tx-spammer: + PORTAL_PORT="$({{parent}} portal address | cut -d':' -f3)" cargo test --manifest-path {{based-manifest}} --release \ + -- tx_spammer --ignored --nocapture + +gateway-spam port: # TODO: improve docs + #!/usr/bin/env bash + set -euo pipefail + + FOLLOWER_NODE_DATA=$({{parent}} follower-node _is-configured) + GATEWAY_DATA="$FOLLOWER_NODE_DATA"/data/gateway + + 
cargo run --manifest-path {{based-manifest}} --profile=release --bin bop-gateway --features shmem \ + -- --db.datadir $FOLLOWER_NODE_DATA/data/gateway \ + --rpc.fallback_url http://127.0.0.1:$OP_EL_PORT \ + --chain $FOLLOWER_NODE_DATA/config/genesis.json \ + --rpc.port {{port}} \ + --gossip.root_peer_url http://127.0.0.1:$BOP_NODE_PORT \ + --mock Spammer \ + --sequencer.commit_sealed_frags_to_db + +gateway-bench port: # TODO: improve docs + #!/usr/bin/env bash + set -euo pipefail + + FOLLOWER_NODE_DATA=$({{parent}} follower-node _is-configured) + GATEWAY_DATA="$FOLLOWER_NODE_DATA"/data/gateway + + cargo run --manifest-path {{based-manifest}} --profile=release-with-debug --bin bop-gateway --features shmem \ + -- --db.datadir $FOLLOWER_NODE_DATA/data/gateway \ + --rpc.fallback_url http://127.0.0.1:$OP_EL_PORT \ + --chain $FOLLOWER_NODE_DATA/config/genesis.json \ + --rpc.port {{port}} \ + --gossip.root_peer_url http://127.0.0.1:$BOP_NODE_PORT \ + --mock Benchmark