Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
38 commits
Select commit Hold shift + click to select a range
5b3bfaa
feat(ci): add performance tracking workflow for AvalancheGo benchmarks
Elvis339 Nov 27, 2025
d64e77d
ci: track performance
Elvis339 Nov 27, 2025
a55cb7d
docs
Elvis339 Dec 2, 2025
d82ed73
ci(perf): add benchmark workflow with nix-based just commands
Elvis339 Dec 4, 2025
8c42437
docs
Elvis339 Dec 4, 2025
04d6367
docs
Elvis339 Dec 4, 2025
ac94798
address PR
Elvis339 Dec 8, 2025
126d564
lint: descriptive link text
Elvis339 Dec 8, 2025
a261cdf
docs
Elvis339 Dec 8, 2025
3b95709
ci: update workflow for C-Chain reexecution benchmarks and improve ju…
Elvis339 Dec 30, 2025
e7fec1b
docs
Elvis339 Jan 25, 2026
f56b9ac
debug
Elvis339 Jan 25, 2026
72efe2f
refactor(benchmark): trigger C-Chain benchmarks via Firewood CI workflow
Elvis339 Jan 25, 2026
1406e14
temp
Elvis339 Jan 25, 2026
aa785b1
docs
Elvis339 Jan 25, 2026
119cc33
docs
Elvis339 Jan 25, 2026
344bcf3
ci(gh-pages): temp. add workflow_dispatch to rebuild Pages
Elvis339 Jan 25, 2026
29ee34c
ci(gh-pages): remove temp. set workflow_dispatch
Elvis339 Jan 25, 2026
1d648ea
chore: split PR - extract local tooling to separate PR
Elvis339 Jan 26, 2026
6ee9ccc
ci(track-performance): add scheduled C-Chain reexecution benchmarks
Elvis339 Jan 28, 2026
c5efed7
chore(bench-cchain-reexecution): use AVALANCHEGO_REF instead of hard-…
Elvis339 Jan 28, 2026
5d39f47
chore(bench-cchain-reexecution): clarify help text wording
Elvis339 Jan 28, 2026
9d16496
ci(track-performance): update runner configs and add workflow inputs
Elvis339 Jan 28, 2026
d5b9fca
ci(track-performance): remove JSON configs, simplify benchmark workflow
Elvis339 Jan 28, 2026
0843424
docs
Elvis339 Jan 28, 2026
93e4443
ci(track-performance): limit runners to ephemeral storage to avoid st…
Elvis339 Jan 29, 2026
ebfec78
ci(gh-pages): auto-deploy after benchmark completes
Elvis339 Jan 29, 2026
d028428
ci(track-performance): remove preset runner options
Elvis339 Jan 29, 2026
42a1d9d
ci(gh-pages): repository_dispatch
Elvis339 Jan 29, 2026
07ce5ca
test
Elvis339 Jan 29, 2026
13a196f
revert
Elvis339 Jan 30, 2026
34fe2de
ci(track-performance): set alert-threshold 150%
Elvis339 Jan 30, 2026
408809a
ci(track-performance): schedule weekly run on Saturday 00:00 ET
Elvis339 Jan 30, 2026
1d90538
Merge branch 'main' into es/scheduled-perf-tracking
Elvis339 Jan 30, 2026
22e2be1
ci(github-pages): auto rebuild benchmarks when results are pushed
Elvis339 Jan 30, 2026
54265b9
ci(gh-pages): auto-deploy
Elvis339 Jan 30, 2026
a44160d
ci(gh-pages): auto deploy
Elvis339 Jan 30, 2026
4e887c3
rm temp branch
Elvis339 Jan 30, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions .github/workflows/gh-pages.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,27 @@ on:
branches:
- "main"
- "rkuris/gh-pages"
# Rebuild pages after benchmark workflow completes.
# Currently only scheduled runs (which run on main) trigger this.
# If we later add feature-branch benchmarks, remove the `branches` filter —
# data separation is handled in track-performance.yml (bench/ vs dev/bench/).
workflow_run:
workflows: ["C-Chain Reexecution Performance Tracking"]
types: [completed]
branches: [main]

env:
CARGO_TERM_COLOR: always

jobs:
build:
# Run only when not triggered by workflow_run, or when the triggering run succeeded (also skips cancelled/skipped runs)
if: github.event_name != 'workflow_run' || github.event.workflow_run.conclusion == 'success'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
ref: main # Always build docs from main
- uses: dtolnay/rust-toolchain@stable
# caution: this is the same restore as in ci.yaml
- uses: Swatinem/rust-cache@v2
Expand Down
136 changes: 93 additions & 43 deletions .github/workflows/track-performance.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,18 @@
name: C-Chain Reexecution Performance Tracking

on:
schedule:
# Daily at 05:00 UTC (00:00 EST / 01:00 EDT) - 1M blocks: 40M → 41M
- cron: '0 5 * * *'
# Weekly Saturday at 05:00 UTC (00:00 EST / 01:00 EDT) - 10M blocks: 50M → 60M
- cron: '0 5 * * 6'
workflow_dispatch:
inputs:
firewood:
description: 'Firewood commit/branch/tag to test (leave empty to use the commit that triggered the workflow)'
default: ''
libevm:
description: 'libevm commit/branch/tag to test (leave empty to skip)'
description: 'libevm commit/branch/tag to test (leave empty to use AvalancheGo version of libevm)'
default: ''
avalanchego:
description: 'AvalancheGo commit/branch/tag to test against'
Expand All @@ -19,7 +24,7 @@ on:
default: ''
config:
description: 'Config (e.g., firewood, hashdb)'
default: ''
default: 'firewood'
start-block:
default: ''
end-block:
Expand All @@ -33,17 +38,69 @@ on:
runner:
description: 'Runner to use in AvalancheGo'
required: true
type: choice
options:
- avalanche-avalanchego-runner-2ti
- avago-runner-i4i-4xlarge-local-ssd
- avago-runner-m6i-4xlarge-ebs-fast
timeout-minutes:
description: 'Timeout in minutes'
default: ''

jobs:
record-benchmark-to-gh-pages:
configure-benchmark:
runs-on: ubuntu-latest
outputs:
name: ${{ steps.resolve.outputs.name }}
test: ${{ steps.resolve.outputs.test }}
config: ${{ steps.resolve.outputs.config }}
start-block: ${{ steps.resolve.outputs.start-block }}
end-block: ${{ steps.resolve.outputs.end-block }}
block-dir-src: ${{ steps.resolve.outputs.block-dir-src }}
current-state-dir-src: ${{ steps.resolve.outputs.current-state-dir-src }}
runner: ${{ steps.resolve.outputs.runner }}
timeout-minutes: ${{ steps.resolve.outputs.timeout-minutes }}
steps:
- name: Resolve benchmark config
id: resolve
run: |
if [[ "${{ github.event_name }}" == "schedule" ]]; then
case "${{ github.event.schedule }}" in
"0 5 * * *") # Daily at 05:00 UTC: 1M blocks (40M → 41M)
echo "name=daily-40m-41m" >> "$GITHUB_OUTPUT"
echo "config=firewood" >> "$GITHUB_OUTPUT"
echo "start-block=40000001" >> "$GITHUB_OUTPUT"
echo "end-block=41000000" >> "$GITHUB_OUTPUT"
echo "block-dir-src=cchain-mainnet-blocks-40m-50m-ldb" >> "$GITHUB_OUTPUT"
echo "current-state-dir-src=cchain-current-state-firewood-40m" >> "$GITHUB_OUTPUT"
echo "runner=avago-runner-i4i-2xlarge-local-ssd" >> "$GITHUB_OUTPUT"
echo "timeout-minutes=720" >> "$GITHUB_OUTPUT"
;;
"0 5 * * 6") # Weekly Saturday at 05:00 UTC: 10M blocks (50M → 60M)
echo "name=weekly-50m-60m" >> "$GITHUB_OUTPUT"
echo "config=firewood" >> "$GITHUB_OUTPUT"
echo "start-block=50000001" >> "$GITHUB_OUTPUT"
echo "end-block=60000000" >> "$GITHUB_OUTPUT"
echo "block-dir-src=cchain-mainnet-blocks-50m-60m-ldb" >> "$GITHUB_OUTPUT"
echo "current-state-dir-src=cchain-current-state-firewood-50m" >> "$GITHUB_OUTPUT"
echo "runner=avago-runner-i4i-2xlarge-local-ssd" >> "$GITHUB_OUTPUT"
echo "timeout-minutes=2880" >> "$GITHUB_OUTPUT"
;;
*)
echo "::error::Unknown schedule: ${{ github.event.schedule }}"
exit 1
;;
esac
else
# Manual dispatch - pass through inputs directly
echo "name=manual" >> "$GITHUB_OUTPUT"
echo "test=${{ inputs.test }}" >> "$GITHUB_OUTPUT"
echo "config=${{ inputs.config }}" >> "$GITHUB_OUTPUT"
echo "start-block=${{ inputs.start-block }}" >> "$GITHUB_OUTPUT"
echo "end-block=${{ inputs.end-block }}" >> "$GITHUB_OUTPUT"
echo "block-dir-src=${{ inputs.block-dir-src }}" >> "$GITHUB_OUTPUT"
echo "current-state-dir-src=${{ inputs.current-state-dir-src }}" >> "$GITHUB_OUTPUT"
echo "runner=${{ inputs.runner }}" >> "$GITHUB_OUTPUT"
echo "timeout-minutes=${{ inputs.timeout-minutes }}" >> "$GITHUB_OUTPUT"
fi

benchmark:
needs: configure-benchmark
runs-on: ubuntu-latest
permissions:
contents: write # Required for github-action-benchmark to push to gh-pages
Expand All @@ -57,26 +114,26 @@ jobs:

- name: Trigger C-Chain Reexecution Benchmark
run: |
if [[ -n "${{ inputs.test }}" ]]; then
./scripts/bench-cchain-reexecution.sh trigger "${{ inputs.test }}"
if [[ -n "${{ needs.configure-benchmark.outputs.test }}" ]]; then
./scripts/bench-cchain-reexecution.sh trigger "${{ needs.configure-benchmark.outputs.test }}"
else
./scripts/bench-cchain-reexecution.sh trigger
fi
env:
GH_TOKEN: ${{ secrets.FIREWOOD_AVALANCHEGO_GITHUB_TOKEN }}
# Custom mode (ignored when test is specified)
CONFIG: ${{ inputs.config }}
START_BLOCK: ${{ inputs.start-block }}
END_BLOCK: ${{ inputs.end-block }}
BLOCK_DIR_SRC: ${{ inputs.block-dir-src }}
CURRENT_STATE_DIR_SRC: ${{ inputs.current-state-dir-src }}
CONFIG: ${{ needs.configure-benchmark.outputs.config }}
START_BLOCK: ${{ needs.configure-benchmark.outputs.start-block }}
END_BLOCK: ${{ needs.configure-benchmark.outputs.end-block }}
BLOCK_DIR_SRC: ${{ needs.configure-benchmark.outputs.block-dir-src }}
CURRENT_STATE_DIR_SRC: ${{ needs.configure-benchmark.outputs.current-state-dir-src }}
# Refs
FIREWOOD_REF: ${{ inputs.firewood || github.sha }}
AVALANCHEGO_REF: ${{ inputs.avalanchego }}
LIBEVM_REF: ${{ inputs.libevm }}
# Execution
RUNNER: ${{ inputs.runner }}
TIMEOUT_MINUTES: ${{ inputs.timeout-minutes }}
RUNNER: ${{ needs.configure-benchmark.outputs.runner }}
TIMEOUT_MINUTES: ${{ needs.configure-benchmark.outputs.timeout-minutes }}

# github.ref controls where results are stored (not what gets benchmarked):
# - main branch → bench/ (official history)
Expand All @@ -102,42 +159,35 @@ jobs:
summary-always: true
auto-push: true
fail-on-alert: true
alert-threshold: "150%"
comment-on-alert: false
gh-pages-branch: benchmark-data
benchmark-data-dir-path: ${{ steps.location.outputs.data-dir }}

- name: Summary
run: |
if [ "${{ steps.store.outcome }}" == "failure" ]; then
echo "::warning::Benchmark storage failed - results were not saved to GitHub Pages"
fi
[[ "${{ steps.store.outcome }}" == "failure" ]] && echo "::warning::Benchmark storage failed"

{
echo "## Firewood Performance Benchmark Results"
echo "## Benchmark: ${{ needs.configure-benchmark.outputs.name }}"
echo
echo "**Configuration:**"

if [ -n "${{ inputs.test }}" ]; then
echo "- Mode: Predefined test"
echo "- Test: \`${{ inputs.test }}\`"
if [[ -n "${{ needs.configure-benchmark.outputs.test }}" ]]; then
echo "| Parameter | Value |"
echo "|-----------|-------|"
echo "| Test | \`${{ needs.configure-benchmark.outputs.test }}\` |"
else
echo "- Mode: Custom parameters"
echo "- Config: \`${{ inputs.config }}\`"
echo "- Blocks: \`${{ inputs.start-block }}\` → \`${{ inputs.end-block }}\`"
echo "- Block source: \`${{ inputs.block-dir-src }}\`"
echo "- State source: \`${{ inputs.current-state-dir-src }}\`"
fi

echo "- Firewood: \`${{ inputs.firewood || github.sha }}\`"
if [ -n "${{ inputs.libevm }}" ]; then
echo "- libevm: \`${{ inputs.libevm }}\`"
echo "| Parameter | Value |"
echo "|-----------|-------|"
echo "| Config | \`${{ needs.configure-benchmark.outputs.config }}\` |"
echo "| Blocks | \`${{ needs.configure-benchmark.outputs.start-block }}\` → \`${{ needs.configure-benchmark.outputs.end-block }}\` |"
echo "| Block source | \`${{ needs.configure-benchmark.outputs.block-dir-src }}\` |"
echo "| State source | \`${{ needs.configure-benchmark.outputs.current-state-dir-src }}\` |"
fi
echo "- AvalancheGo: \`${{ inputs.avalanchego }}\`"
echo "- Runner: \`${{ inputs.runner }}\`"
echo "- Timeout: \`${{ inputs.timeout-minutes }}\` minutes"
echo "| Firewood | \`${{ inputs.firewood || github.sha }}\` |"
echo "| AvalancheGo | \`${{ inputs.avalanchego || 'master' }}\` |"
echo "| libevm | \`${{ inputs.libevm || '-' }}\` |"
echo "| Runner | \`${{ needs.configure-benchmark.outputs.runner }}\` |"
echo

echo "**Links:**"
echo "- [Performance Trends](https://ava-labs.github.io/firewood/${{ steps.location.outputs.data-dir }}/)"
} >> $GITHUB_STEP_SUMMARY
echo "[View trends](https://ava-labs.github.io/firewood/${{ steps.location.outputs.data-dir }}/)"
} >> "$GITHUB_STEP_SUMMARY"

5 changes: 3 additions & 2 deletions scripts/bench-cchain-reexecution.sh
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,8 @@ set -euo pipefail
# GH_TOKEN GitHub token for API access (required)
# TEST Predefined test name, alternative to arg (optional)
# FIREWOOD_REF Firewood commit/tag/branch, empty = AvalancheGo's go.mod default (optional)
# AVALANCHEGO_REF AvalancheGo ref to test against (default: master)
# AVALANCHEGO_REF AvalancheGo branch/tag to test against (default: master)
# NOTE: Must be a branch or tag name, not a commit SHA (GitHub API limitation)
# RUNNER GitHub Actions runner label (default: avalanche-avalanchego-runner-2ti)
# LIBEVM_REF libevm ref (optional)
# TIMEOUT_MINUTES Workflow timeout in minutes (optional)
Expand Down Expand Up @@ -330,7 +331,7 @@ COMMANDS
status <run_id> Check run status
list List recent runs
tests Show available tests
help Show this help
help Show this help message

TESTS
EOF
Expand Down
Loading