From 904edeb45da27db6914d885bc7be50d65dcc38fb Mon Sep 17 00:00:00 2001 From: Danyal Prout Date: Fri, 6 Feb 2026 19:08:58 +0800 Subject: [PATCH 1/2] feat(gobrr): load tester for base/mempool --- Cargo.lock | 665 +++++++++++++++++- Cargo.toml | 20 +- bin/basectl/Cargo.toml | 3 + bin/basectl/main.rs | 57 +- crates/basectl/Cargo.toml | 2 + crates/basectl/src/app/core.rs | 14 +- crates/basectl/src/app/mod.rs | 2 +- crates/basectl/src/app/resources.rs | 25 +- crates/basectl/src/app/router.rs | 25 +- crates/basectl/src/app/runner.rs | 243 ++++++- .../basectl/src/app/views/command_center.rs | 8 +- crates/basectl/src/app/views/config.rs | 16 +- crates/basectl/src/app/views/da_monitor.rs | 2 +- crates/basectl/src/app/views/factory.rs | 5 +- crates/basectl/src/app/views/flashblocks.rs | 3 +- crates/basectl/src/app/views/loadtest.rs | 634 +++++++++++++++++ crates/basectl/src/app/views/mod.rs | 2 + crates/basectl/src/commands/common.rs | 28 +- crates/basectl/src/rpc.rs | 25 +- crates/gobrr/Cargo.toml | 33 + crates/gobrr/src/blocks.rs | 92 +++ crates/gobrr/src/client.rs | 52 ++ crates/gobrr/src/config.rs | 166 +++++ crates/gobrr/src/confirmer.rs | 72 ++ crates/gobrr/src/flashblock_watcher.rs | 97 +++ crates/gobrr/src/funder.rs | 281 ++++++++ crates/gobrr/src/handle.rs | 218 ++++++ crates/gobrr/src/lib.rs | 25 + crates/gobrr/src/orchestrator.rs | 225 ++++++ crates/gobrr/src/runner.rs | 400 +++++++++++ crates/gobrr/src/sender.rs | 361 ++++++++++ crates/gobrr/src/signer.rs | 286 ++++++++ crates/gobrr/src/stats.rs | 67 ++ crates/gobrr/src/tracker.rs | 293 ++++++++ crates/gobrr/src/wallet.rs | 46 ++ 35 files changed, 4365 insertions(+), 128 deletions(-) create mode 100644 crates/basectl/src/app/views/loadtest.rs create mode 100644 crates/gobrr/Cargo.toml create mode 100644 crates/gobrr/src/blocks.rs create mode 100644 crates/gobrr/src/client.rs create mode 100644 crates/gobrr/src/config.rs create mode 100644 crates/gobrr/src/confirmer.rs create mode 100644 crates/gobrr/src/flashblock_watcher.rs create mode 100644 crates/gobrr/src/funder.rs create mode 100644 crates/gobrr/src/handle.rs create mode 100644 crates/gobrr/src/lib.rs create mode 100644 crates/gobrr/src/orchestrator.rs create mode 100644 crates/gobrr/src/runner.rs create mode 100644 crates/gobrr/src/sender.rs create mode 100644 crates/gobrr/src/signer.rs create mode 100644 crates/gobrr/src/stats.rs create mode 100644 crates/gobrr/src/tracker.rs create mode 100644 crates/gobrr/src/wallet.rs diff --git a/Cargo.lock b/Cargo.lock index a35c30c..e209592 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -73,7 +73,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -109,7 +109,7 @@ dependencies = [ "futures", "futures-util", "serde_json", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -138,7 +138,7 @@ dependencies = [ "alloy-rlp", "crc", "serde", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -163,7 +163,7 @@ dependencies = [ "alloy-rlp", "borsh", "serde", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -199,7 +199,7 @@ dependencies = [ "serde", "serde_with", "sha2", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -225,7 +225,7 @@ dependencies = [ "http", "serde", "serde_json", - "thiserror", + "thiserror 2.0.18", "tracing", ] @@ -252,7 +252,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -328,7 +328,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror", + "thiserror 2.0.18", "tokio", 
"tracing", "url", @@ -439,7 +439,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -477,7 +477,26 @@ dependencies = [ "either", "elliptic-curve", "k256", - "thiserror", + "thiserror 2.0.18", +] + +[[package]] +name = "alloy-signer-local" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28bd71507db58477151a6fe6988fa62a4b778df0f166c3e3e1ef11d059fe5fa" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives", + "alloy-signer", + "async-trait", + "coins-bip32", + "coins-bip39", + "k256", + "rand 0.8.5", + "thiserror 2.0.18", + "zeroize", ] [[package]] @@ -558,14 +577,14 @@ checksum = "b321f506bd67a434aae8e8a7dfe5373bf66137c149a5f09c9e7dfb0ca43d7c91" dependencies = [ "alloy-json-rpc", "auto_impl", - "base64", + "base64 0.22.1", "derive_more", "futures", "futures-utils-wasm", "parking_lot", "serde", "serde_json", - "thiserror", + "thiserror 2.0.18", "tokio", "tower", "tracing", @@ -604,7 +623,7 @@ dependencies = [ "nybbles", "serde", "smallvec", - "thiserror", + "thiserror 2.0.18", "tracing", ] @@ -701,7 +720,7 @@ dependencies = [ "objc2-foundation", "parking_lot", "percent-encoding", - "windows-sys 0.60.2", + "windows-sys 0.59.0", "x11rb", ] @@ -962,7 +981,7 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "base-flashtypes" version = "0.0.0" -source = "git+https://github.com/base/base.git#720f6e1a73faabc938402b0cb617d3960db742d4" +source = "git+https://github.com/base/base.git?rev=720f6e1a#720f6e1a73faabc938402b0cb617d3960db742d4" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -972,7 +991,7 @@ dependencies = [ "bytes", "serde", "serde_json", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -981,6 +1000,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + [[package]] name = "base64" version = "0.22.1" @@ -999,8 +1024,11 @@ version = "0.0.0" dependencies = [ "anyhow", "basectl-cli", + "chrono", "clap", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -1019,15 +1047,23 @@ dependencies = [ "crossterm", "dirs", "futures-util", + "gobrr", "ratatui", "serde", "serde_json", "serde_yaml", "tokio", "tokio-tungstenite", + "tracing", "url", ] +[[package]] +name = "bech32" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" + [[package]] name = "bit-set" version = "0.8.0" @@ -1142,6 +1178,16 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "bs58" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" +dependencies = [ + "sha2", + "tinyvec", +] + [[package]] name = "bumpalo" version = "3.19.1" @@ -1305,6 +1351,57 @@ dependencies = [ "error-code", ] +[[package]] +name = "coins-bip32" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2073678591747aed4000dd468b97b14d7007f7936851d3f2f01846899f5ebf08" +dependencies = [ + "bs58", + "coins-core", + "digest 0.10.7", + "hmac", + "k256", + "serde", + "sha2", + 
"thiserror 1.0.69", +] + +[[package]] +name = "coins-bip39" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74b169b26623ff17e9db37a539fe4f15342080df39f129ef7631df7683d6d9d4" +dependencies = [ + "bitvec", + "coins-bip32", + "hmac", + "once_cell", + "pbkdf2", + "rand 0.8.5", + "sha2", + "thiserror 1.0.69", +] + +[[package]] +name = "coins-core" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b962ad8545e43a28e14e87377812ba9ae748dd4fd963f4c10e9fcc6d13475b" +dependencies = [ + "base64 0.21.7", + "bech32", + "bs58", + "const-hex", + "digest 0.10.7", + "generic-array", + "ripemd", + "serde", + "sha2", + "sha3", + "thiserror 1.0.69", +] + [[package]] name = "colorchoice" version = "1.0.4" @@ -1421,6 +1518,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + [[package]] name = "crossbeam-channel" version = "0.5.15" @@ -1430,6 +1533,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.21" @@ -1672,7 +1784,7 @@ dependencies = [ "libc", "option-ext", "redox_users", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -1770,6 +1882,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "enum-as-inner" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "enum-ordinalize" version = "4.3.2" @@ -1803,7 +1927,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -2091,9 +2215,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", + "js-sys", "libc", "r-efi", "wasip2", + "wasm-bindgen", ] [[package]] @@ -2102,6 +2228,35 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" +[[package]] +name = "gobrr" +version = "0.0.0" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-signer", + "alloy-signer-local", + "alloy-transport-http", + "anyhow", + "base-flashtypes", + "futures-util", + "humantime", + "op-alloy-network", + "rand 0.8.5", + "reqwest", + "serde", + "serde_json", + "serde_yaml", + "tokio", + "tokio-tungstenite", + "tracing", + "url", +] + [[package]] name = "group" version = "0.13.0" @@ -2187,6 +2342,52 @@ dependencies = [ "arrayvec", ] +[[package]] +name = "hickory-proto" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8a6fe56c0038198998a6f217ca4e7ef3a5e51f46163bd6dd60b5c71ca6c6502" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + 
"enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.9.2", + "ring", + "thiserror 2.0.18", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc62a9a99b0bfb44d2ab95a7208ac952d31060efc16241c87eaf36406fecf87a" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.9.2", + "resolv-conf", + "smallvec", + "thiserror 2.0.18", + "tokio", + "tracing", +] + [[package]] name = "hmac" version = "0.12.1" @@ -2235,6 +2436,12 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" +[[package]] +name = "humantime" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + [[package]] name = "hyper" version = "1.8.1" @@ -2256,6 +2463,23 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots", +] + [[package]] name = "hyper-tls" version = "0.6.0" @@ -2278,7 +2502,7 @@ version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ - "base64", + "base64 0.22.1", "bytes", "futures-channel", "futures-util", @@ -2289,7 +2513,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.2", "tokio", "tower-service", "tracing", @@ -2506,6 +2730,18 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "ipconfig" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +dependencies = [ + "socket2 0.5.10", + "widestring", + "windows-sys 0.48.0", + "winreg", +] + [[package]] name = "ipnet" version = "2.11.0" @@ -2577,7 +2813,7 @@ version = "9.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" dependencies = [ - "base64", + "base64 0.22.1", "js-sys", "pem", "ring", @@ -2598,6 +2834,7 @@ dependencies = [ "once_cell", "serdect", "sha2", + "signature", ] [[package]] @@ -2698,6 +2935,12 @@ dependencies = [ "hashbrown 0.16.1", ] +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "macro-string" version = "0.1.4" @@ -2766,6 +3009,23 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "moka" +version = "0.12.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ac832c50ced444ef6be0767a008b02c106a909ba79d1d830501e94b96f6b7e" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "equivalent", + "parking_lot", + "portable-atomic", + "smallvec", + "tagptr", + "uuid", +] + [[package]] name = "moxcms" version = "0.7.11" @@ -2799,7 
+3059,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -2960,6 +3220,10 @@ name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "once_cell_polyfill" @@ -2967,6 +3231,59 @@ version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" +[[package]] +name = "op-alloy-consensus" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "726da827358a547be9f1e37c2a756b9e3729cb0350f43408164794b370cad8ae" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network", + "alloy-primitives", + "alloy-rlp", + "alloy-rpc-types-eth", + "alloy-serde", + "derive_more", + "serde", + "thiserror 2.0.18", +] + +[[package]] +name = "op-alloy-network" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63f27e65be273ec8fcb0b6af0fd850b550979465ab93423705ceb3dfddbd2ab" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-signer", + "op-alloy-consensus", + "op-alloy-rpc-types", +] + +[[package]] +name = "op-alloy-rpc-types" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562dd4462562c41f9fdc4d860858c40e14a25df7f983ae82047f15f08fce4d19" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "derive_more", + "op-alloy-consensus", + "serde", + "serde_json", + "thiserror 2.0.18", +] + [[package]] name = "openssl" version = "0.10.75" @@ -3021,7 +3338,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror", + "thiserror 2.0.18", "tracing", ] @@ -3100,13 +3417,23 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pbkdf2" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest 0.10.7", + "hmac", +] + [[package]] name = "pem" version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ - "base64", + "base64 0.22.1", "serde_core", ] @@ -3187,6 +3514,12 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "portable-atomic" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" + [[package]] name = "potential_utf" version = "0.1.4" @@ -3302,6 +3635,61 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.6.2", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.6.2", + "tracing", + "windows-sys 0.59.0", +] + [[package]] name = "quote" version = "1.0.44" @@ -3441,7 +3829,7 @@ checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ "getrandom 0.2.17", "libredox", - "thiserror", + "thiserror 2.0.18", ] [[package]] @@ -3487,20 +3875,25 @@ version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ - "base64", + "base64 0.22.1", "bytes", "futures-core", + "hickory-resolver", "http", "http-body", "http-body-util", "hyper", + "hyper-rustls", "hyper-tls", "hyper-util", "js-sys", "log", "native-tls", + "once_cell", "percent-encoding", "pin-project-lite", + "quinn", + "rustls", "rustls-pki-types", "serde", "serde_json", @@ -3508,6 +3901,7 @@ dependencies = [ "sync_wrapper", "tokio", "tokio-native-tls", + "tokio-rustls", "tower", "tower-http", "tower-service", @@ -3515,8 +3909,15 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", ] +[[package]] +name = "resolv-conf" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" + [[package]] name = "rfc6979" version = "0.4.0" @@ -3541,6 +3942,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "rlp" version = "0.5.2" @@ -3638,7 +4048,21 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls" +version = "0.23.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" +dependencies = [ + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", ] [[package]] @@ -3647,9 +4071,21 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ + "web-time", "zeroize", ] +[[package]] +name = "rustls-webpki" +version = "0.103.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" +dependencies = [ + "ring", + 
"rustls-pki-types", + "untrusted", +] + [[package]] name = "rustversion" version = "1.0.22" @@ -3857,7 +4293,7 @@ version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" dependencies = [ - "base64", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", @@ -4035,7 +4471,7 @@ checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror", + "thiserror 2.0.18", "time", ] @@ -4054,6 +4490,16 @@ dependencies = [ "serde", ] +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "socket2" version = "0.6.2" @@ -4195,6 +4641,12 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tap" version = "1.0.1" @@ -4211,7 +4663,16 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.3", - "windows-sys 0.61.2", + "windows-sys 0.59.0", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", ] [[package]] @@ -4220,7 +4681,18 @@ version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl", + "thiserror-impl 2.0.18", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", ] [[package]] @@ -4307,6 +4779,21 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" version = "1.49.0" @@ -4319,7 +4806,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.2", "tokio-macros", "windows-sys 0.61.2", ] @@ -4345,6 +4832,16 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.18" @@ -4567,7 +5064,7 @@ dependencies = [ "native-tls", "rand 0.9.2", "sha1", - "thiserror", + "thiserror 2.0.18", "utf-8", ] @@ -4685,6 +5182,17 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "uuid" +version = "1.20.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" +dependencies = [ + "getrandom 0.3.4", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.1" @@ -4829,12 +5337,27 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "weezl" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a28ac98ddc8b9274cb41bb4d9d4d5c425b6020c50c46f25559911905610b4a88" +[[package]] +name = "widestring" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471" + [[package]] name = "winapi" version = "0.3.9" @@ -4916,6 +5439,15 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + [[package]] name = "windows-sys" version = "0.52.0" @@ -4952,6 +5484,21 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -4985,6 +5532,12 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -4997,6 +5550,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" @@ -5009,6 +5568,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -5033,6 +5598,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + [[package]] name = "windows_i686_msvc" version = "0.52.6" @@ -5045,6 +5616,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" @@ -5057,6 +5634,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" @@ -5069,6 +5652,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" @@ -5090,6 +5679,16 @@ dependencies = [ "memchr", ] +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + [[package]] name = "wit-bindgen" version = "0.51.0" diff --git a/Cargo.toml b/Cargo.toml index 9209364..589ed76 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,9 @@ license = "MIT" [workspace.lints.rust] missing-debug-implementations = "warn" +rust-2018-idioms = "warn" +elided-lifetimes-in-paths = "allow" +trivial-casts = "warn" unreachable-pub = "warn" unused-must-use = "deny" unnameable-types = "warn" @@ -42,6 +45,12 @@ explicit-iter-loop = "warn" iter-with-drain = "warn" needless-pass-by-ref-mut = "warn" string-lit-as-bytes = "warn" +needless-continue = "warn" +semicolon-if-nothing-returned = "warn" +manual-let-else = "warn" +map-unwrap-or = "warn" +inefficient-to-string = "warn" +checked-conversions = "warn" [workspace.dependencies] clap = { version = "4.0", features = ["derive", "env"] } @@ -83,12 +92,21 @@ alloy-rpc-client = { version = "1.5.2" } alloy-transport-http = { version = "1.5.2" } alloy-sol-types = { version = "1.5.2" } alloy-contract = { version = "1.5.2" } +alloy-signer = { version = "1.5.2" } +alloy-signer-local = { version = "1.5.2", features = ["mnemonic"] } +alloy-network = { version = "1.5.2" } +reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "hickory-dns"] } +rand = "0.8" +humantime = "2.1" # op-alloy op-alloy-rpc-types = { version = "0.22.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.22.0", default-features = false } op-alloy-consensus = { version = "0.22.0", default-features = false } +op-alloy-network = { version = "0.22.0" } + # base -base-flashtypes = { git = "https://github.com/base/base.git" } +base-flashtypes = { git = "https://github.com/base/base.git", rev = "720f6e1a" } basectl-cli = { path = "crates/basectl" } +gobrr = { 
path = "crates/gobrr" } diff --git a/bin/basectl/Cargo.toml b/bin/basectl/Cargo.toml index 83c73a3..2a5e031 100644 --- a/bin/basectl/Cargo.toml +++ b/bin/basectl/Cargo.toml @@ -13,6 +13,9 @@ path = "main.rs" [dependencies] basectl-cli = { workspace = true } +chrono = { workspace = true } clap = { workspace = true } tokio = { workspace = true } anyhow = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } diff --git a/bin/basectl/main.rs b/bin/basectl/main.rs index c1eb277..406acdf 100644 --- a/bin/basectl/main.rs +++ b/bin/basectl/main.rs @@ -1,8 +1,13 @@ +use std::{fs::File, path::Path}; + +use anyhow::bail; use basectl_cli::{ - app::{ViewId, run_app, run_app_with_view}, + app::{ViewId, run_app_with_view, run_loadtest_logs, run_loadtest_tui}, config::ChainConfig, }; +use chrono::Local; use clap::{Parser, Subcommand}; +use tracing_subscriber::{EnvFilter, fmt::layer, layer::SubscriberExt, util::SubscriberInitExt}; #[derive(Debug, Parser)] #[command(name = "basectl")] @@ -30,6 +35,16 @@ enum Commands { /// Command center (combined view) #[command(visible_alias = "cc")] CommandCenter, + /// Run a load test with real-time TUI dashboard + #[command(visible_alias = "lt")] + Loadtest { + /// Path to gobrr YAML config file + #[arg(long = "file", short = 'f')] + file: String, + /// Output text summary every 2s instead of TUI (for headless/CI) + #[arg(long = "logs")] + logs: bool, + }, } #[tokio::main] @@ -39,12 +54,50 @@ async fn main() -> anyhow::Result<()> { let chain_config = ChainConfig::load(&cli.config).await?; match cli.command { + Some(Commands::Loadtest { file, logs }) => { + let path = Path::new(&file); + if !path.is_file() { + bail!("Load test config not found: {}", path.display()); + } + + // Create log file with timestamp + let timestamp = Local::now().format("%Y%m%d-%H%M%S"); + let log_filename = format!("load-test-{timestamp}.log"); + let log_file = File::create(&log_filename)?; + + let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| { + EnvFilter::new("warn,gobrr=debug,basectl=debug,basectl_cli=debug") + }); + + if logs { + // Headless mode: write to both file and stdout + let file_layer = layer().with_writer(log_file).with_ansi(false); + let stdout_layer = layer().with_writer(std::io::stdout); + + tracing_subscriber::registry() + .with(env_filter) + .with(file_layer) + .with(stdout_layer) + .init(); + + eprintln!("Logging to: {log_filename}"); + run_loadtest_logs(chain_config, file).await + } else { + // TUI mode: write only to file (TUI controls terminal) + let file_layer = layer().with_writer(log_file).with_ansi(false); + + tracing_subscriber::registry().with(env_filter).with(file_layer).init(); + + eprintln!("Logging to: {log_filename}"); + run_loadtest_tui(chain_config, file).await + } + } Some(Commands::Config) => run_app_with_view(chain_config, ViewId::Config).await, Some(Commands::Flashblocks) => run_app_with_view(chain_config, ViewId::Flashblocks).await, Some(Commands::Da) => run_app_with_view(chain_config, ViewId::DaMonitor).await, Some(Commands::CommandCenter) => { run_app_with_view(chain_config, ViewId::CommandCenter).await } - None => run_app(chain_config).await, + None => run_app_with_view(chain_config, ViewId::Home).await, } } diff --git a/crates/basectl/Cargo.toml b/crates/basectl/Cargo.toml index 06b28fc..a4cd460 100644 --- a/crates/basectl/Cargo.toml +++ b/crates/basectl/Cargo.toml @@ -27,3 +27,5 @@ alloy-primitives = { workspace = true } alloy-sol-types = { workspace = true } alloy-contract = { workspace 
= true } arboard = { workspace = true } +gobrr = { workspace = true } +tracing = { workspace = true } diff --git a/crates/basectl/src/app/core.rs b/crates/basectl/src/app/core.rs index 8337975..c625351 100644 --- a/crates/basectl/src/app/core.rs +++ b/crates/basectl/src/app/core.rs @@ -46,6 +46,9 @@ impl App { self.resources.da.poll(); self.resources.flash.poll(); self.resources.poll_sys_config(); + if let Some(ref mut lt) = self.resources.loadtest { + lt.poll(); + } let action = current_view.tick(&mut self.resources); if self.handle_action(action, &mut current_view, view_factory) { @@ -76,13 +79,10 @@ impl App { Action::None } KeyCode::Char('q') => Action::Quit, - KeyCode::Esc => { - if self.router.current() == ViewId::Home { - Action::Quit - } else { - Action::SwitchView(ViewId::Home) - } - } + KeyCode::Esc => match self.router.current() { + ViewId::Home | ViewId::LoadTest => Action::Quit, + _ => Action::SwitchView(ViewId::Home), + }, _ => current_view.handle_key(key, &mut self.resources), }; diff --git a/crates/basectl/src/app/mod.rs b/crates/basectl/src/app/mod.rs index 0996639..f267f7e 100644 --- a/crates/basectl/src/app/mod.rs +++ b/crates/basectl/src/app/mod.rs @@ -11,5 +11,5 @@ pub use core::App; pub use action::Action; pub use resources::{DaState, FlashState, Resources}; pub use router::{Router, ViewId}; -pub use runner::{run_app, run_app_with_view}; +pub use runner::{run_app_with_view, run_loadtest_logs, run_loadtest_tui}; pub use view::View; diff --git a/crates/basectl/src/app/resources.rs b/crates/basectl/src/app/resources.rs index 4ae3fde..1ce0195 100644 --- a/crates/basectl/src/app/resources.rs +++ b/crates/basectl/src/app/resources.rs @@ -19,6 +19,7 @@ pub struct Resources { pub flash: FlashState, pub system_config: Option, sys_config_rx: Option>, + pub loadtest: Option, } #[derive(Debug)] @@ -55,6 +56,7 @@ impl Resources { flash: FlashState::new(), system_config: None, sys_config_rx: None, + loadtest: None, } } @@ -271,11 +273,30 @@ impl FlashState { let time_diff_ms = self.entries.front().map(|prev| (received_at - prev.timestamp).num_milliseconds()); + let tx_count = fb.diff.transactions.len(); + let gas_used = fb.diff.gas_used; + + // Compute cumulative values by finding previous entry for same block + let (cumulative_tx_count, cumulative_gas_used) = if fb.index == 0 { + // First flash of this block, cumulative = diff + (tx_count, gas_used) + } else { + // Find the most recent entry for the same block to get its cumulative values + self.entries + .iter() + .find(|e| e.block_number == fb.metadata.block_number) + .map_or((tx_count, gas_used), |prev| { + (prev.cumulative_tx_count + tx_count, prev.cumulative_gas_used + gas_used) + }) + }; + let entry = FlashblockEntry { block_number: fb.metadata.block_number, index: fb.index, - tx_count: fb.diff.transactions.len(), - gas_used: fb.diff.gas_used, + tx_count, + gas_used, + cumulative_tx_count, + cumulative_gas_used, gas_limit: self.current_gas_limit, base_fee, prev_base_fee, diff --git a/crates/basectl/src/app/router.rs b/crates/basectl/src/app/router.rs index 71445d4..5e26adc 100644 --- a/crates/basectl/src/app/router.rs +++ b/crates/basectl/src/app/router.rs @@ -5,41 +5,24 @@ pub enum ViewId { DaMonitor, Flashblocks, Config, + LoadTest, } #[derive(Debug)] pub struct Router { current: ViewId, - history: Vec, } impl Router { pub const fn new(initial: ViewId) -> Self { - Self { current: initial, history: Vec::new() } + Self { current: initial } } pub const fn current(&self) -> ViewId { self.current } - pub fn switch_to(&mut self, 
view: ViewId) { - if view != self.current { - self.history.push(self.current); - self.current = view; - } - } - - pub fn back(&mut self) -> bool { - if let Some(prev) = self.history.pop() { - self.current = prev; - true - } else { - false - } - } - - pub fn go_home(&mut self) { - self.history.clear(); - self.current = ViewId::Home; + pub const fn switch_to(&mut self, view: ViewId) { + self.current = view; } } diff --git a/crates/basectl/src/app/runner.rs b/crates/basectl/src/app/runner.rs index 8a6feb5..c297f17 100644 --- a/crates/basectl/src/app/runner.rs +++ b/crates/basectl/src/app/runner.rs @@ -2,6 +2,8 @@ use std::time::Duration; use anyhow::Result; use base_flashtypes::Flashblock; +use chrono::Local; +use gobrr::LoadTestPhase; use tokio::sync::mpsc; use super::{App, Resources, ViewId, views::create_view}; @@ -15,24 +17,247 @@ use crate::{ }, }; -pub async fn run_app(config: ChainConfig) -> Result<()> { +pub async fn run_app_with_view(config: ChainConfig, initial_view: ViewId) -> Result<()> { let mut resources = Resources::new(config.clone()); start_background_services(&config, &mut resources); - let app = App::new(resources, ViewId::Home); + let app = App::new(resources, initial_view); app.run(create_view).await } -pub async fn run_app_with_view(config: ChainConfig, initial_view: ViewId) -> Result<()> { +/// Run load test with TUI dashboard. +pub async fn run_loadtest_tui(config: ChainConfig, file: String) -> Result<()> { let mut resources = Resources::new(config.clone()); - start_background_services(&config, &mut resources); - let app = App::new(resources, initial_view); + let handle = gobrr::start_load_test(&file).await?; + activate_loadtest(&mut resources, handle, file); + + let app = App::new(resources, ViewId::LoadTest); app.run(create_view).await } +/// Given a successful `LoadTestHandle`, activate the load test via gobrr's +/// orchestrator and store the resulting state. +fn activate_loadtest( + resources: &mut Resources, + handle: gobrr::LoadTestHandle, + config_file: String, +) { + resources.loadtest = Some(gobrr::activate(handle, config_file)); +} + +/// Run load test with text logs output (for headless/CI environments). +pub async fn run_loadtest_logs(config: ChainConfig, file: String) -> Result<()> { + let mut resources = Resources::new(config.clone()); + start_background_services(&config, &mut resources); + + let handle = gobrr::start_load_test(&file).await?; + activate_loadtest(&mut resources, handle, file); + + let mut poll_interval = tokio::time::interval(Duration::from_millis(100)); + let mut print_interval = tokio::time::interval(Duration::from_secs(2)); + let mut last_failed = 0u64; + let mut last_failure_reasons: std::collections::HashMap = + std::collections::HashMap::new(); + + println!("Load test started. Press Ctrl+C to stop.\n"); + + loop { + tokio::select! 
{
+            biased;
+            _ = tokio::signal::ctrl_c() => {
+                println!("\nReceived Ctrl+C, shutting down...");
+                break;
+            }
+            _ = poll_interval.tick() => {
+                // Poll all resources to drain channels
+                resources.flash.poll();
+                resources.da.poll();
+                resources.poll_sys_config();
+                if let Some(ref mut lt) = resources.loadtest {
+                    lt.poll();
+
+                    // Check for new errors and print immediately
+                    if let Some(ref stats) = lt.stats
+                        && stats.failed > last_failed
+                    {
+                        // Find new failure reasons
+                        for (reason, &count) in &stats.failure_reasons {
+                            let prev_count =
+                                last_failure_reasons.get(reason).copied().unwrap_or(0);
+                            if count > prev_count {
+                                println!("[ERROR] {reason} (total: {count})");
+                            }
+                        }
+                        last_failed = stats.failed;
+                        last_failure_reasons = stats.failure_reasons.clone();
+                    }
+                }
+            }
+            _ = print_interval.tick() => {
+                print_loadtest_summary(&resources);
+
+                // Check if complete
+                if let Some(ref lt) = resources.loadtest
+                    && lt.phase == LoadTestPhase::Complete
+                {
+                    println!("\nLoad test complete.");
+                    break;
+                }
+            }
+        }
+    }
+
+    // Trigger shutdown if not already done
+    if let Some(ref mut lt) = resources.loadtest
+        && let Some(tx) = lt.shutdown_tx.take()
+    {
+        let _ = tx.send(());
+    }
+
+    // Wait a bit for final stats
+    tokio::time::sleep(Duration::from_millis(500)).await;
+    if let Some(ref mut lt) = resources.loadtest {
+        lt.poll();
+    }
+    print_loadtest_summary(&resources);
+
+    Ok(())
+}
+
+fn print_loadtest_summary(resources: &Resources) {
+    let Some(lt) = &resources.loadtest else {
+        println!("No load test active");
+        return;
+    };
+
+    let now = Local::now();
+    let elapsed = lt.stats.as_ref().map_or(0.0, |s| s.elapsed_secs);
+    let elapsed_str = format_elapsed_secs(elapsed);
+    let duration_str =
+        lt.duration.map_or_else(|| "indefinite".to_string(), |d| format!("{}s", d.as_secs()));
+
+    let phase_str = format!("{}", lt.phase);
+
+    println!(
+        "[{}] LoadTest Status (elapsed: {} / {}) - {}",
+        now.format("%Y-%m-%d %H:%M:%S"),
+        elapsed_str,
+        duration_str,
+        phase_str
+    );
+    println!("{}", "─".repeat(60));
+
+    // Block info from flash state (use cumulative values for the block)
+    if let Some(entry) = resources.flash.entries.front() {
+        let gas_pct = if entry.gas_limit > 0 {
+            (entry.cumulative_gas_used as f64 / entry.gas_limit as f64 * 100.0) as u64
+        } else {
+            0
+        };
+        println!(
+            "Block: {} | Gas: {}/{} ({}%) | Txs: {}",
+            entry.block_number,
+            format_gas(entry.cumulative_gas_used),
+            format_gas(entry.gas_limit),
+            gas_pct,
+            entry.cumulative_tx_count
+        );
+    } else {
+        println!("Block: N/A");
+    }
+
+    // Mempool / txpool status
+    if !lt.txpool_status.is_empty() {
+        println!("\nMempool:");
+        for status in &lt.txpool_status {
+            let host_display = status
+                .host
+                .strip_prefix("http://")
+                .or_else(|| status.host.strip_prefix("https://"))
+                .unwrap_or(&status.host);
+            println!(" {} pending={} queued={}", host_display, status.pending, status.queued);
+        }
+    }
+
+    // Stats
+    if let Some(stats) = &lt.stats {
+        println!("\nThroughput:");
+        println!(" Send TPS: {:.1} | Confirmed TPS: {:.1}", stats.tps(), stats.confirmed_tps());
+
+        println!("\nTransactions:");
+        println!(
+            " Sent: {} Confirmed: {} Pending: {} Failed: {} Timed Out: {}",
+            stats.sent,
+            stats.confirmed,
+            stats.pending(),
+            stats.failed,
+            stats.timed_out
+        );
+
+        if stats.fb_inclusion_count > 0 {
+            println!("\nFB Inclusion (flashblocks):");
+            println!(
+                " P50: {}ms P95: {}ms P99: {}ms",
+                stats.fb_percentile(50.0),
+                stats.fb_percentile(95.0),
+                stats.fb_percentile(99.0)
+            );
+        }
+
+        if stats.block_inclusion_count > 0 {
+
println!("\nBlock Inclusion (RPC):"); + println!( + " P50: {}ms P95: {}ms P99: {}ms", + stats.block_percentile(50.0), + stats.block_percentile(95.0), + stats.block_percentile(99.0) + ); + } + + if !stats.failure_reasons.is_empty() { + let total_errors: u64 = stats.failure_reasons.values().sum(); + println!("\nErrors: {total_errors}"); + for (reason, count) in &stats.failure_reasons { + let display_reason = if reason.len() > 30 { &reason[..30] } else { reason }; + println!(" {display_reason}: {count}"); + } + } + } else { + println!("\nWaiting for stats..."); + } + + println!(); +} + +fn format_elapsed_secs(secs: f64) -> String { + let total = secs as u64; + let hours = total / 3600; + let minutes = (total % 3600) / 60; + let seconds = total % 60; + if hours > 0 { + format!("{hours}h{minutes:02}m{seconds:02}s") + } else if minutes > 0 { + format!("{minutes}m{seconds:02}s") + } else { + format!("{seconds}s") + } +} + +fn format_gas(gas: u64) -> String { + if gas >= 1_000_000_000 { + format!("{:.1}B", gas as f64 / 1_000_000_000.0) + } else if gas >= 1_000_000 { + format!("{:.1}M", gas as f64 / 1_000_000.0) + } else if gas >= 1_000 { + format!("{:.0}K", gas as f64 / 1_000.0) + } else { + gas.to_string() + } +} + fn start_background_services(config: &ChainConfig, resources: &mut Resources) { let (fb_tx, fb_rx) = mpsc::channel::(100); let (da_fb_tx, da_fb_rx) = mpsc::channel::(100); @@ -49,11 +274,15 @@ fn start_background_services(config: &ChainConfig, resources: &mut Resources) { let ws_url2 = config.flashblocks_ws.to_string(); tokio::spawn(async move { - let _ = run_flashblock_ws_timestamped(ws_url, fb_tx).await; + if let Err(e) = run_flashblock_ws_timestamped(ws_url.clone(), fb_tx).await { + tracing::warn!(url = %ws_url, error = %e, "Flashblocks timestamped websocket disconnected"); + } }); tokio::spawn(async move { - let _ = run_flashblock_ws(ws_url2, da_fb_tx).await; + if let Err(e) = run_flashblock_ws(ws_url2.clone(), da_fb_tx).await { + tracing::warn!(url = %ws_url2, error = %e, "Flashblocks websocket disconnected"); + } }); let rpc_url = config.rpc.to_string(); diff --git a/crates/basectl/src/app/views/command_center.rs b/crates/basectl/src/app/views/command_center.rs index 6d8fbfa..77994e8 100644 --- a/crates/basectl/src/app/views/command_center.rs +++ b/crates/basectl/src/app/views/command_center.rs @@ -296,9 +296,9 @@ fn render_config_panel(f: &mut Frame, area: Rect, resources: &Resources) { let denominator = sys.eip1559_denominator.unwrap_or(0); let basefee_scalar = - sys.basefee_scalar.map(|s| s.to_string()).unwrap_or_else(|| "-".to_string()); + sys.basefee_scalar.map_or_else(|| "-".to_string(), |s| s.to_string()); let blobbasefee_scalar = - sys.blobbasefee_scalar.map(|s| s.to_string()).unwrap_or_else(|| "-".to_string()); + sys.blobbasefee_scalar.map_or_else(|| "-".to_string(), |s| s.to_string()); vec![ Line::from(vec![ @@ -370,7 +370,7 @@ fn render_stats_panel(f: &mut Frame, area: Rect, resources: &Resources) { Line::from(vec![ Span::styled("Last batch: ", Style::default().fg(Color::DarkGray)), Span::styled( - time_since.map(format_duration).unwrap_or_else(|| "-".to_string()), + time_since.map_or_else(|| "-".to_string(), format_duration), Style::default().fg(Color::White), ), Span::raw(" "), @@ -519,7 +519,7 @@ fn render_flash_panel( }; let (base_fee_str, base_fee_style) = if entry.index == 0 { - let fee_str = entry.base_fee.map(format_gwei).unwrap_or_else(|| "-".to_string()); + let fee_str = entry.base_fee.map_or_else(|| "-".to_string(), format_gwei); let style = match 
(entry.base_fee, entry.prev_base_fee) { (Some(curr), Some(prev)) if curr > prev => Style::default().fg(Color::Red), (Some(curr), Some(prev)) if curr < prev => Style::default().fg(Color::Green), diff --git a/crates/basectl/src/app/views/config.rs b/crates/basectl/src/app/views/config.rs index 3dc9c2a..0a22816 100644 --- a/crates/basectl/src/app/views/config.rs +++ b/crates/basectl/src/app/views/config.rs @@ -63,11 +63,8 @@ impl View for ConfigView { fn render_chain_config(f: &mut Frame, area: Rect, resources: &Resources) { let config = &resources.config; - let batcher_str = config - .batcher_address - .as_ref() - .map(|a| format!("{a:#x}")) - .unwrap_or_else(|| "-".to_string()); + let batcher_str = + config.batcher_address.as_ref().map_or_else(|| "-".to_string(), |a| format!("{a:#x}")); let mut lines = vec![ Line::from(vec![ @@ -84,7 +81,7 @@ fn render_chain_config(f: &mut Frame, area: Rect, resources: &Resources) { ("RPC", config.rpc.as_str()), ("Flashblocks WS", config.flashblocks_ws.as_str()), ("L1 RPC", config.l1_rpc.as_str()), - ("Op-Node RPC", config.op_node_rpc.as_ref().map(|u| u.as_str()).unwrap_or("-")), + ("Op-Node RPC", config.op_node_rpc.as_ref().map_or("-", |u| u.as_str())), ("Batcher Address", &batcher_str), ]; @@ -113,12 +110,11 @@ fn render_system_config(f: &mut Frame, area: Rect, resources: &Resources) { let content = match &resources.system_config { Some(sys) => { - let gas_limit_str = - sys.gas_limit.map(|g| g.to_string()).unwrap_or_else(|| "-".to_string()); + let gas_limit_str = sys.gas_limit.map_or_else(|| "-".to_string(), |g| g.to_string()); let elasticity_str = - sys.eip1559_elasticity.map(|e| e.to_string()).unwrap_or_else(|| "-".to_string()); + sys.eip1559_elasticity.map_or_else(|| "-".to_string(), |e| e.to_string()); let denominator_str = - sys.eip1559_denominator.map(|d| d.to_string()).unwrap_or_else(|| "-".to_string()); + sys.eip1559_denominator.map_or_else(|| "-".to_string(), |d| d.to_string()); let lines = vec![ Line::from(vec![ diff --git a/crates/basectl/src/app/views/da_monitor.rs b/crates/basectl/src/app/views/da_monitor.rs index 8d75cfb..79972f5 100644 --- a/crates/basectl/src/app/views/da_monitor.rs +++ b/crates/basectl/src/app/views/da_monitor.rs @@ -182,7 +182,7 @@ fn render_stats_panel(f: &mut Frame, area: Rect, resources: &Resources) { Line::from(vec![ Span::styled("Last batch: ", Style::default().fg(Color::DarkGray)), Span::styled( - time_since.map(format_duration).unwrap_or_else(|| "-".to_string()), + time_since.map_or_else(|| "-".to_string(), format_duration), Style::default().fg(Color::White), ), ]), diff --git a/crates/basectl/src/app/views/factory.rs b/crates/basectl/src/app/views/factory.rs index 1f270b7..e0fecba 100644 --- a/crates/basectl/src/app/views/factory.rs +++ b/crates/basectl/src/app/views/factory.rs @@ -1,4 +1,6 @@ -use super::{CommandCenterView, ConfigView, DaMonitorView, FlashblocksView, HomeView}; +use super::{ + CommandCenterView, ConfigView, DaMonitorView, FlashblocksView, HomeView, LoadTestView, +}; use crate::app::{View, ViewId}; pub fn create_view(view_id: ViewId) -> Box { @@ -8,5 +10,6 @@ pub fn create_view(view_id: ViewId) -> Box { ViewId::DaMonitor => Box::new(DaMonitorView::new()), ViewId::Flashblocks => Box::new(FlashblocksView::new()), ViewId::Config => Box::new(ConfigView::new()), + ViewId::LoadTest => Box::new(LoadTestView::new()), } } diff --git a/crates/basectl/src/app/views/flashblocks.rs b/crates/basectl/src/app/views/flashblocks.rs index 78f9193..c2db3bf 100644 --- a/crates/basectl/src/app/views/flashblocks.rs +++ 
b/crates/basectl/src/app/views/flashblocks.rs @@ -185,8 +185,7 @@ impl View for FlashblocksView { }; let (base_fee_str, base_fee_style) = if entry.index == 0 { - let fee_str = - entry.base_fee.map(format_gwei).unwrap_or_else(|| "-".to_string()); + let fee_str = entry.base_fee.map_or_else(|| "-".to_string(), format_gwei); let style = match (entry.base_fee, entry.prev_base_fee) { (Some(curr), Some(prev)) if curr > prev => Style::default().fg(Color::Red), (Some(curr), Some(prev)) if curr < prev => { diff --git a/crates/basectl/src/app/views/loadtest.rs b/crates/basectl/src/app/views/loadtest.rs new file mode 100644 index 0000000..eda8a0a --- /dev/null +++ b/crates/basectl/src/app/views/loadtest.rs @@ -0,0 +1,634 @@ +use arboard::Clipboard; +use crossterm::event::{KeyCode, KeyEvent}; +use gobrr::LoadTestPhase; +use ratatui::{ + layout::{Constraint, Direction, Layout, Rect}, + prelude::*, + widgets::{Block, Borders, Cell, Paragraph, Row, Table, TableState}, +}; + +use crate::{ + app::{Action, Resources, View}, + commands::common::{ + COLOR_ACTIVE_BORDER, COLOR_BASE_BLUE, COLOR_ROW_HIGHLIGHTED, COLOR_ROW_SELECTED, + build_gas_bar, format_gas, format_gwei, time_diff_color, + }, + tui::Keybinding, +}; + +const GAS_BAR_CHARS: usize = 40; +const DEFAULT_ELASTICITY: u64 = 6; + +const DASHBOARD_KEYBINDINGS: &[Keybinding] = &[ + Keybinding { key: "Esc/q", description: "Quit" }, + Keybinding { key: "?", description: "Toggle help" }, + Keybinding { key: "Space", description: "Pause/Resume flash" }, + Keybinding { key: "Up/k", description: "Scroll up" }, + Keybinding { key: "Down/j", description: "Scroll down" }, + Keybinding { key: "PgUp", description: "Page up" }, + Keybinding { key: "PgDn", description: "Page down" }, + Keybinding { key: "y", description: "Copy block number" }, +]; + +#[derive(Debug)] +struct DashboardState { + table_state: TableState, + auto_scroll: bool, +} + +impl DashboardState { + fn new() -> Self { + let mut table_state = TableState::default(); + table_state.select(Some(0)); + Self { table_state, auto_scroll: true } + } +} + +#[derive(Debug, Default)] +pub struct LoadTestView { + dashboard: Option, +} + +impl LoadTestView { + pub const fn new() -> Self { + Self { dashboard: None } + } +} + +impl View for LoadTestView { + fn keybindings(&self) -> &'static [Keybinding] { + DASHBOARD_KEYBINDINGS + } + + fn tick(&mut self, resources: &mut Resources) -> Action { + if resources.loadtest.is_some() { + if self.dashboard.is_none() { + self.dashboard = Some(DashboardState::new()); + } + if let Some(state) = self.dashboard.as_mut() + && state.auto_scroll + && !resources.flash.entries.is_empty() + { + state.table_state.select(Some(0)); + } + } else { + self.dashboard = None; + } + Action::None + } + + fn handle_key(&mut self, key: KeyEvent, resources: &mut Resources) -> Action { + if let Some(state) = self.dashboard.as_mut() { + handle_dashboard_key(key, &mut state.table_state, &mut state.auto_scroll, resources) + } else { + Action::None + } + } + + fn render(&mut self, frame: &mut Frame, area: Rect, resources: &Resources) { + if let Some(state) = self.dashboard.as_mut() { + render_dashboard(frame, area, resources, &state.table_state); + } else { + render_idle(frame, area); + } + } +} + +fn render_idle(frame: &mut Frame, area: Rect) { + let block = Block::default() + .title(" Load Test ") + .borders(Borders::ALL) + .border_style(Style::default().fg(Color::DarkGray)); + + let inner = block.inner(area); + frame.render_widget(block, area); + + let lines = vec![ + Line::from(""), + 
Line::from(Span::styled( + "No load test active", + Style::default().fg(Color::DarkGray).add_modifier(Modifier::BOLD), + )), + Line::from(""), + Line::from(Span::styled( + "Run: basectl loadtest --file ", + Style::default().fg(Color::DarkGray), + )), + ]; + + let para = Paragraph::new(lines).alignment(Alignment::Center); + frame.render_widget(para, inner); +} + +fn handle_dashboard_key( + key: KeyEvent, + table_state: &mut TableState, + auto_scroll: &mut bool, + resources: &mut Resources, +) -> Action { + match key.code { + KeyCode::Char(' ') => { + resources.flash.paused = !resources.flash.paused; + Action::None + } + KeyCode::Up | KeyCode::Char('k') => { + if let Some(selected) = table_state.selected() { + if selected > 0 { + table_state.select(Some(selected - 1)); + *auto_scroll = false; + } else { + *auto_scroll = true; + } + } + Action::None + } + KeyCode::Down | KeyCode::Char('j') => { + if let Some(selected) = table_state.selected() { + let max = resources.flash.entries.len().saturating_sub(1); + if selected < max { + table_state.select(Some(selected + 1)); + *auto_scroll = false; + } + } + Action::None + } + KeyCode::PageUp => { + if let Some(selected) = table_state.selected() { + let new_pos = selected.saturating_sub(10); + table_state.select(Some(new_pos)); + *auto_scroll = new_pos == 0; + } + Action::None + } + KeyCode::PageDown => { + if let Some(selected) = table_state.selected() { + let max = resources.flash.entries.len().saturating_sub(1); + let new_pos = (selected + 10).min(max); + table_state.select(Some(new_pos)); + *auto_scroll = false; + } + Action::None + } + KeyCode::Home | KeyCode::Char('g') => { + table_state.select(Some(0)); + *auto_scroll = true; + Action::None + } + KeyCode::Char('y') => { + if let Some(idx) = table_state.selected() + && let Some(entry) = resources.flash.entries.get(idx) + && let Ok(mut clipboard) = Clipboard::new() + { + let _ = clipboard.set_text(entry.block_number.to_string()); + } + Action::None + } + _ => Action::None, + } +} + +fn render_dashboard( + frame: &mut Frame, + area: Rect, + resources: &Resources, + table_state: &TableState, +) { + let is_complete = + resources.loadtest.as_ref().is_some_and(|lt| lt.phase == LoadTestPhase::Complete); + + // Main layout: header + optional banner + body + let main_chunks = if is_complete { + Layout::default() + .direction(Direction::Vertical) + .constraints([ + Constraint::Length(1), // header bar + Constraint::Length(1), // completion banner + Constraint::Min(0), // body + ]) + .split(area) + } else { + Layout::default() + .direction(Direction::Vertical) + .constraints([ + Constraint::Length(1), // header bar + Constraint::Min(0), // body + ]) + .split(area) + }; + + render_header(frame, main_chunks[0], resources); + + let body_area = if is_complete { + // Render completion banner + render_completion_banner(frame, main_chunks[1], resources); + main_chunks[2] + } else { + main_chunks[1] + }; + + // Body: top (55%) + bottom (45%) + let body_chunks = Layout::default() + .direction(Direction::Vertical) + .constraints([ + Constraint::Percentage(55), // metrics + txpool + Constraint::Percentage(45), // flashblocks + ]) + .split(body_area); + + // Top: metrics (50%) + txpool (50%) + let top_chunks = Layout::default() + .direction(Direction::Horizontal) + .constraints([ + Constraint::Percentage(50), // metrics + Constraint::Percentage(50), // txpool + ]) + .split(body_chunks[0]); + + render_metrics(frame, top_chunks[0], resources); + render_txpool(frame, top_chunks[1], resources); + 
render_flashblocks(frame, body_chunks[1], resources, table_state); +} + +fn render_header(f: &mut Frame, area: Rect, resources: &Resources) { + let Some(lt) = &resources.loadtest else { + let para = + Paragraph::new("No load test active").style(Style::default().fg(Color::DarkGray)); + f.render_widget(para, area); + return; + }; + + let elapsed = lt.stats.as_ref().map_or(0.0, |s| s.elapsed_secs); + let elapsed_str = format_elapsed(elapsed); + + let duration_str = + lt.duration.map_or_else(|| "indefinite".to_string(), |d| format!("{}s", d.as_secs())); + + let tps_str = lt.target_tps.map_or_else(|| "unlimited".to_string(), |tps| format!("{tps}")); + + let phase_color = match lt.phase { + LoadTestPhase::Running => Color::Green, + LoadTestPhase::Complete => Color::Cyan, + LoadTestPhase::Starting | LoadTestPhase::Draining => Color::Yellow, + }; + + let spans = vec![ + Span::styled( + " LOAD TEST ", + Style::default().fg(Color::Black).bg(COLOR_BASE_BLUE).add_modifier(Modifier::BOLD), + ), + Span::raw(" "), + Span::styled(<.config_file, Style::default().fg(Color::White)), + Span::raw(" "), + Span::styled(format!("{elapsed_str}/{duration_str}"), Style::default().fg(Color::Cyan)), + Span::raw(" "), + Span::styled(format!("TPS: {tps_str}"), Style::default().fg(Color::White)), + Span::raw(" "), + Span::styled( + format!("[{}]", lt.phase), + Style::default().fg(phase_color).add_modifier(Modifier::BOLD), + ), + ]; + + let para = Paragraph::new(Line::from(spans)); + f.render_widget(para, area); +} + +fn render_completion_banner(f: &mut Frame, area: Rect, resources: &Resources) { + let (sent, confirmed, failed) = resources + .loadtest + .as_ref() + .and_then(|lt| lt.stats.as_ref()) + .map_or((0, 0, 0), |s| (s.sent, s.confirmed, s.failed)); + + let text = + format!("✓ LOAD TEST COMPLETE - {sent} sent, {confirmed} confirmed, {failed} failed"); + + let para = Paragraph::new(text) + .style(Style::default().fg(Color::White).bg(Color::Green).add_modifier(Modifier::BOLD)) + .alignment(Alignment::Center); + + f.render_widget(para, area); +} + +fn render_metrics(f: &mut Frame, area: Rect, resources: &Resources) { + let block = Block::default() + .title(" Metrics ") + .borders(Borders::ALL) + .border_style(Style::default().fg(COLOR_ACTIVE_BORDER)); + + let inner = block.inner(area); + f.render_widget(block, area); + + let Some(stats) = resources.loadtest.as_ref().and_then(|lt| lt.stats.as_ref()) else { + let para = + Paragraph::new("Waiting for stats...").style(Style::default().fg(Color::DarkGray)); + f.render_widget(para, inner); + return; + }; + + let label_style = Style::default().fg(Color::DarkGray); + let value_style = Style::default().fg(Color::White).add_modifier(Modifier::BOLD); + let section_style = Style::default().fg(COLOR_BASE_BLUE).add_modifier(Modifier::BOLD); + let error_style = Style::default().fg(Color::Red).add_modifier(Modifier::BOLD); + + let mut lines = Vec::new(); + + // Throughput + lines.push(Line::from(Span::styled("THROUGHPUT", section_style))); + lines.push(Line::from(vec![ + Span::styled(" Send TPS ", label_style), + Span::styled(format!("{:.0} tx/s", stats.tps()), value_style), + ])); + lines.push(Line::from(vec![ + Span::styled(" Confirmed TPS ", label_style), + Span::styled(format!("{:.0} tx/s", stats.confirmed_tps()), value_style), + ])); + lines.push(Line::from("")); + + // Transactions + lines.push(Line::from(Span::styled("TRANSACTIONS", section_style))); + lines.push(Line::from(vec![ + Span::styled(" Sent ", label_style), + Span::styled(format_number(stats.sent), value_style), + 
])); + lines.push(Line::from(vec![ + Span::styled(" Confirmed ", label_style), + Span::styled(format_number(stats.confirmed), value_style), + ])); + lines.push(Line::from(vec![ + Span::styled(" Pending ", label_style), + Span::styled( + format_number(stats.pending()), + Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD), + ), + ])); + lines.push(Line::from(vec![ + Span::styled(" Failed ", label_style), + Span::styled( + format_number(stats.failed), + if stats.failed > 0 { error_style } else { value_style }, + ), + ])); + lines.push(Line::from(vec![ + Span::styled(" Timed Out ", label_style), + Span::styled( + format_number(stats.timed_out), + if stats.timed_out > 0 { error_style } else { value_style }, + ), + ])); + lines.push(Line::from("")); + + // Flashblock Inclusion Times + lines.push(Line::from(Span::styled("FB INCLUSION", section_style))); + if stats.fb_inclusion_count > 0 { + lines.push(Line::from(vec![ + Span::styled(" P50 ", label_style), + Span::styled(format_ms(stats.fb_percentile(50.0)), value_style), + Span::styled(" P95 ", label_style), + Span::styled(format_ms(stats.fb_percentile(95.0)), value_style), + Span::styled(" P99 ", label_style), + Span::styled(format_ms(stats.fb_percentile(99.0)), value_style), + ])); + } else { + lines.push(Line::from(Span::styled(" N/A", Style::default().fg(Color::DarkGray)))); + } + lines.push(Line::from("")); + + // Block Inclusion Times + lines.push(Line::from(Span::styled("BLOCK INCLUSION", section_style))); + if stats.block_inclusion_count > 0 { + lines.push(Line::from(vec![ + Span::styled(" P50 ", label_style), + Span::styled(format_ms(stats.block_percentile(50.0)), value_style), + Span::styled(" P95 ", label_style), + Span::styled(format_ms(stats.block_percentile(95.0)), value_style), + Span::styled(" P99 ", label_style), + Span::styled(format_ms(stats.block_percentile(99.0)), value_style), + ])); + } else { + lines.push(Line::from(Span::styled(" N/A", Style::default().fg(Color::DarkGray)))); + } + + // Errors + if !stats.failure_reasons.is_empty() { + lines.push(Line::from("")); + let total_errors: u64 = stats.failure_reasons.values().sum(); + lines.push(Line::from(vec![ + Span::styled("ERRORS", section_style), + Span::raw(" "), + Span::styled(format!("{total_errors} total"), error_style), + ])); + for (reason, count) in &stats.failure_reasons { + let display_reason = if reason.len() > 20 { &reason[..20] } else { reason }; + lines.push(Line::from(vec![ + Span::styled(format!(" {display_reason:<20} "), label_style), + Span::styled(count.to_string(), Style::default().fg(Color::Red)), + ])); + } + } + + let para = Paragraph::new(lines); + f.render_widget(para, inner); +} + +fn render_txpool(f: &mut Frame, area: Rect, resources: &Resources) { + let block = Block::default() + .title(" Txpool ") + .borders(Borders::ALL) + .border_style(Style::default().fg(Color::DarkGray)); + + let inner = block.inner(area); + f.render_widget(block, area); + + let Some(lt) = &resources.loadtest else { + return; + }; + + if lt.txpool_hosts.is_empty() { + let para = Paragraph::new("No management hosts configured") + .style(Style::default().fg(Color::DarkGray)); + f.render_widget(para, inner); + return; + } + + if lt.txpool_status.is_empty() { + let para = Paragraph::new("Waiting for txpool data...") + .style(Style::default().fg(Color::DarkGray)); + f.render_widget(para, inner); + return; + } + + let header_style = Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD); + let header = Row::new(vec![ + Cell::from("Host").style(header_style), 
+ Cell::from("Pending").style(header_style), + Cell::from("Queued").style(header_style), + ]); + + let rows: Vec = lt + .txpool_status + .iter() + .map(|status| { + let host_display = status + .host + .strip_prefix("http://") + .or_else(|| status.host.strip_prefix("https://")) + .unwrap_or(&status.host); + + let pending_style = if status.pending > 1000 { + Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD) + } else { + Style::default().fg(Color::White) + }; + + Row::new(vec![ + Cell::from(host_display.to_string()).style(Style::default().fg(Color::Cyan)), + Cell::from(format_number(status.pending)).style(pending_style), + Cell::from(format_number(status.queued)).style(Style::default().fg(Color::White)), + ]) + }) + .collect(); + + let widths = [Constraint::Min(20), Constraint::Length(10), Constraint::Length(10)]; + + let table = Table::new(rows, widths).header(header); + f.render_widget(table, inner); +} + +fn render_flashblocks(f: &mut Frame, area: Rect, resources: &Resources, table_state: &TableState) { + let flash = &resources.flash; + + let title = if flash.paused { + format!(" Flashblocks [PAUSED] - {} msgs ", flash.message_count) + } else { + format!(" Flashblocks - {} msgs ", flash.message_count) + }; + + let border_color = if flash.paused { Color::Yellow } else { COLOR_ACTIVE_BORDER }; + + let block = Block::default() + .title(title) + .borders(Borders::ALL) + .border_style(Style::default().fg(border_color)); + + let inner = block.inner(area); + f.render_widget(block, area); + + let highlighted_block = + table_state.selected().and_then(|idx| flash.entries.get(idx)).map(|e| e.block_number); + + let header_style = Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD); + let header = Row::new(vec![ + Cell::from("Block").style(header_style), + Cell::from("Idx").style(header_style), + Cell::from("Txs").style(header_style), + Cell::from("Gas").style(header_style), + Cell::from("Base Fee").style(header_style), + Cell::from("\u{0394}t").style(header_style), + Cell::from("Fill").style(header_style), + ]); + + let rows: Vec = flash + .entries + .iter() + .enumerate() + .map(|(idx, entry)| { + let is_selected = table_state.selected() == Some(idx); + let is_highlighted = highlighted_block == Some(entry.block_number); + + let row_style = if is_selected { + Style::default().bg(COLOR_ROW_SELECTED) + } else if is_highlighted { + Style::default().bg(COLOR_ROW_HIGHLIGHTED) + } else { + Style::default() + }; + + let (base_fee_str, base_fee_style) = if entry.index == 0 { + let fee_str = entry.base_fee.map_or_else(|| "-".to_string(), format_gwei); + let style = match (entry.base_fee, entry.prev_base_fee) { + (Some(curr), Some(prev)) if curr > prev => Style::default().fg(Color::Red), + (Some(curr), Some(prev)) if curr < prev => Style::default().fg(Color::Green), + _ => Style::default().fg(Color::White), + }; + (fee_str, style) + } else { + (String::new(), Style::default()) + }; + + let gas_bar = + build_gas_bar(entry.gas_used, entry.gas_limit, DEFAULT_ELASTICITY, GAS_BAR_CHARS); + + let (time_diff_str, time_style) = entry.time_diff_ms.map_or_else( + || ("-".to_string(), Style::default().fg(Color::DarkGray)), + |ms| (format!("+{ms}ms"), Style::default().fg(time_diff_color(ms))), + ); + + let first_fb_style = if entry.index == 0 { + Style::default().fg(Color::Green) + } else { + Style::default().fg(Color::White) + }; + + Row::new(vec![ + Cell::from(entry.block_number.to_string()).style(first_fb_style), + Cell::from(entry.index.to_string()).style(first_fb_style), + 
Cell::from(entry.tx_count.to_string()).style(first_fb_style), + Cell::from(format_gas(entry.gas_used)), + Cell::from(base_fee_str).style(base_fee_style), + Cell::from(time_diff_str).style(time_style), + Cell::from(gas_bar), + ]) + .style(row_style) + }) + .collect(); + + let widths = [ + Constraint::Length(10), + Constraint::Length(4), + Constraint::Length(4), + Constraint::Length(7), + Constraint::Length(12), + Constraint::Length(8), + Constraint::Min(GAS_BAR_CHARS as u16 + 2), + ]; + + let table = Table::new(rows, widths).header(header); + f.render_stateful_widget(table, inner, &mut table_state.clone()); +} + +fn format_number(n: u64) -> String { + if n >= 1_000_000 { + format!("{:.1}M", n as f64 / 1_000_000.0) + } else if n >= 10_000 { + format!("{:.1}K", n as f64 / 1_000.0) + } else { + let s = n.to_string(); + let mut result = String::new(); + for (i, c) in s.chars().rev().enumerate() { + if i > 0 && i % 3 == 0 { + result.push(','); + } + result.push(c); + } + result.chars().rev().collect() + } +} + +fn format_ms(ms: u64) -> String { + if ms >= 1000 { format!("{:.1}s", ms as f64 / 1000.0) } else { format!("{ms}ms") } +} + +fn format_elapsed(secs: f64) -> String { + let total = secs as u64; + let hours = total / 3600; + let minutes = (total % 3600) / 60; + let seconds = total % 60; + if hours > 0 { + format!("{hours}h{minutes:02}m{seconds:02}s") + } else if minutes > 0 { + format!("{minutes}m{seconds:02}s") + } else { + format!("{seconds}s") + } +} diff --git a/crates/basectl/src/app/views/mod.rs b/crates/basectl/src/app/views/mod.rs index e892b94..f42446a 100644 --- a/crates/basectl/src/app/views/mod.rs +++ b/crates/basectl/src/app/views/mod.rs @@ -4,6 +4,7 @@ mod da_monitor; mod factory; mod flashblocks; mod home; +mod loadtest; pub use command_center::CommandCenterView; pub use config::ConfigView; @@ -11,3 +12,4 @@ pub use da_monitor::DaMonitorView; pub use factory::create_view; pub use flashblocks::FlashblocksView; pub use home::HomeView; +pub use loadtest::LoadTestView; diff --git a/crates/basectl/src/commands/common.rs b/crates/basectl/src/commands/common.rs index 187beee..e9533d3 100644 --- a/crates/basectl/src/commands/common.rs +++ b/crates/basectl/src/commands/common.rs @@ -44,10 +44,6 @@ const BLOCK_COLORS: [Color; 24] = [ const EIGHTH_BLOCKS: [char; 8] = ['▏', '▎', '▍', '▌', '▋', '▊', '▉', '█']; -// ============================================================================= -// Color Constants -// ============================================================================= - // Primary colors pub const COLOR_BASE_BLUE: Color = Color::Rgb(0, 82, 255); pub const COLOR_ACTIVE_BORDER: Color = Color::Rgb(100, 180, 255); @@ -62,25 +58,21 @@ pub const COLOR_BURN: Color = Color::Rgb(100, 200, 100); pub const COLOR_TARGET: Color = Color::Rgb(255, 200, 100); pub const COLOR_GAS_FILL: Color = Color::Rgb(100, 180, 255); -// ============================================================================= -// Duration Constants -// ============================================================================= - pub const EVENT_POLL_TIMEOUT: Duration = Duration::from_millis(100); pub const RATE_WINDOW_30S: Duration = Duration::from_secs(30); pub const RATE_WINDOW_2M: Duration = Duration::from_secs(120); pub const RATE_WINDOW_5M: Duration = Duration::from_secs(300); -// ============================================================================= -// Shared Data Types -// ============================================================================= - #[derive(Clone, Debug)] pub struct 
FlashblockEntry { pub block_number: u64, pub index: u64, pub tx_count: usize, pub gas_used: u64, + /// Cumulative tx count for this block (sum of all flash indices so far) + pub cumulative_tx_count: usize, + /// Cumulative gas used for this block (sum of all flash indices so far) + pub cumulative_gas_used: u64, pub gas_limit: u64, pub base_fee: Option, pub prev_base_fee: Option, @@ -144,11 +136,11 @@ impl BatchSubmission { } pub fn l1_block_display(&self) -> String { - self.l1_block_number.map(|n| n.to_string()).unwrap_or_else(|| "-".to_string()) + self.l1_block_number.map_or_else(|| "-".to_string(), |n| n.to_string()) } pub fn compression_display(&self) -> String { - self.compression_ratio().map(|r| format!("{r:.2}x")).unwrap_or_else(|| "-".to_string()) + self.compression_ratio().map_or_else(|| "-".to_string(), |r| format!("{r:.2}x")) } } @@ -198,10 +190,6 @@ pub struct LoadingState { pub total_blocks: u64, } -// ============================================================================= -// DA Tracker - Shared State Management for DA Monitoring -// ============================================================================= - #[derive(Debug)] pub struct DaTracker { pub safe_l2_block: u64, @@ -318,10 +306,6 @@ impl DaTracker { } } -// ============================================================================= -// Formatting Functions -// ============================================================================= - pub fn format_bytes(bytes: u64) -> String { if bytes >= 1_000_000_000 { format!("{:.1}G", bytes as f64 / 1_000_000_000.0) diff --git a/crates/basectl/src/rpc.rs b/crates/basectl/src/rpc.rs index 23c7f3f..bd10ea0 100644 --- a/crates/basectl/src/rpc.rs +++ b/crates/basectl/src/rpc.rs @@ -40,6 +40,7 @@ pub async fn fetch_sync_status(op_node_rpc: &str) -> Result { pub async fn run_flashblock_ws(url: String, tx: mpsc::Sender) -> Result<()> { let (ws_stream, _) = connect_async(&url).await?; + tracing::info!(url = %url, "Connected to flashblocks websocket"); let (_, mut read) = ws_stream.split(); while let Some(msg) = read.next().await { @@ -66,6 +67,7 @@ pub async fn run_flashblock_ws_timestamped( tx: mpsc::Sender, ) -> Result<()> { let (ws_stream, _) = connect_async(&url).await?; + tracing::info!(url = %url, "Connected to flashblocks websocket (timestamped)"); let (_, mut read) = ws_stream.split(); while let Some(msg) = read.next().await { @@ -128,14 +130,13 @@ pub async fn fetch_initial_backlog(l2_rpc: &str, op_node_rpc: &str) -> Result() }) - .unwrap_or(0) } }) .buffer_unordered(CONCURRENT_BLOCK_FETCHES) @@ -177,14 +178,13 @@ pub async fn fetch_initial_backlog_with_progress( .await .ok() .flatten() - .map(|block| { + .map_or(0, |block| { block .transactions .txns() .map(|tx| tx.inner.input().len() as u64) .sum::() }) - .unwrap_or(0) } }) .buffer_unordered(CONCURRENT_BLOCK_FETCHES) @@ -236,9 +236,8 @@ pub async fn run_block_fetcher( mut request_rx: mpsc::Receiver, result_tx: mpsc::Sender, ) { - let provider = match ProviderBuilder::new().connect(&l2_rpc).await { - Ok(p) => p, - Err(_) => return, + let Ok(provider) = ProviderBuilder::new().connect(&l2_rpc).await else { + return; }; while let Some(block_num) = request_rx.recv().await { @@ -324,9 +323,8 @@ pub async fn run_l1_batcher_watcher( batcher_address: Address, result_tx: mpsc::Sender, ) { - let provider = match ProviderBuilder::new().connect(&l1_rpc).await { - Ok(p) => p, - Err(_) => return, + let Ok(provider) = ProviderBuilder::new().connect(&l1_rpc).await else { + return; }; let mut last_block: Option = None; @@ 
-335,12 +333,11 @@ pub async fn run_l1_batcher_watcher( loop { interval.tick().await; - let latest = match provider.get_block_number().await { - Ok(n) => n, - Err(_) => continue, + let Ok(latest) = provider.get_block_number().await else { + continue; }; - let start_block = last_block.map(|b| b + 1).unwrap_or_else(|| latest.saturating_sub(5)); + let start_block = last_block.map_or_else(|| latest.saturating_sub(5), |b| b + 1); for block_num in start_block..=latest { if let Ok(Some(block)) = diff --git a/crates/gobrr/Cargo.toml b/crates/gobrr/Cargo.toml new file mode 100644 index 0000000..f5018ba --- /dev/null +++ b/crates/gobrr/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "gobrr" +version.workspace = true +edition.workspace = true +license.workspace = true + +[lints] +workspace = true + +[dependencies] +tokio = { workspace = true } +anyhow = { workspace = true } +tracing = { workspace = true } +rand = { workspace = true } +url = { workspace = true } +reqwest = { workspace = true } +alloy-primitives = { workspace = true } +alloy-provider = { workspace = true, features = ["txpool-api"] } +alloy-network = { workspace = true } +alloy-signer = { workspace = true } +alloy-signer-local = { workspace = true } +alloy-consensus = { workspace = true } +alloy-rpc-types-eth = { workspace = true } +alloy-rpc-client = { workspace = true } +alloy-transport-http = { workspace = true } +op-alloy-network = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +humantime = { workspace = true } +base-flashtypes = { workspace = true } +tokio-tungstenite = { workspace = true } +futures-util = { workspace = true } diff --git a/crates/gobrr/src/blocks.rs b/crates/gobrr/src/blocks.rs new file mode 100644 index 0000000..28c4ce6 --- /dev/null +++ b/crates/gobrr/src/blocks.rs @@ -0,0 +1,92 @@ +use std::time::Duration; + +use alloy_provider::Provider; +use alloy_rpc_types_eth::BlockNumberOrTag; +use anyhow::{Context, Result}; +use tokio::sync::broadcast; +use tracing::{debug, info, warn}; + +use crate::client::{self, Provider as OpProvider}; + +/// An OP-stack block fetched from the RPC. +pub(crate) type OpBlock = ::BlockResponse; + +/// How often the watcher tries to fetch the next block. +const BLOCK_POLL_INTERVAL: Duration = Duration::from_millis(250); + +/// Watches for new blocks and broadcasts events to consumers +pub(crate) struct BlockWatcher { + provider: OpProvider, +} + +impl BlockWatcher { + /// Creates a new `BlockWatcher` + pub(crate) fn new(http_client: reqwest::Client, rpc_url: &str) -> Result { + let provider = client::create_provider(http_client, rpc_url)?; + Ok(Self { provider }) + } + + /// Runs the block watcher loop, broadcasting block events + pub(crate) async fn run( + self, + block_tx: broadcast::Sender, + mut shutdown: broadcast::Receiver<()>, + ) -> Result<()> { + // Get the starting block number + let mut last_block = + self.provider.get_block_number().await.context("Failed to get initial block number")?; + + debug!(block = last_block, "Block watcher started"); + + loop { + tokio::select! 
{
+ biased;
+ _ = shutdown.recv() => {
+ debug!("Block watcher shutting down");
+ break;
+ }
+ _ = tokio::time::sleep(BLOCK_POLL_INTERVAL) => {
+ match self.provider.get_block_by_number(BlockNumberOrTag::Number(last_block + 1)).full().await {
+ Ok(Some(block)) => {
+ last_block += 1;
+ if let Err(e) = block_tx.send(block) {
+ warn!(block = last_block, error = %e, "Failed to broadcast block event");
+ }
+ }
+ Ok(None) => {} // not yet available
+ Err(e) => {
+ warn!(block = last_block + 1, error = %e, "Failed to fetch block");
+ }
+ }
+ }
+ }
+ }
+
+ Ok(())
+ }
+}
+
+/// Runs a block logger that subscribes to block events and logs them.
+/// Exits when the block channel closes, which happens after the drain period.
+pub(crate) async fn run_block_logger(mut block_rx: broadcast::Receiver<OpBlock>) {
+ loop {
+ match block_rx.recv().await {
+ Ok(block) => {
+ info!(
+ blockNum = block.header.number,
+ gasUsed = block.header.gas_used,
+ gasLimit = block.header.gas_limit,
+ txnCount = block.transactions.hashes().count(),
+ "New block"
+ );
+ }
+ Err(broadcast::error::RecvError::Lagged(n)) => {
+ debug!(missed = n, "Block logger lagged behind");
+ }
+ Err(broadcast::error::RecvError::Closed) => {
+ debug!("Block broadcast channel closed, logger shutting down");
+ break;
+ }
+ }
+ }
+}
diff --git a/crates/gobrr/src/client.rs b/crates/gobrr/src/client.rs
new file mode 100644
index 0000000..a7be732
--- /dev/null
+++ b/crates/gobrr/src/client.rs
@@ -0,0 +1,52 @@
+use std::time::Duration;
+
+use alloy_network::EthereumWallet;
+use alloy_provider::{
+ Identity, ProviderBuilder, RootProvider,
+ fillers::{FillProvider, JoinFill, WalletFiller},
+};
+use alloy_rpc_client::RpcClient;
+use alloy_transport_http::Http;
+use anyhow::{Context, Result};
+use op_alloy_network::Optimism;
+
+/// Concrete provider type for read-only operations on Optimism
+pub(crate) type Provider = RootProvider<Optimism>;
+
+/// Concrete provider type with wallet for signing transactions on Optimism
+pub(crate) type WalletProvider =
+ FillProvider<JoinFill<Identity, WalletFiller<EthereumWallet>>, Provider, Optimism>;
+
+/// Creates a shared HTTP client with connection pooling configured to reduce DNS pressure.
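+///
+/// Intended usage (an illustrative sketch, not additional behavior in this patch; the
+/// `rpc_url` and `wallet` values below are placeholders): build the client once and hand
+/// clones of it to every provider so all tasks share one connection pool.
+///
+/// ```ignore
+/// let http_client = create_shared_client();
+/// let read_only = create_provider(http_client.clone(), rpc_url)?;
+/// let signing = create_wallet_provider(http_client, rpc_url, wallet)?;
+/// ```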
+pub(crate) fn create_shared_client() -> reqwest::Client {
+ reqwest::Client::builder()
+ .pool_max_idle_per_host(100)
+ .pool_idle_timeout(Duration::from_secs(90))
+ .tcp_keepalive(Duration::from_secs(60))
+ .connect_timeout(Duration::from_secs(30))
+ .timeout(Duration::from_secs(60))
+ .build()
+ .expect("Failed to build HTTP client")
+}
+
+/// Creates a provider without a wallet for read-only operations
+pub(crate) fn create_provider(http_client: reqwest::Client, rpc_url: &str) -> Result<Provider> {
+ let url: url::Url = rpc_url.parse().context("Invalid RPC URL")?;
+ let http = Http::with_client(http_client, url);
+ let rpc_client = RpcClient::new(http, true);
+ Ok(RootProvider::new(rpc_client))
+}
+
+/// Creates a provider with an Ethereum wallet for signing transactions
+pub(crate) fn create_wallet_provider(
+ http_client: reqwest::Client,
+ rpc_url: &str,
+ wallet: EthereumWallet,
+) -> Result<WalletProvider> {
+ let url: url::Url = rpc_url.parse().context("Invalid RPC URL")?;
+ let http = Http::with_client(http_client, url);
+ let rpc_client = RpcClient::new(http, true);
+ let root: Provider = RootProvider::new(rpc_client);
+ // Use filler() to add only the wallet filler without default fillers
+ Ok(ProviderBuilder::default().filler(WalletFiller::new(wallet)).connect_provider(root))
+}
diff --git a/crates/gobrr/src/config.rs b/crates/gobrr/src/config.rs
new file mode 100644
index 0000000..0fc4b99
--- /dev/null
+++ b/crates/gobrr/src/config.rs
@@ -0,0 +1,166 @@
+use std::{fmt, time::Duration};
+
+use alloy_primitives::{Address, Bytes, U256};
+use anyhow::{Context, Result, bail};
+use rand::{Rng, distributions::WeightedIndex, prelude::Distribution};
+use serde::{Deserialize, Serialize};
+
+/// Strongly-typed transaction category used across the pipeline.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize)]
+pub enum TxType {
+ EthSend,
+ EthSendCalldata,
+}
+
+impl fmt::Display for TxType {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::EthSend => write!(f, "eth_send"),
+ Self::EthSendCalldata => write!(f, "eth_send_calldata"),
+ }
+ }
+}
+
+/// Gas limit for simple ETH transfer (no calldata)
+const GAS_LIMIT_SIMPLE: u64 = 21_000;
+
+/// Gas per calldata byte. OP Stack L2s charge ~41 gas/byte due to L1 data costs.
+/// We use 48 for safety margin.
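+///
+/// Worked example: a 1,000-byte payload is assigned a gas limit of
+/// 21_000 + 1_000 * 48 = 69_000 by `compute_gas_limit` below.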
+const GAS_PER_CALLDATA_BYTE: u64 = 48; + +/// Computes gas limit for calldata transactions: 21000 base + `GAS_PER_CALLDATA_BYTE` per byte +const fn compute_gas_limit(calldata_len: usize) -> u64 { + 21_000 + (calldata_len as u64) * GAS_PER_CALLDATA_BYTE +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type")] +pub(crate) enum TxKind { + #[serde(rename = "eth_send")] + EthSend, + #[serde(rename = "eth_send_calldata")] + EthSendCalldata { max_size: usize }, +} + +pub(crate) struct TxParams { + pub(crate) to: Address, + pub(crate) value: U256, + pub(crate) input: Bytes, + pub(crate) gas_limit: u64, + pub(crate) tx_type: TxType, +} + +impl TxKind { + pub(crate) fn build(&self, rng: &mut impl Rng) -> TxParams { + match self { + Self::EthSend => TxParams { + to: Address::ZERO, + value: U256::ZERO, + input: Bytes::new(), + gas_limit: GAS_LIMIT_SIMPLE, + tx_type: TxType::EthSend, + }, + Self::EthSendCalldata { max_size } => { + let input = generate_calldata(rng, *max_size); + let gas_limit = compute_gas_limit(input.len()); + TxParams { + to: Address::ZERO, + value: U256::ZERO, + input, + gas_limit, + tx_type: TxType::EthSendCalldata, + } + } + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WeightedTxKind { + pub weight: u32, + #[serde(flatten)] + pub(crate) kind: TxKind, +} + +#[derive(Clone)] +pub(crate) struct TxSelector { + kinds: Vec, + dist: WeightedIndex, +} + +impl TxSelector { + pub(crate) fn new(weighted: &[WeightedTxKind]) -> Result { + if weighted.is_empty() { + bail!("transactions list must not be empty"); + } + let weights: Vec = weighted.iter().map(|w| w.weight).collect(); + if weights.contains(&0) { + bail!("all transaction weights must be > 0"); + } + let dist = WeightedIndex::new(&weights).context("invalid weights")?; + let kinds = weighted.iter().map(|w| w.kind.clone()).collect(); + Ok(Self { kinds, dist }) + } + + pub(crate) fn select(&self, rng: &mut impl Rng) -> &TxKind { + &self.kinds[self.dist.sample(rng)] + } +} + +const fn default_sender_count() -> u32 { + 10 +} + +const fn default_in_flight() -> u32 { + 16 +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TestConfig { + pub rpc: String, + pub mnemonic: String, + pub funder_key: String, + pub funding_amount: String, + #[serde(default = "default_sender_count")] + pub sender_count: u32, + #[serde(default)] + pub sender_offset: u32, + #[serde(default = "default_in_flight")] + pub in_flight_per_sender: u32, + pub duration: Option, + pub target_tps: Option, + #[serde(default)] + pub txpool_hosts: Vec, + /// WebSocket URL for flashblocks. FB inclusion times are tracked via this stream. + pub flashblocks_ws: String, + pub transactions: Vec, +} + +impl TestConfig { + pub fn load(path: &str) -> Result { + let contents = std::fs::read_to_string(path) + .with_context(|| format!("failed to read config file: {path}"))?; + serde_yaml::from_str(&contents).context("failed to parse config YAML") + } + + pub fn parse_duration(&self) -> Result> { + self.duration + .as_ref() + .map(|d| { + humantime::parse_duration(d.trim()) + .with_context(|| format!("invalid duration: {d}")) + }) + .transpose() + } + + pub fn parse_funding_amount(&self) -> Result { + self.funding_amount.parse::().context("invalid funding_amount") + } +} + +/// Generates random calldata of the specified size. +/// Uses high-entropy random bytes that are uncompressible. 
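+///
+/// Random, incompressible bytes keep the measured load honest: compressible calldata
+/// would shrink in the batcher/DA path and understate the per-transaction data cost.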
+pub(crate) fn generate_calldata(rng: &mut impl Rng, size: usize) -> Bytes { + let data: Vec = (0..size).map(|_| rng.r#gen()).collect(); + Bytes::from(data) +} diff --git a/crates/gobrr/src/confirmer.rs b/crates/gobrr/src/confirmer.rs new file mode 100644 index 0000000..bd3dbc3 --- /dev/null +++ b/crates/gobrr/src/confirmer.rs @@ -0,0 +1,72 @@ +use std::collections::HashSet; + +use alloy_primitives::B256; +use tokio::sync::{broadcast, mpsc}; +use tracing::{debug, info, warn}; + +use crate::{blocks::OpBlock, tracker::TrackerEvent}; + +/// Runs the confirmer task that matches pending transactions against block contents. +/// +/// The confirmer exits when the block channel closes, which happens after the drain period. +/// This ensures it keeps processing confirmations until shutdown is complete. +pub(crate) async fn run_confirmer( + mut pending_rx: mpsc::UnboundedReceiver, + mut block_rx: broadcast::Receiver, + tracker_tx: mpsc::UnboundedSender, +) { + let mut pending: HashSet = HashSet::new(); + + loop { + tokio::select! { + biased; + Some(tx_hash) = pending_rx.recv() => { + pending.insert(tx_hash); + } + result = block_rx.recv() => { + match result { + Ok(block) => { + let mut total_txs: usize = 0; + let mut our_count: u64 = 0; + + for tx_hash in block.transactions.hashes() { + total_txs += 1; + if pending.remove(&tx_hash) { + our_count += 1; + if let Err(e) = tracker_tx.send(TrackerEvent::BlockReceived { + tx_hash, + }) { + warn!(tx_hash = %tx_hash, error = %e, "Failed to send block confirmation to tracker"); + } + } + } + + let gas_used_pct = if block.header.gas_limit > 0 { + (block.header.gas_used as f64 / block.header.gas_limit as f64) * 100.0 + } else { + 0.0 + }; + + if our_count > 0 || total_txs > 0 { + info!( + block = block.header.number, + our_txs = our_count, + total_txs, + gas_used_pct = format!("{gas_used_pct:.1}%"), + pending = pending.len(), + "Block inclusion" + ); + } + } + Err(broadcast::error::RecvError::Lagged(n)) => { + debug!(missed = n, "Confirmer lagged behind block events"); + } + Err(broadcast::error::RecvError::Closed) => { + debug!("Block broadcast channel closed, confirmer shutting down"); + break; + } + } + } + } + } +} diff --git a/crates/gobrr/src/flashblock_watcher.rs b/crates/gobrr/src/flashblock_watcher.rs new file mode 100644 index 0000000..13266b2 --- /dev/null +++ b/crates/gobrr/src/flashblock_watcher.rs @@ -0,0 +1,97 @@ +use std::collections::HashSet; + +use alloy_primitives::{B256, keccak256}; +use base_flashtypes::Flashblock; +use futures_util::StreamExt; +use tokio::sync::{broadcast, mpsc}; +use tokio_tungstenite::connect_async; +use tracing::{debug, info, warn}; + +use crate::tracker::TrackerEvent; + +/// Watches for flashblocks via WebSocket and reports transactions seen. +pub(crate) async fn run_flashblock_watcher( + ws_url: String, + mut pending_rx: mpsc::UnboundedReceiver, + tracker_tx: mpsc::UnboundedSender, + mut shutdown: broadcast::Receiver<()>, +) { + let mut pending: HashSet = HashSet::new(); + + // Connect to WebSocket + let ws_stream = match connect_async(&ws_url).await { + Ok((stream, _)) => { + info!(url = %ws_url, "Connected to flashblocks WebSocket"); + stream + } + Err(e) => { + warn!(url = %ws_url, error = %e, "Failed to connect to flashblocks WebSocket"); + return; + } + }; + + let (_, mut read) = ws_stream.split(); + + loop { + tokio::select! 
{ + biased; + // Handle new pending tx hashes to track + Some(tx_hash) = pending_rx.recv() => { + pending.insert(tx_hash); + } + // Handle incoming flashblock messages + msg_result = read.next() => { + match msg_result { + Some(Ok(msg)) => { + if !msg.is_binary() && !msg.is_text() { + continue; + } + match Flashblock::try_decode_message(msg.into_data()) { + Ok(fb) => { + let mut our_count = 0u64; + // Extract tx hashes from flashblock transactions + for tx_bytes in &fb.diff.transactions { + // Compute tx hash from raw transaction bytes + let tx_hash = keccak256(tx_bytes); + if pending.remove(&tx_hash) { + our_count += 1; + if let Err(e) = tracker_tx.send(TrackerEvent::FlashblockReceived { + tx_hash, + }) { + warn!(tx_hash = %tx_hash, error = %e, "Failed to send FB inclusion to tracker"); + } + } + } + if our_count > 0 { + debug!( + block = fb.metadata.block_number, + index = fb.index, + our_txs = our_count, + pending = pending.len(), + "Flashblock inclusion" + ); + } + } + Err(e) => { + debug!(error = %e, "Failed to decode flashblock message"); + } + } + } + Some(Err(e)) => { + warn!(error = %e, "WebSocket error"); + break; + } + None => { + debug!("WebSocket stream closed"); + break; + } + } + } + // Handle shutdown + _ = shutdown.recv() => { + debug!("Flashblock watcher shutting down"); + break; + } + } + } +} diff --git a/crates/gobrr/src/funder.rs b/crates/gobrr/src/funder.rs new file mode 100644 index 0000000..5eba2a5 --- /dev/null +++ b/crates/gobrr/src/funder.rs @@ -0,0 +1,281 @@ +use std::time::Duration; + +use alloy_consensus::TxEnvelope; +use alloy_network::{EthereumWallet, ReceiptResponse, TransactionBuilder, eip2718::Encodable2718}; +use alloy_primitives::{Address, B256, Bytes, U256}; +use alloy_provider::{Provider, WalletProvider as AlloyWalletProvider}; +use alloy_rpc_client::BatchRequest; +use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionRequest}; +use alloy_signer_local::PrivateKeySigner; +use anyhow::{Context, Result}; +use futures_util::stream::{self, StreamExt}; +use tracing::{info, warn}; + +use crate::client::{WalletProvider, create_wallet_provider}; + +/// Timeout for waiting for a funding transaction receipt +const RECEIPT_TIMEOUT: Duration = Duration::from_secs(60); + +/// Number of requests per JSON-RPC batch +const BATCH_SIZE: usize = 10; +/// Number of batches to run concurrently +const CONCURRENT_BATCHES: usize = 10; + +/// Handles funding sender accounts to a target balance +pub(crate) struct Funder { + provider: WalletProvider, + chain_id: u64, + nonce: u64, +} + +impl Funder { + /// Creates a new Funder with the given HTTP client, RPC URL, signer, and chain ID + pub(crate) async fn new( + http_client: reqwest::Client, + rpc_url: &str, + signer: PrivateKeySigner, + chain_id: u64, + ) -> Result { + let funder_address = signer.address(); + let wallet = EthereumWallet::from(signer); + let provider = create_wallet_provider(http_client, rpc_url, wallet)?; + + let funder_balance = + provider.get_balance(funder_address).await.context("Failed to get funder balance")?; + + // Get initial nonce from pending state + let nonce = provider + .get_transaction_count(funder_address) + .block_id(BlockNumberOrTag::Pending.into()) + .await + .context("Failed to get initial nonce")?; + + info!( + funder = %funder_address, + balance = %funder_balance, + nonce, + "Funder account" + ); + + Ok(Self { provider, chain_id, nonce }) + } + + /// Funds all sender addresses to the target balance using batched RPC calls + pub(crate) async fn fund(&mut self, addresses: &[Address], 
target_balance: U256) -> Result<()> { + // Step 1: Batch get all balances (10 batches of 10 concurrently) + info!(count = addresses.len(), "Fetching balances for all senders"); + let balances = get_balances_batch(&self.provider, addresses).await?; + + // Step 2: Identify accounts needing funding, assign nonces + let mut to_fund: Vec<(usize, Address, U256, u64)> = Vec::new(); + for (i, (&addr, &balance)) in addresses.iter().zip(&balances).enumerate() { + if balance >= target_balance { + info!(sender = i, address = %addr, balance = %balance, "Already funded"); + continue; + } + let amount_needed = target_balance - balance; + info!( + sender = i, + address = %addr, + current = %balance, + needed = %amount_needed, + "Needs funding" + ); + to_fund.push((i, addr, amount_needed, self.nonce)); + self.nonce += 1; + } + + if to_fund.is_empty() { + info!("All senders already funded"); + return Ok(()); + } + + // Estimate fees once for all transactions + let fees = + self.provider.estimate_eip1559_fees().await.context("Failed to estimate fees")?; + let max_fee = fees.max_fee_per_gas; + + // Step 3: Chunk into batches and send concurrently + info!(count = to_fund.len(), "Sending funding transactions in batches"); + let chunks: Vec> = + to_fund.chunks(BATCH_SIZE).map(|c| c.to_vec()).collect(); + + let wallet = self.provider.wallet(); + let chain_id = self.chain_id; + let provider = &self.provider; + + let batch_results: Vec>> = stream::iter(chunks) + .map(|chunk| async move { + send_funding_batch(provider, wallet, chain_id, max_fee, chunk).await + }) + .buffer_unordered(CONCURRENT_BATCHES) + .collect() + .await; + + // Collect all sent transactions + let mut sent_txs: Vec<(usize, Address, B256)> = Vec::new(); + for result in batch_results { + match result { + Ok(txs) => sent_txs.extend(txs), + Err(e) => { + warn!(error = %e, "Batch funding failed"); + return Err(e).context("Failed to send funding batch"); + } + } + } + + if sent_txs.is_empty() { + info!("No funding transactions were sent"); + return Ok(()); + } + + // Step 4: Wait for confirmations by polling for receipts + info!(count = sent_txs.len(), "Waiting for funding transactions to confirm"); + let start = std::time::Instant::now(); + let poll_interval = Duration::from_millis(500); + + for (i, address, tx_hash) in sent_txs { + loop { + if start.elapsed() > RECEIPT_TIMEOUT { + anyhow::bail!( + "Timeout waiting for funding receipt for sender {i} ({}s)", + RECEIPT_TIMEOUT.as_secs() + ); + } + + match self.provider.get_transaction_receipt(tx_hash).await { + Ok(Some(receipt)) => { + if receipt.status() { + info!( + sender = i, + address = %address, + tx_hash = %tx_hash, + "Funding confirmed" + ); + } else { + anyhow::bail!("Funding transaction failed for sender {i}"); + } + break; + } + Ok(None) => { + // Not yet mined, wait and retry + tokio::time::sleep(poll_interval).await; + } + Err(e) => { + warn!(sender = i, tx_hash = %tx_hash, error = %e, "Error fetching receipt, retrying"); + tokio::time::sleep(poll_interval).await; + } + } + } + } + + info!("All senders funded successfully"); + Ok(()) + } +} + +/// Fetches balances for all addresses using batched RPC calls with concurrency +async fn get_balances_batch(provider: &WalletProvider, addresses: &[Address]) -> Result> { + let chunks: Vec<&[Address]> = addresses.chunks(BATCH_SIZE).collect(); + + // Use buffered (not buffer_unordered) to preserve order + let results: Vec>> = stream::iter(chunks) + .map(|chunk| async move { + let mut batch = BatchRequest::new(provider.client()); + let mut futures = 
Vec::with_capacity(chunk.len()); + + for &addr in chunk { + match batch.add_call::<_, U256>("eth_getBalance", &(addr, "latest")) { + Ok(fut) => futures.push(fut), + Err(e) => { + return Err(anyhow::anyhow!("Failed to add balance call to batch: {e}")); + } + } + } + + batch.send().await.context("Failed to send balance batch request")?; + + let mut balances = Vec::with_capacity(futures.len()); + for fut in futures { + let balance = fut.await.context("Failed to get balance from batch")?; + balances.push(balance); + } + + Ok(balances) + }) + .buffered(CONCURRENT_BATCHES) + .collect() + .await; + + let mut all_balances = Vec::with_capacity(addresses.len()); + for result in results { + all_balances.extend(result?); + } + + Ok(all_balances) +} + +/// Sends a batch of funding transactions using JSON-RPC batching +async fn send_funding_batch( + provider: &WalletProvider, + wallet: &EthereumWallet, + chain_id: u64, + max_fee: u128, + batch_items: Vec<(usize, Address, U256, u64)>, +) -> Result> { + // Sign all transactions first + let mut signed_txs: Vec<(usize, Address, Bytes, B256)> = Vec::with_capacity(batch_items.len()); + + for (i, to, amount, nonce) in batch_items { + let tx = TransactionRequest::default() + .with_to(to) + .with_value(amount) + .with_nonce(nonce) + .with_chain_id(chain_id) + .with_gas_limit(21_000) + .with_max_fee_per_gas(max_fee) + .with_max_priority_fee_per_gas(0); + + let tx_envelope: TxEnvelope = tx.build(wallet).await.context("Failed to sign tx")?; + let tx_hash = *tx_envelope.tx_hash(); + let raw_bytes = Bytes::from(tx_envelope.encoded_2718()); + + signed_txs.push((i, to, raw_bytes, tx_hash)); + } + + // Build and send batch request + let mut batch = BatchRequest::new(provider.client()); + let mut queued: Vec<(usize, Address, B256, _)> = Vec::with_capacity(signed_txs.len()); + + for (i, addr, raw_bytes, tx_hash) in signed_txs { + match batch.add_call::<_, B256>("eth_sendRawTransaction", &(raw_bytes,)) { + Ok(fut) => queued.push((i, addr, tx_hash, fut)), + Err(e) => { + warn!(sender = i, address = %addr, error = ?e, "Failed to add tx to batch"); + return Err(anyhow::anyhow!("Failed to add tx to batch: {e}")); + } + } + } + + if queued.is_empty() { + return Ok(Vec::new()); + } + + batch.send().await.context("Failed to send funding batch request")?; + + // Collect results + let mut results = Vec::with_capacity(queued.len()); + for (i, addr, tx_hash, fut) in queued { + match fut.await { + Ok(_returned_hash) => { + results.push((i, addr, tx_hash)); + } + Err(e) => { + warn!(sender = i, address = %addr, tx_hash = %tx_hash, error = %e, "Tx failed in batch"); + return Err(anyhow::anyhow!("Tx {tx_hash} failed in batch: {e}")); + } + } + } + + Ok(results) +} diff --git a/crates/gobrr/src/handle.rs b/crates/gobrr/src/handle.rs new file mode 100644 index 0000000..9b634ec --- /dev/null +++ b/crates/gobrr/src/handle.rs @@ -0,0 +1,218 @@ +use std::time::Duration; + +use alloy_primitives::B256; +use tokio::{ + sync::{broadcast, mpsc}, + task::JoinHandle, +}; +use tracing::{info, warn}; + +use crate::tracker::{self, Stats, TrackerEvent}; + +/// Handle to a running load test, returned by `start_load_test`. 
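+///
+/// Typical lifecycle, as an illustrative sketch (the config path is a placeholder; in this
+/// patch the orchestrator drives these calls):
+///
+/// ```ignore
+/// let handle = start_load_test("loadtest.yaml").await?;
+/// if let Some(stats) = handle.get_stats().await {
+///     tracing::info!(sent = stats.sent, "progress");
+/// }
+/// handle.shutdown();                               // stop producing new transactions
+/// let final_stats = handle.wait_and_drain().await; // wait out pending txs, collect totals
+/// ```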
+#[derive(Debug)] +pub struct LoadTestHandle { + /// Tracker channel for stats polling + tracker_tx: mpsc::UnboundedSender, + /// Shutdown signal for preparers/signers/senders + shutdown_tx: broadcast::Sender<()>, + /// Shutdown signal for drain-phase tasks (stats reporter, block watcher) + drain_shutdown_tx: broadcast::Sender<()>, + /// Block event channel (dropped to signal consumers to exit) + block_tx: broadcast::Sender, + /// Pipeline task handles (preparers, signers, senders) + pipeline_handles: Vec>, + /// Stats reporter handle + stats_handle: JoinHandle<()>, + /// Block watcher handle + block_handle: JoinHandle<()>, + /// Block logger handle + logger_handle: JoinHandle<()>, + /// Confirmer handle + confirmer_handle: JoinHandle<()>, + /// Confirmer pending tx sender (dropped after shutdown to signal confirmer) + confirmer_pending_tx: mpsc::UnboundedSender, + /// Flashblock watcher handle + flashblock_handle: JoinHandle<()>, + /// Flashblock pending tx sender (dropped after shutdown to signal watcher) + flashblock_pending_tx: mpsc::UnboundedSender, + /// Rate limiter replenisher handle + rate_limiter_handle: Option>, + /// Tracker task handle + tracker_handle: JoinHandle<()>, + /// Management hosts from config + txpool_hosts: Vec, + /// Test duration + duration: Option, + /// HTTP client + http_client: reqwest::Client, + /// Target TPS + target_tps: Option, +} + +/// Cloneable stats poller that can be used during drain when the handle is consumed. +#[derive(Debug, Clone)] +pub struct StatsPoller { + tracker_tx: mpsc::UnboundedSender, +} + +impl StatsPoller { + /// Poll current stats from the tracker. + pub async fn get_stats(&self) -> Option { + tracker::get_stats(&self.tracker_tx).await + } +} + +impl LoadTestHandle { + #[allow(clippy::too_many_arguments)] + pub(crate) const fn new( + tracker_tx: mpsc::UnboundedSender, + shutdown_tx: broadcast::Sender<()>, + drain_shutdown_tx: broadcast::Sender<()>, + block_tx: broadcast::Sender, + pipeline_handles: Vec>, + stats_handle: JoinHandle<()>, + block_handle: JoinHandle<()>, + logger_handle: JoinHandle<()>, + confirmer_handle: JoinHandle<()>, + confirmer_pending_tx: mpsc::UnboundedSender, + flashblock_handle: JoinHandle<()>, + flashblock_pending_tx: mpsc::UnboundedSender, + rate_limiter_handle: Option>, + tracker_handle: JoinHandle<()>, + txpool_hosts: Vec, + duration: Option, + http_client: reqwest::Client, + target_tps: Option, + ) -> Self { + Self { + tracker_tx, + shutdown_tx, + drain_shutdown_tx, + block_tx, + pipeline_handles, + stats_handle, + block_handle, + logger_handle, + confirmer_handle, + confirmer_pending_tx, + flashblock_handle, + flashblock_pending_tx, + rate_limiter_handle, + tracker_handle, + txpool_hosts, + duration, + http_client, + target_tps, + } + } + + /// Poll current stats from the tracker. + pub async fn get_stats(&self) -> Option { + tracker::get_stats(&self.tracker_tx).await + } + + /// Create a cloneable stats poller (useful during drain when handle is consumed). + pub fn stats_poller(&self) -> StatsPoller { + StatsPoller { tracker_tx: self.tracker_tx.clone() } + } + + /// Signal the pipeline to stop sending new transactions. + pub fn shutdown(&self) { + info!("Shutting down load test..."); + if let Err(e) = self.shutdown_tx.send(()) { + warn!(error = %e, "Failed to send shutdown signal"); + } + } + + /// Consume the handle, wait for pending transactions to drain, return final stats. 
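+ ///
+ /// Draining first waits for the pipeline tasks to exit, then polls the tracker until the
+ /// pending count reaches zero, giving up if no confirmation progress is made for 20s.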
+ pub async fn wait_and_drain(self) -> Stats { + // Wait for pipeline tasks to complete + for (i, handle) in self.pipeline_handles.into_iter().enumerate() { + if let Err(e) = handle.await { + warn!(task = i, error = %e, "Task panicked during shutdown"); + } + } + + // Stop rate limiter replenisher + if let Some(handle) = self.rate_limiter_handle { + handle.abort(); + let _ = handle.await; + } + + // Close the pending channels now that all senders have finished + drop(self.confirmer_pending_tx); + drop(self.flashblock_pending_tx); + + // Wait for pending transactions to confirm + info!("Waiting for pending transactions to confirm..."); + let mut last_pending = u64::MAX; + let mut last_change = std::time::Instant::now(); + let stall_timeout = std::time::Duration::from_secs(20); + + loop { + let stats = tracker::get_stats(&self.tracker_tx).await.unwrap_or_default(); + let pending = stats.pending(); + + if pending == 0 { + info!("All transactions confirmed or timed out"); + break; + } + + if pending != last_pending { + last_pending = pending; + last_change = std::time::Instant::now(); + } else if last_change.elapsed() > stall_timeout { + warn!(pending, "No confirmation progress for 20s, proceeding with final report"); + break; + } + + info!(pending, "Waiting for confirmations..."); + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + } + + // Shut down drain-phase tasks + if let Err(e) = self.drain_shutdown_tx.send(()) { + warn!(error = %e, "Failed to send drain shutdown signal"); + } + let _ = self.stats_handle.await; + let _ = self.block_handle.await; + let _ = self.flashblock_handle.await; + + // Close block channel to signal consumers + drop(self.block_tx); + let _ = self.logger_handle.await; + let _ = self.confirmer_handle.await; + + // Get final stats + let final_stats = tracker::get_stats(&self.tracker_tx).await.unwrap_or_default(); + if let Err(e) = self.tracker_tx.send(TrackerEvent::Shutdown) { + warn!(error = %e, "Failed to send shutdown to tracker"); + } + if let Err(e) = self.tracker_handle.await { + warn!(error = %e, "Tracker task panicked"); + } + + final_stats + } + + /// Get management hosts from the config. + pub fn txpool_hosts(&self) -> &[String] { + &self.txpool_hosts + } + + /// Get the configured test duration. + pub const fn duration(&self) -> Option { + self.duration + } + + /// Get the shared HTTP client. + pub const fn http_client(&self) -> &reqwest::Client { + &self.http_client + } + + /// Get the configured target TPS. 
+ pub const fn target_tps(&self) -> Option { + self.target_tps + } +} diff --git a/crates/gobrr/src/lib.rs b/crates/gobrr/src/lib.rs new file mode 100644 index 0000000..f165926 --- /dev/null +++ b/crates/gobrr/src/lib.rs @@ -0,0 +1,25 @@ +mod blocks; +mod client; +mod config; +mod confirmer; +mod flashblock_watcher; +mod funder; +mod handle; +mod orchestrator; +mod runner; +mod sender; +mod signer; +mod stats; +mod tracker; +mod wallet; + +/// Type alias for sender identification +pub(crate) type SenderId = u32; + +pub use config::{TestConfig, TxType, WeightedTxKind}; +pub use handle::{LoadTestHandle, StatsPoller}; +pub use orchestrator::{ + LoadTestChannels, LoadTestPhase, LoadTestState, TxpoolHostStatus, activate, +}; +pub use runner::start_load_test; +pub use tracker::Stats; diff --git a/crates/gobrr/src/orchestrator.rs b/crates/gobrr/src/orchestrator.rs new file mode 100644 index 0000000..1ae49c2 --- /dev/null +++ b/crates/gobrr/src/orchestrator.rs @@ -0,0 +1,225 @@ +use std::time::Duration; + +use alloy_provider::ext::TxPoolApi; +use tokio::sync::{mpsc, oneshot}; + +use crate::{LoadTestHandle, Stats, StatsPoller}; + +#[derive(Debug, Clone)] +pub struct TxpoolHostStatus { + pub host: String, + pub pending: u64, + pub queued: u64, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum LoadTestPhase { + Starting, + Running, + Draining, + Complete, +} + +impl std::fmt::Display for LoadTestPhase { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Starting => write!(f, "Starting"), + Self::Running => write!(f, "Running"), + Self::Draining => write!(f, "Draining"), + Self::Complete => write!(f, "Complete"), + } + } +} + +/// Channels for receiving loadtest updates from background tasks. +#[derive(Debug)] +pub struct LoadTestChannels { + pub stats_rx: mpsc::Receiver, + pub txpool_rx: mpsc::Receiver>, + pub phase_rx: mpsc::Receiver, + pub shutdown_tx: oneshot::Sender<()>, +} + +#[derive(Debug)] +pub struct LoadTestState { + pub stats: Option, + stats_rx: Option>, + pub txpool_status: Vec, + txpool_rx: Option>>, + pub phase: LoadTestPhase, + phase_rx: Option>, + pub shutdown_tx: Option>, + pub txpool_hosts: Vec, + pub target_tps: Option, + pub duration: Option, + pub config_file: String, +} + +impl LoadTestState { + pub fn new( + channels: LoadTestChannels, + txpool_hosts: Vec, + target_tps: Option, + duration: Option, + config_file: String, + ) -> Self { + Self { + stats: None, + stats_rx: Some(channels.stats_rx), + txpool_status: Vec::new(), + txpool_rx: Some(channels.txpool_rx), + phase: LoadTestPhase::Starting, + phase_rx: Some(channels.phase_rx), + shutdown_tx: Some(channels.shutdown_tx), + txpool_hosts, + target_tps, + duration, + config_file, + } + } + + pub fn poll(&mut self) { + // Drain stats channel, keep latest + if let Some(ref mut rx) = self.stats_rx { + while let Ok(stats) = rx.try_recv() { + self.stats = Some(stats); + } + } + + // Drain txpool channel, keep latest + if let Some(ref mut rx) = self.txpool_rx { + while let Ok(status) = rx.try_recv() { + self.txpool_status = status; + } + } + + // Drain phase channel, keep latest + if let Some(ref mut rx) = self.phase_rx { + while let Ok(phase) = rx.try_recv() { + self.phase = phase; + } + } + } +} + +/// Spawn the load test lifecycle manager and txpool poller, returning a +/// [`LoadTestState`] that can be polled for updates. 
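+///
+/// Illustrative call pattern (a sketch only; in this patch the TUI is the caller, and the
+/// config path below is a placeholder): the returned state is polled on every UI tick.
+///
+/// ```ignore
+/// let handle = start_load_test("loadtest.yaml").await?;
+/// let mut state = activate(handle, "loadtest.yaml".to_string());
+/// loop {
+///     state.poll(); // drain the stats/txpool/phase channels, keeping the latest values
+///     if state.phase == LoadTestPhase::Complete {
+///         break;
+///     }
+///     tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+/// }
+/// ```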
+pub fn activate(handle: LoadTestHandle, config_file: String) -> LoadTestState { + let txpool_hosts = handle.txpool_hosts().to_vec(); + let target_tps = handle.target_tps(); + let duration = handle.duration(); + let http_client = handle.http_client().clone(); + + let (stats_tx, stats_rx) = mpsc::channel::(16); + let (txpool_tx, txpool_rx) = mpsc::channel::>(16); + let (phase_tx, phase_rx) = mpsc::channel::(16); + let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>(); + + let state = LoadTestState::new( + LoadTestChannels { stats_rx, txpool_rx, phase_rx, shutdown_tx }, + txpool_hosts.clone(), + target_tps, + duration, + config_file, + ); + + let poller = handle.stats_poller(); + tokio::spawn(loadtest_manager(handle, poller, shutdown_rx, stats_tx, phase_tx)); + + if !txpool_hosts.is_empty() { + tokio::spawn(txpool_poller(http_client, txpool_hosts, txpool_tx)); + } + + state +} + +/// Background task that manages the loadtest lifecycle. +async fn loadtest_manager( + handle: LoadTestHandle, + poller: StatsPoller, + shutdown_rx: oneshot::Receiver<()>, + stats_tx: mpsc::Sender, + phase_tx: mpsc::Sender, +) { + let _ = phase_tx.send(LoadTestPhase::Running).await; + + let duration = handle.duration(); + + // Single stats poll task that runs across both running and drain phases + let poll_stats_tx = stats_tx.clone(); + let poll_task = tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_millis(100)); + loop { + interval.tick().await; + if let Some(stats) = poller.get_stats().await + && poll_stats_tx.send(stats).await.is_err() + { + break; + } + } + }); + + // Wait for shutdown signal (TUI quit/drop) or duration + if let Some(d) = duration { + tokio::select! { + _ = tokio::time::sleep(d) => {} + _ = shutdown_rx => {} + } + } else { + let _ = shutdown_rx.await; + } + + // Drain phase + let _ = phase_tx.send(LoadTestPhase::Draining).await; + handle.shutdown(); + + let final_stats = handle.wait_and_drain().await; + poll_task.abort(); + + // Send final stats + let _ = stats_tx.send(final_stats).await; + let _ = phase_tx.send(LoadTestPhase::Complete).await; +} + +/// Background task that polls `txpool_status` on management hosts. 
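+///
+/// Issues a `txpool_status` RPC to every host once per second; hosts that fail to respond
+/// are reported with zeroed counts, and the task exits once the receiving side is dropped.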
+async fn txpool_poller( + http_client: reqwest::Client, + hosts: Vec, + tx: mpsc::Sender>, +) { + let providers: Vec = hosts + .iter() + .filter_map(|host| { + let url: url::Url = host.parse().ok()?; + let http = alloy_transport_http::Http::with_client(http_client.clone(), url); + let client = alloy_rpc_client::RpcClient::new(http, true); + Some(alloy_provider::RootProvider::new(client)) + }) + .collect(); + + let mut interval = tokio::time::interval(Duration::from_secs(1)); + + loop { + interval.tick().await; + + let mut statuses = Vec::with_capacity(hosts.len()); + for (host, provider) in hosts.iter().zip(providers.iter()) { + match provider.txpool_status().await { + Ok(status) => { + statuses.push(TxpoolHostStatus { + host: host.clone(), + pending: status.pending, + queued: status.queued, + }); + } + Err(_) => { + statuses.push(TxpoolHostStatus { host: host.clone(), pending: 0, queued: 0 }); + } + } + } + + if tx.send(statuses).await.is_err() { + break; + } + } +} diff --git a/crates/gobrr/src/runner.rs b/crates/gobrr/src/runner.rs new file mode 100644 index 0000000..cf72849 --- /dev/null +++ b/crates/gobrr/src/runner.rs @@ -0,0 +1,400 @@ +use std::sync::Arc; + +use alloy_primitives::{Address, B256}; +use alloy_provider::Provider; +use alloy_rpc_client::BatchRequest; +use alloy_rpc_types_eth::BlockNumberOrTag; +use anyhow::{Context, Result}; +use tokio::sync::{Semaphore, broadcast, mpsc}; +use tracing::{info, warn}; + +use crate::{ + SenderId, + blocks::{BlockWatcher, OpBlock, run_block_logger}, + client::{create_provider, create_shared_client}, + config::{TestConfig, TxSelector}, + confirmer::run_confirmer, + flashblock_watcher::run_flashblock_watcher, + funder::Funder, + handle::LoadTestHandle, + sender::Sender, + signer::{ResignRequest, SignedTx, Signer}, + stats::run_stats_reporter, + tracker::run_tracker, + wallet::{derive_signers, get_addresses, parse_funder_key}, +}; + +/// Start a load test and return a handle for controlling it. +/// +/// This runs the setup phase (load config, fund wallets, spawn pipeline tasks) +/// and returns a [`LoadTestHandle`] that can be used to poll stats, trigger +/// shutdown, and drain pending transactions. +pub async fn start_load_test(config_path: &str) -> Result { + info!("Starting gobrr load tester"); + + // Load config file + let config = TestConfig::load(config_path)?; + + // Parse duration if specified + let duration = config.parse_duration()?; + if let Some(d) = &duration { + info!(duration_secs = d.as_secs(), "Test duration configured"); + } else { + info!("Running until Ctrl+C"); + } + + // Parse funding amount + let funding_amount = config.parse_funding_amount()?; + + // Step 1: Parse funder key + let funder_signer = + parse_funder_key(&config.funder_key).context("Failed to parse funder key")?; + info!(funder = %funder_signer.address(), "Funder wallet loaded"); + + // Step 1b: Create shared HTTP client with connection pooling + let http_client = create_shared_client(); + info!("Created shared HTTP client with connection pooling"); + + // Step 1c: Fetch chain ID and base fee from RPC (once for all signers) + let provider = create_provider(http_client.clone(), &config.rpc)?; + let chain_id = provider.get_chain_id().await.context("Failed to get chain ID")?; + let latest_block = provider + .get_block_by_number(BlockNumberOrTag::Latest) + .await + .context("Failed to get latest block")? 
+        .context("Latest block not found")?;
+    let base_fee = latest_block.header.base_fee_per_gas.context("Latest block missing base fee")?;
+    // Gas multiplier applied to base fee (testnet buffer)
+    const GAS_FEE_MULTIPLIER: u128 = 100;
+    let max_fee_per_gas = u128::from(base_fee).saturating_mul(GAS_FEE_MULTIPLIER);
+    info!(chain_id, base_fee, max_fee_per_gas, "Connected to chain");
+
+    // Step 2: Derive sender signers from mnemonic (skipping funder address if derived)
+    info!(count = config.sender_count, offset = config.sender_offset, "Deriving sender wallets");
+    let sender_signers = derive_signers(
+        &config.mnemonic,
+        config.sender_count,
+        config.sender_offset,
+        &[funder_signer.address()],
+    )
+    .context("Failed to derive sender signers")?;
+    let sender_addresses = get_addresses(&sender_signers);
+
+    // Step 2b: Clear mempools on management hosts if configured (include funder address)
+    if !config.txpool_hosts.is_empty() {
+        let mut addresses_to_clear = sender_addresses.clone();
+        addresses_to_clear.push(funder_signer.address());
+        clear_mempools(&http_client, &config.txpool_hosts, &addresses_to_clear).await;
+    }
+
+    // Step 3: Run funding phase using Funder struct
+    info!("Starting funding phase");
+    let mut funder = Funder::new(http_client.clone(), &config.rpc, funder_signer, chain_id)
+        .await
+        .context("Failed to create funder")?;
+    funder.fund(&sender_addresses, funding_amount).await.context("Funding phase failed")?;
+
+    // Build TxSelector from config
+    let tx_selector = TxSelector::new(&config.transactions)?;
+
+    // Step 4: Create tracker channel and spawn tracker task
+    let (tracker_tx, tracker_rx) = mpsc::unbounded_channel();
+    let tracker_handle = tokio::spawn(run_tracker(tracker_rx));
+
+    // Step 5: Create shutdown broadcast channel
+    let (shutdown_tx, _) = broadcast::channel::<()>(1);
+
+    // Step 5b: Create block event broadcast channel
+    let (block_tx, _) = broadcast::channel::<OpBlock>(64);
+
+    // Step 5c: Create confirmer pending tx channel
+    let (confirmer_pending_tx, confirmer_pending_rx) = mpsc::unbounded_channel();
+
+    // Step 5d: Create flashblock pending tx channel
+    let (flashblock_pending_tx, flashblock_pending_rx) = mpsc::unbounded_channel();
+
+    // Step 5e: Set up rate limiter if target_tps is configured
+    let rate_limiter: Option<Arc<Semaphore>> = config.target_tps.map(|tps| {
+        info!(target_tps = tps, "Rate limiter enabled");
+        Arc::new(Semaphore::new(0))
+    });
+
+    // Spawn rate limiter replenisher task if configured
+    let rate_limiter_handle =
+        if let Some((tps, limiter)) = config.target_tps.zip(rate_limiter.clone()) {
+            let mut replenish_shutdown = shutdown_tx.subscribe();
+            let handle = tokio::spawn(async move {
+                // Tick 10x per second, using fractional accumulation to support low TPS
+                let permits_per_tick = tps as f64 / 10.0;
+                let max_permits = (tps * 2) as usize; // cap at 2 seconds of burst
+                let mut carry = 0.0;
+                let mut interval = tokio::time::interval(std::time::Duration::from_millis(100));
+
+                loop {
+                    tokio::select! {
+                        biased;
+                        _ = replenish_shutdown.recv() => break,
+                        _ = interval.tick() => {
+                            let available = limiter.available_permits();
+                            if available < max_permits {
+                                carry += permits_per_tick;
+                                let mut to_add = carry.floor() as usize;
+                                let max_add = max_permits - available;
+                                if to_add > max_add {
+                                    to_add = max_add;
+                                }
+                                if to_add > 0 {
+                                    limiter.add_permits(to_add);
+                                    carry -= to_add as f64;
+                                }
+                            } else {
+                                carry = 0.0;
+                            }
+                        }
+                    }
+                }
+            });
+            Some(handle)
+        } else {
+            None
+        };
+
+    // Step 6: Spawn signer and sender tasks in batches to avoid overwhelming RPC
+    let mut handles = Vec::new();
+    let backlog_capacity = config.in_flight_per_sender as usize * 2;
+    const BATCH_SIZE: usize = 50;
+
+    let sender_signers: Vec<_> = sender_signers.into_iter().enumerate().collect();
+    for batch in sender_signers.chunks(BATCH_SIZE) {
+        // Spawn signer initialization tasks for this batch
+        let mut init_handles = Vec::with_capacity(batch.len());
+        for (i, signer) in batch.iter().cloned() {
+            let sender_id = i as SenderId;
+            let client = http_client.clone();
+            let rpc = config.rpc.clone();
+            let limiter = rate_limiter.clone();
+            let selector = tx_selector.clone();
+
+            let handle = tokio::spawn(async move {
+                Signer::new(
+                    client,
+                    &rpc,
+                    signer,
+                    sender_id,
+                    limiter,
+                    selector,
+                    chain_id,
+                    max_fee_per_gas,
+                )
+                .await
+                .map(|s| (i, s))
+            });
+            init_handles.push(handle);
+        }
+
+        // Wait for all signers in this batch to initialize
+        for handle in init_handles {
+            match handle.await {
+                Ok(Ok((i, signer_instance))) => {
+                    let sender_id = i as SenderId;
+
+                    // Create channels for the pipeline
+                    let (signed_tx, signed_rx) = mpsc::channel::<SignedTx>(backlog_capacity);
+                    let (resign_tx, resign_rx) =
+                        mpsc::channel::<ResignRequest>(config.in_flight_per_sender as usize);
+
+                    // Spawn signer task
+                    let shutdown_rx = shutdown_tx.subscribe();
+                    let signer_block_rx = block_tx.subscribe();
+                    let signer_handle = tokio::spawn(async move {
+                        if let Err(e) = signer_instance
+                            .run(resign_rx, signed_tx, signer_block_rx, shutdown_rx)
+                            .await
+                        {
+                            tracing::error!(sender = i, error = %e, "Signer failed");
+                        }
+                    });
+                    handles.push(signer_handle);
+
+                    // Spawn sender task
+                    let shutdown_rx = shutdown_tx.subscribe();
+                    let tracker = tracker_tx.clone();
+                    let confirmer = confirmer_pending_tx.clone();
+                    let flashblock = flashblock_pending_tx.clone();
+                    let rpc = config.rpc.clone();
+                    let in_flight = config.in_flight_per_sender;
+                    let client = http_client.clone();
+                    let sender_handle = tokio::spawn(async move {
+                        match Sender::new(
+                            client, sender_id, &rpc, in_flight, tracker, confirmer, flashblock,
+                            resign_tx,
+                        ) {
+                            Ok(sender) => {
+                                if let Err(e) = sender.run(signed_rx, shutdown_rx).await {
+                                    tracing::error!(sender = i, error = %e, "Sender failed");
+                                }
+                            }
+                            Err(e) => {
+                                tracing::error!(sender = i, error = %e, "Failed to create sender");
+                            }
+                        }
+                    });
+                    handles.push(sender_handle);
+                }
+                Ok(Err(e)) => {
+                    tracing::error!(error = %e, "Failed to create signer");
+                }
+                Err(e) => {
+                    tracing::error!(error = %e, "Signer init task panicked");
+                }
+            }
+        }
+
+        info!(batch_size = batch.len(), "Initialized signer batch");
+    }
+
+    // Step 7: Create separate shutdown for tasks that run during drain period
+    let (drain_shutdown_tx, _) = broadcast::channel::<()>(1);
+
+    // Spawn stats reporter (runs during drain to show progress)
+    let stats_shutdown = drain_shutdown_tx.subscribe();
+    let stats_tracker = tracker_tx.clone();
+    let stats_handle = tokio::spawn(run_stats_reporter(stats_tracker, stats_shutdown));
+
+    // Step 7b: Spawn block watcher using
BlockWatcher struct + let block_watcher_shutdown = drain_shutdown_tx.subscribe(); + let block_tx_clone = block_tx.clone(); + let block_client = http_client.clone(); + let block_rpc = config.rpc.clone(); + let block_handle = tokio::spawn(async move { + match BlockWatcher::new(block_client, &block_rpc) { + Ok(watcher) => { + if let Err(e) = watcher.run(block_tx_clone, block_watcher_shutdown).await { + tracing::error!(error = %e, "Block watcher failed"); + } + } + Err(e) => { + tracing::error!(error = %e, "Failed to create block watcher"); + } + } + }); + + // Step 7c: Spawn block logger (exits when block channel closes) + let logger_block_rx = block_tx.subscribe(); + let logger_handle = tokio::spawn(run_block_logger(logger_block_rx)); + + // Step 7d: Spawn confirmer (exits when block channel closes) + let confirmer_block_rx = block_tx.subscribe(); + let confirmer_tracker = tracker_tx.clone(); + let confirmer_handle = + tokio::spawn(run_confirmer(confirmer_pending_rx, confirmer_block_rx, confirmer_tracker)); + + // Step 7e: Spawn flashblock watcher + let flashblock_shutdown = drain_shutdown_tx.subscribe(); + let flashblock_tracker = tracker_tx.clone(); + let flashblock_ws_url = config.flashblocks_ws.clone(); + let flashblock_handle = tokio::spawn(run_flashblock_watcher( + flashblock_ws_url, + flashblock_pending_rx, + flashblock_tracker, + flashblock_shutdown, + )); + + info!("Load test running..."); + + let txpool_hosts = config.txpool_hosts.clone(); + let target_tps = config.target_tps; + + Ok(LoadTestHandle::new( + tracker_tx, + shutdown_tx, + drain_shutdown_tx, + block_tx, + handles, + stats_handle, + block_handle, + logger_handle, + confirmer_handle, + confirmer_pending_tx, + flashblock_handle, + flashblock_pending_tx, + rate_limiter_handle, + tracker_handle, + txpool_hosts, + duration, + http_client, + target_tps, + )) +} + +/// Clears pending transactions for all sender addresses on each management host. +/// Calls `txpool_removeSender` in a JSON-RPC batch for each host. Errors are logged but don't fail the test. 
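+///
+/// For illustration only (the exact wire format is an assumption, not taken from the node's RPC
+/// docs): each batched call is a single-address request along the lines of
+/// `{"method": "txpool_removeSender", "params": ["0x<sender address>"]}`, and the node is
+/// expected to reply with the list of transaction hashes it dropped for that sender.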
+async fn clear_mempools(
+    http_client: &reqwest::Client,
+    txpool_hosts: &[String],
+    addresses: &[Address],
+) {
+    info!(
+        hosts = txpool_hosts.len(),
+        addresses = addresses.len(),
+        "Clearing mempools on management hosts"
+    );
+
+    for host in txpool_hosts {
+        let provider = match create_provider(http_client.clone(), host) {
+            Ok(p) => p,
+            Err(e) => {
+                warn!(host, error = %e, "Failed to create provider for management host");
+                continue;
+            }
+        };
+
+        let mut batch = BatchRequest::new(provider.client());
+        let mut futures = Vec::with_capacity(addresses.len());
+        let mut addrs = Vec::with_capacity(addresses.len());
+
+        for addr in addresses {
+            match batch.add_call::<_, Vec<B256>>("txpool_removeSender", &(*addr,)) {
+                Ok(fut) => {
+                    futures.push(fut);
+                    addrs.push(*addr);
+                }
+                Err(e) => {
+                    warn!(host, address = %addr, error = ?e, "Failed to add removeSender to batch");
+                }
+            }
+        }
+
+        if let Err(e) = batch.send().await {
+            warn!(host, error = ?e, "Failed to send mempool clear batch");
+            continue;
+        }
+
+        let mut total_removed = 0usize;
+        for (fut, addr) in futures.into_iter().zip(addrs) {
+            match fut.await {
+                Ok(removed) => {
+                    total_removed += removed.len();
+                    if !removed.is_empty() {
+                        info!(
+                            host,
+                            address = %addr,
+                            removed = removed.len(),
+                            "Cleared pending txs from mempool"
+                        );
+                    }
+                }
+                Err(e) => {
+                    warn!(
+                        host,
+                        address = %addr,
+                        error = %e,
+                        "Failed to clear mempool for sender"
+                    );
+                }
+            }
+        }
+
+        info!(host, total_removed, "Mempool clear complete for host");
+    }
+}
diff --git a/crates/gobrr/src/sender.rs b/crates/gobrr/src/sender.rs
new file mode 100644
index 0000000..83a50e5
--- /dev/null
+++ b/crates/gobrr/src/sender.rs
@@ -0,0 +1,361 @@
+use std::{
+    sync::{
+        Arc,
+        atomic::{AtomicU32, Ordering},
+    },
+    time::Duration,
+};
+
+use alloy_primitives::B256;
+use alloy_provider::Provider;
+use alloy_rpc_client::BatchRequest;
+use anyhow::Result;
+use tokio::{
+    sync::{Semaphore, broadcast, mpsc},
+    task::JoinSet,
+};
+use tracing::{debug, error, warn};
+
+use crate::{
+    SenderId,
+    client::{self, Provider as OpProvider},
+    signer::{ResignRequest, SignedTx},
+    tracker::TrackerEvent,
+};
+
+/// Batch size for transaction batching
+const BATCH_SIZE: usize = 5;
+/// Timeout to flush partial batches
+const BATCH_FLUSH_TIMEOUT: Duration = Duration::from_millis(50);
+
+/// Consecutive error threshold before backoff kicks in
+const BACKOFF_THRESHOLD: u32 = 3;
+/// Base backoff duration
+const BACKOFF_BASE: Duration = Duration::from_millis(100);
+/// Maximum backoff duration
+const BACKOFF_MAX: Duration = Duration::from_secs(5);
+
+/// Classification of send errors
+enum SendErrorKind {
+    /// "underpriced", "replacement" — re-sign with bumped fees
+    Underpriced,
+    /// Network errors, timeouts, rate limits — trigger backoff
+    Transient,
+    /// "nonce too low" — tx with this nonce already mined
+    NonceTooLow,
+    /// "already known" — node already has this tx in mempool
+    AlreadyKnown,
+}
+
+impl SendErrorKind {
+    fn classify(error_msg: &str) -> Self {
+        let lower = error_msg.to_lowercase();
+        if lower.contains("underpriced") || lower.contains("replacement") {
+            Self::Underpriced
+        } else if lower.contains("nonce too low") {
+            Self::NonceTooLow
+        } else if lower.contains("already known") {
+            Self::AlreadyKnown
+        } else {
+            Self::Transient
+        }
+    }
+
+    const fn label(&self) -> &'static str {
+        match self {
+            Self::Underpriced => "underpriced",
+            Self::Transient => "transient",
+            Self::NonceTooLow => "nonce_too_low",
+            Self::AlreadyKnown => "already_known",
+        }
+    }
+}
+
+/// Sender sends signed transactions in batches
+pub(crate) struct Sender {
+    sender_id: SenderId,
+    provider: OpProvider,
+    semaphore: Arc<Semaphore>,
+    tracker_tx: mpsc::UnboundedSender<TrackerEvent>,
+    confirmer_tx: mpsc::UnboundedSender<B256>,
+    flashblock_tx: mpsc::UnboundedSender<B256>,
+    resign_tx: mpsc::Sender<ResignRequest>,
+    consecutive_errors: Arc<AtomicU32>,
+}
+
+impl Sender {
+    /// Creates a new Sender
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) fn new(
+        http_client: reqwest::Client,
+        sender_id: SenderId,
+        rpc_url: &str,
+        in_flight_limit: u32,
+        tracker_tx: mpsc::UnboundedSender<TrackerEvent>,
+        confirmer_tx: mpsc::UnboundedSender<B256>,
+        flashblock_tx: mpsc::UnboundedSender<B256>,
+        resign_tx: mpsc::Sender<ResignRequest>,
+    ) -> Result<Self> {
+        let provider = client::create_provider(http_client, rpc_url)?;
+        let semaphore = Arc::new(Semaphore::new(in_flight_limit as usize));
+
+        Ok(Self {
+            sender_id,
+            provider,
+            semaphore,
+            tracker_tx,
+            confirmer_tx,
+            flashblock_tx,
+            resign_tx,
+            consecutive_errors: Arc::new(AtomicU32::new(0)),
+        })
+    }
+
+    /// Spawns a task to send a batch of transactions
+    async fn spawn_batch(&self, tasks: &mut JoinSet<()>, batch: Vec<SignedTx>) -> Result<()> {
+        let permit = Arc::clone(&self.semaphore).acquire_many_owned(batch.len() as u32).await?;
+        let provider = self.provider.clone();
+        let tracker = self.tracker_tx.clone();
+        let confirmer = self.confirmer_tx.clone();
+        let flashblock = self.flashblock_tx.clone();
+        let resign = self.resign_tx.clone();
+        let errors = Arc::clone(&self.consecutive_errors);
+        let idx = self.sender_id;
+        tasks.spawn(async move {
+            send_batch(idx, batch, provider, tracker, confirmer, flashblock, resign, errors).await;
+            drop(permit);
+        });
+        Ok(())
+    }
+
+    /// Adaptive backoff based on consecutive error count
+    async fn maybe_backoff(&self) {
+        let errors = self.consecutive_errors.load(Ordering::Relaxed);
+        if errors >= BACKOFF_THRESHOLD {
+            let exp = errors - BACKOFF_THRESHOLD;
+            let backoff = BACKOFF_BASE.saturating_mul(1u32.wrapping_shl(exp));
+            let backoff = backoff.min(BACKOFF_MAX);
+            warn!(
+                sender = self.sender_id,
+                consecutive_errors = errors,
+                backoff_ms = backoff.as_millis() as u64,
+                "Backing off due to consecutive errors"
+            );
+            tokio::time::sleep(backoff).await;
+        }
+    }
+
+    /// Runs the sender loop, receiving signed transactions and sending them in batches
+    pub(crate) async fn run(
+        self,
+        mut signed_rx: mpsc::Receiver<SignedTx>,
+        mut shutdown: broadcast::Receiver<()>,
+    ) -> Result<()> {
+        let mut tasks: JoinSet<()> = JoinSet::new();
+        let mut batch_buffer: Vec<SignedTx> = Vec::with_capacity(BATCH_SIZE);
+
+        debug!(sender = self.sender_id, "Sender started with batching enabled");
+
+        loop {
+            tokio::select! {
+                biased;
+                _ = shutdown.recv() => {
+                    debug!(sender = self.sender_id, "Sender shutting down");
+                    // Flush any remaining transactions in the buffer
+                    if !batch_buffer.is_empty() {
+                        let batch = std::mem::take(&mut batch_buffer);
+                        self.spawn_batch(&mut tasks, batch).await?;
+                    }
+                    // Wait for all spawned tasks to complete
+                    while tasks.join_next().await.is_some() {}
+                    break;
+                }
+                Some(signed) = signed_rx.recv() => {
+                    batch_buffer.push(signed);
+
+                    if batch_buffer.len() >= BATCH_SIZE {
+                        let batch = std::mem::take(&mut batch_buffer);
+                        self.spawn_batch(&mut tasks, batch).await?;
+                        self.maybe_backoff().await;
+                    }
+                }
+                _ = tokio::time::sleep(BATCH_FLUSH_TIMEOUT), if !batch_buffer.is_empty() => {
+                    // Flush partial batch on timeout
+                    let batch = std::mem::take(&mut batch_buffer);
+                    self.spawn_batch(&mut tasks, batch).await?;
+                    self.maybe_backoff().await;
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+/// Send a batch of transactions using JSON-RPC batching
+#[allow(clippy::too_many_arguments)]
+async fn send_batch(
+    sender_id: SenderId,
+    batch: Vec<SignedTx>,
+    provider: OpProvider,
+    tracker_tx: mpsc::UnboundedSender<TrackerEvent>,
+    confirmer_tx: mpsc::UnboundedSender<B256>,
+    flashblock_tx: mpsc::UnboundedSender<B256>,
+    resign_tx: mpsc::Sender<ResignRequest>,
+    consecutive_errors: Arc<AtomicU32>,
+) {
+    let original_size = batch.len();
+    debug!(sender = sender_id, batch_size = original_size, "Sending transaction batch");
+
+    // Build batch request using BatchRequest directly
+    let mut batch_req = BatchRequest::new(provider.client());
+    let mut queued = Vec::with_capacity(original_size);
+
+    for tx in batch {
+        match batch_req.add_call::<_, B256>("eth_sendRawTransaction", &(tx.raw_bytes.clone(),)) {
+            Ok(fut) => queued.push((tx, fut)),
+            Err(e) => {
+                consecutive_errors.fetch_add(1, Ordering::Relaxed);
+                error!(
+                    sender = sender_id,
+                    tx_hash = %tx.tx_hash,
+                    error = ?e,
+                    "Failed to add tx to batch"
+                );
+                let _ = tracker_tx.send(TrackerEvent::TxFailed {
+                    tx_hash: tx.tx_hash,
+                    tx_type: tx.tx_type,
+                    reason: "batch_add_error".to_owned(),
+                    retried: tx.is_retry,
+                });
+            }
+        }
+    }
+
+    if queued.is_empty() {
+        return;
+    }
+
+    // Send the batch request (single HTTP request for all transactions)
+    if let Err(e) = batch_req.send().await {
+        error!(
+            sender = sender_id,
+            batch_size = queued.len(),
+            error = ?e,
+            "Failed to send batch request"
+        );
+        // Whole-batch HTTP failure — count all txs as transient failures
+        consecutive_errors.fetch_add(queued.len() as u32, Ordering::Relaxed);
+        for (tx, _) in queued {
+            let _ = tracker_tx.send(TrackerEvent::TxFailed {
+                tx_hash: tx.tx_hash,
+                tx_type: tx.tx_type,
+                reason: "batch_http_error".to_owned(),
+                retried: tx.is_retry,
+            });
+        }
+        return;
+    }
+
+    // Collect results
+    for (tx, fut) in queued {
+        match fut.await {
+            Ok(_) => {
+                // Success — decay consecutive error counter
+                let _ =
+                    consecutive_errors.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |v| {
+                        Some(v.saturating_sub(1))
+                    });
+
+                // Notify tracker that tx was sent
+                if let Err(e) = tracker_tx
+                    .send(TrackerEvent::TxSent { tx_hash: tx.tx_hash, tx_type: tx.tx_type })
+                {
+                    warn!(tx_hash = %tx.tx_hash, error = %e, "Failed to send TxSent to tracker");
+                }
+
+                // Send tx hash to confirmer after successful send
+                if let Err(e) = confirmer_tx.send(tx.tx_hash) {
+                    warn!(tx_hash = %tx.tx_hash, error = %e, "Failed to send tx hash to confirmer");
+                }
+
+                // Send tx hash to flashblock watcher for FB inclusion tracking
+                if let Err(e) = flashblock_tx.send(tx.tx_hash) {
+                    warn!(tx_hash = %tx.tx_hash, error = %e, "Failed to send tx hash to flashblock watcher");
+                }
watcher"); + } + + debug!( + sender = sender_id, + tx_hash = %tx.tx_hash, + nonce = tx.nonce, + "Transaction sent via batch" + ); + } + Err(e) => { + let error_msg = format!("{e:?}"); + let kind = SendErrorKind::classify(&error_msg); + + match kind { + SendErrorKind::Underpriced if !tx.is_retry => { + // First underpriced — send to signer for re-sign + debug!( + sender = sender_id, + nonce = tx.nonce, + tx_hash = %tx.tx_hash, + "Underpriced tx, requesting re-sign" + ); + let _ = resign_tx + .send(ResignRequest { nonce: tx.nonce, unsigned: tx.unsigned }) + .await; + } + SendErrorKind::Underpriced => { + // Already retried — give up + warn!( + sender = sender_id, + nonce = tx.nonce, + tx_hash = %tx.tx_hash, + "Underpriced tx after retry, giving up" + ); + let _ = tracker_tx.send(TrackerEvent::TxFailed { + tx_hash: tx.tx_hash, + tx_type: tx.tx_type, + reason: kind.label().to_owned(), + retried: true, + }); + } + SendErrorKind::Transient => { + consecutive_errors.fetch_add(1, Ordering::Relaxed); + warn!( + sender = sender_id, + nonce = tx.nonce, + tx_hash = %tx.tx_hash, + error = %error_msg, + "Transient send error" + ); + let _ = tracker_tx.send(TrackerEvent::TxFailed { + tx_hash: tx.tx_hash, + tx_type: tx.tx_type, + reason: kind.label().to_owned(), + retried: tx.is_retry, + }); + } + SendErrorKind::NonceTooLow | SendErrorKind::AlreadyKnown => { + debug!( + sender = sender_id, + nonce = tx.nonce, + tx_hash = %tx.tx_hash, + kind = kind.label(), + "Stale tx" + ); + let _ = tracker_tx.send(TrackerEvent::TxFailed { + tx_hash: tx.tx_hash, + tx_type: tx.tx_type, + reason: kind.label().to_owned(), + retried: tx.is_retry, + }); + } + } + } + } + } +} diff --git a/crates/gobrr/src/signer.rs b/crates/gobrr/src/signer.rs new file mode 100644 index 0000000..e60657c --- /dev/null +++ b/crates/gobrr/src/signer.rs @@ -0,0 +1,286 @@ +use std::sync::Arc; + +use alloy_network::{EthereumWallet, NetworkWallet, TransactionBuilder, eip2718::Encodable2718}; +use alloy_primitives::{Address, B256, Bytes, U256}; +use alloy_provider::Provider; +use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionRequest}; +use alloy_signer_local::PrivateKeySigner; +use anyhow::{Context, Result}; +use rand::{SeedableRng, rngs::StdRng}; +use tokio::sync::{OwnedSemaphorePermit, Semaphore, broadcast, mpsc}; +use tracing::{debug, info, warn}; + +use crate::{ + SenderId, + blocks::OpBlock, + client::{self, WalletProvider}, + config::{TxSelector, TxType}, +}; + +/// Unsigned transaction content — no nonce, fees, or signature +pub(crate) struct UnsignedTx { + pub(crate) to: Address, + pub(crate) value: U256, + pub(crate) input: Bytes, + pub(crate) gas_limit: u64, + pub(crate) tx_type: TxType, +} + +/// Signed transaction ready to send +pub(crate) struct SignedTx { + pub(crate) raw_bytes: Bytes, + pub(crate) tx_hash: B256, + pub(crate) nonce: u64, + pub(crate) tx_type: TxType, + pub(crate) unsigned: UnsignedTx, + pub(crate) is_retry: bool, +} + +/// Request from Sender to Signer to re-sign with bumped fees +pub(crate) struct ResignRequest { + pub(crate) nonce: u64, + pub(crate) unsigned: UnsignedTx, +} + +/// Gas multiplier applied to base fee (testnet buffer) +const GAS_FEE_MULTIPLIER: u128 = 100; + +/// Signer generates, signs, and forwards transactions to the sender +pub(crate) struct Signer { + sender_id: SenderId, + wallet: EthereumWallet, + nonce: u64, + sender_address: Address, + chain_id: u64, + max_fee_per_gas: u128, + rate_limiter: Option>, + tx_selector: TxSelector, + rng: StdRng, +} + +impl Signer { + /// Creates a new Signer 
+    #[allow(clippy::too_many_arguments)]
+    pub(crate) async fn new(
+        http_client: reqwest::Client,
+        rpc_url: &str,
+        signer: PrivateKeySigner,
+        sender_id: SenderId,
+        rate_limiter: Option<Arc<Semaphore>>,
+        tx_selector: TxSelector,
+        chain_id: u64,
+        max_fee_per_gas: u128,
+    ) -> Result<Self> {
+        let sender_address = signer.address();
+        let wallet = EthereumWallet::from(signer);
+        let provider: WalletProvider =
+            client::create_wallet_provider(http_client, rpc_url, wallet.clone())?;
+
+        // Fetch initial nonce from pending state to account for mempool transactions
+        let initial_nonce = provider
+            .get_transaction_count(sender_address)
+            .block_id(BlockNumberOrTag::Pending.into())
+            .await
+            .context("Failed to get initial nonce")?;
+
+        info!(
+            sender = sender_id,
+            address = %sender_address,
+            initial_nonce = initial_nonce,
+            "Signer started"
+        );
+
+        Ok(Self {
+            sender_id,
+            wallet,
+            nonce: initial_nonce,
+            sender_address,
+            chain_id,
+            max_fee_per_gas,
+            rate_limiter,
+            tx_selector,
+            rng: StdRng::from_entropy(),
+        })
+    }
+
+    /// Generates the next unsigned transaction
+    fn prepare_next(&mut self) -> UnsignedTx {
+        let tx_params = self.tx_selector.select(&mut self.rng).build(&mut self.rng);
+        UnsignedTx {
+            to: tx_params.to,
+            value: tx_params.value,
+            input: tx_params.input,
+            gas_limit: tx_params.gas_limit,
+            tx_type: tx_params.tx_type,
+        }
+    }
+
+    /// Runs the signer loop
+    pub(crate) async fn run(
+        mut self,
+        mut resign_rx: mpsc::Receiver<ResignRequest>,
+        signed_tx: mpsc::Sender<SignedTx>,
+        mut block_rx: broadcast::Receiver<OpBlock>,
+        mut shutdown: broadcast::Receiver<()>,
+    ) -> Result<()> {
+        loop {
+            tokio::select! {
+                biased;
+                _ = shutdown.recv() => {
+                    debug!(sender = self.sender_id, "Signer shutting down");
+                    break;
+                }
+                Some(req) = resign_rx.recv() => {
+                    if let Err(e) = self.handle_resign(req, &signed_tx).await {
+                        warn!(sender = self.sender_id, error = %e, "Failed to re-sign transaction");
+                    }
+                }
+                result = block_rx.recv() => {
+                    match result {
+                        Ok(block) => self.on_new_block(&block),
+                        Err(broadcast::error::RecvError::Lagged(n)) => {
+                            debug!(sender = self.sender_id, missed = n, "Signer lagged behind block events");
+                        }
+                        Err(broadcast::error::RecvError::Closed) => {
+                            debug!(sender = self.sender_id, "Block channel closed in signer");
+                        }
+                    }
+                }
+                permit = self.acquire_rate_permit() => {
+                    match permit {
+                        Ok(rate_permit) => {
+                            // Default branch: generate and sign a new transaction
+                            let unsigned = self.prepare_next();
+                            if let Err(e) = self.handle_sign(unsigned, &signed_tx, rate_permit).await
+                            {
+                                warn!(
+                                    sender = self.sender_id,
+                                    error = %e,
+                                    "Failed to sign transaction"
+                                );
+                            }
+                        }
+                        Err(e) => {
+                            warn!(
+                                sender = self.sender_id,
+                                error = %e,
+                                "Rate limiter closed"
+                            );
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Acquire a rate-limiter permit if configured.
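+    ///
+    /// Note that permits handed out here are never returned to the semaphore: `handle_sign`
+    /// calls `std::mem::forget` on the permit once a transaction has been signed, so each permit
+    /// corresponds to exactly one generated transaction and sustained throughput is governed by
+    /// the replenisher task spawned in `runner.rs`.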
+    async fn acquire_rate_permit(&self) -> Result<Option<OwnedSemaphorePermit>> {
+        let Some(limiter) = &self.rate_limiter else {
+            return Ok(None);
+        };
+        let permit = Arc::clone(limiter).acquire_owned().await.context("Rate limiter closed")?;
+        Ok(Some(permit))
+    }
+
+    /// Update gas fee from a new block's base fee
+    fn on_new_block(&mut self, block: &OpBlock) {
+        if let Some(base_fee) = block.header.inner.base_fee_per_gas {
+            let new_max_fee = u128::from(base_fee).saturating_mul(GAS_FEE_MULTIPLIER);
+            if new_max_fee != self.max_fee_per_gas {
+                debug!(
+                    sender = self.sender_id,
+                    old_max_fee = self.max_fee_per_gas,
+                    new_max_fee,
+                    block = block.header.number,
+                    "Gas price updated from block"
+                );
+                self.max_fee_per_gas = new_max_fee;
+            }
+        }
+    }
+
+    /// Assigns nonce, signs, and forwards to sender
+    async fn handle_sign(
+        &mut self,
+        unsigned: UnsignedTx,
+        signed_tx: &mpsc::Sender<SignedTx>,
+        rate_permit: Option<OwnedSemaphorePermit>,
+    ) -> Result<()> {
+        let nonce = self.nonce;
+        self.nonce += 1;
+
+        let tx = self.build_and_sign(&unsigned, nonce).await?;
+        let signed = SignedTx {
+            raw_bytes: tx.0,
+            tx_hash: tx.1,
+            nonce,
+            tx_type: unsigned.tx_type,
+            unsigned,
+            is_retry: false,
+        };
+
+        signed_tx.send(signed).await.context("Sender channel closed")?;
+
+        if let Some(permit) = rate_permit {
+            std::mem::forget(permit);
+        }
+        Ok(())
+    }
+
+    /// Re-signs a transaction with bumped gas fees
+    async fn handle_resign(
+        &mut self,
+        req: ResignRequest,
+        signed_tx: &mpsc::Sender<SignedTx>,
+    ) -> Result<()> {
+        // Bump max fee by 20%
+        self.max_fee_per_gas = self.max_fee_per_gas * 120 / 100;
+        debug!(
+            sender = self.sender_id,
+            bumped_max_fee = self.max_fee_per_gas,
+            "Gas price bumped 20% for re-sign"
+        );
+
+        let tx = self.build_and_sign(&req.unsigned, req.nonce).await?;
+        let signed = SignedTx {
+            raw_bytes: tx.0,
+            tx_hash: tx.1,
+            nonce: req.nonce,
+            tx_type: req.unsigned.tx_type,
+            unsigned: req.unsigned,
+            is_retry: true,
+        };
+
+        signed_tx.send(signed).await.context("Sender channel closed")?;
+        Ok(())
+    }
+
+    /// Builds a `TransactionRequest` and signs it with the wallet
+    async fn build_and_sign(&self, unsigned: &UnsignedTx, nonce: u64) -> Result<(Bytes, B256)> {
+        let tx = TransactionRequest::default()
+            .with_to(unsigned.to)
+            .with_value(unsigned.value)
+            .with_input(unsigned.input.clone())
+            .with_nonce(nonce)
+            .with_chain_id(self.chain_id)
+            .with_max_fee_per_gas(self.max_fee_per_gas)
+            .with_max_priority_fee_per_gas(0)
+            .with_gas_limit(unsigned.gas_limit)
+            .with_from(self.sender_address);
+
+        let tx_envelope = <EthereumWallet as NetworkWallet<alloy_network::Ethereum>>::sign_request(
+            &self.wallet,
+            tx,
+        )
+        .await
+        .context("Failed to sign transaction")?;
+
+        let tx_hash = *tx_envelope.tx_hash();
+        let raw_bytes = Bytes::from(tx_envelope.encoded_2718());
+
+        Ok((raw_bytes, tx_hash))
+    }
+}
diff --git a/crates/gobrr/src/stats.rs b/crates/gobrr/src/stats.rs
new file mode 100644
index 0000000..86bd3bf
--- /dev/null
+++ b/crates/gobrr/src/stats.rs
@@ -0,0 +1,67 @@
+use std::time::Duration;
+
+use tokio::sync::{broadcast, mpsc};
+use tracing::info;
+
+use crate::tracker::{self, Stats, TrackerEvent};
+
+const REPORT_INTERVAL: Duration = Duration::from_secs(10);
+
+/// Runs the stats reporter task
+pub(crate) async fn run_stats_reporter(
+    tracker_tx: mpsc::UnboundedSender<TrackerEvent>,
+    mut shutdown: broadcast::Receiver<()>,
+) {
+    let mut interval = tokio::time::interval(REPORT_INTERVAL);
+
+    loop {
+        tokio::select! {
+            _ = interval.tick() => {
+                if let Some(stats) = tracker::get_stats(&tracker_tx).await {
+                    log_stats(&stats);
+                }
+            }
+            _ = shutdown.recv() => {
+                break;
+            }
+        }
+    }
+}
+
+fn log_stats(stats: &Stats) {
+    info!(
+        sent = stats.sent,
+        confirmed = stats.confirmed,
+        failed = stats.failed,
+        timed_out = stats.timed_out,
+        pending = stats.pending(),
+        tps = format!("{:.1}", stats.tps()),
+        "Progress"
+    );
+
+    // Log FB inclusion metrics if we have data
+    if stats.fb_inclusion_count > 0 {
+        info!(
+            avg_ms = format!("{:.2}", stats.fb_avg_inclusion_ms()),
+            min_ms = stats.fb_min_inclusion_ms(),
+            max_ms = stats.fb_max_inclusion_ms(),
+            p50_ms = stats.fb_percentile(50.0),
+            p99_ms = stats.fb_percentile(99.0),
+            count = stats.fb_inclusion_count,
+            "FB Inclusion"
+        );
+    }
+
+    // Log block inclusion metrics if we have data
+    if stats.block_inclusion_count > 0 {
+        info!(
+            avg_ms = format!("{:.2}", stats.block_avg_inclusion_ms()),
+            min_ms = stats.block_min_inclusion_ms(),
+            max_ms = stats.block_max_inclusion_ms(),
+            p50_ms = stats.block_percentile(50.0),
+            p99_ms = stats.block_percentile(99.0),
+            count = stats.block_inclusion_count,
+            "Block Inclusion"
+        );
+    }
+}
diff --git a/crates/gobrr/src/tracker.rs b/crates/gobrr/src/tracker.rs
new file mode 100644
index 0000000..2427fa2
--- /dev/null
+++ b/crates/gobrr/src/tracker.rs
@@ -0,0 +1,293 @@
+use std::{
+    collections::HashMap,
+    time::{Duration, Instant},
+};
+
+use alloy_primitives::B256;
+use rand::{Rng, SeedableRng, rngs::StdRng};
+use serde::Serialize;
+use tokio::sync::{mpsc, oneshot};
+use tracing::{debug, warn};
+
+use crate::config::TxType;
+
+/// Events sent to the tracker
+#[derive(Debug)]
+pub(crate) enum TrackerEvent {
+    TxSent {
+        tx_hash: B256,
+        tx_type: TxType,
+    },
+    TxFailed {
+        #[allow(dead_code)]
+        tx_hash: B256,
+        tx_type: TxType,
+        reason: String,
+        retried: bool,
+    },
+    /// Transaction seen in a flashblock (via WebSocket)
+    FlashblockReceived {
+        tx_hash: B256,
+    },
+    /// Transaction confirmed in a regular block (via RPC)
+    BlockReceived {
+        tx_hash: B256,
+    },
+    GetStats(oneshot::Sender<Stats>),
+    Shutdown,
+}
+
+/// Pending transaction info
+struct PendingTx {
+    sent_at: Instant,
+    /// When first seen in a flashblock (if any)
+    fb_included_at: Option<Instant>,
+}
+
+/// Timeout for pending transactions (evicted after this)
+const TX_TIMEOUT: Duration = Duration::from_secs(600);
+/// Interval for sweeping timed-out transactions
+const SWEEP_INTERVAL: Duration = Duration::from_secs(10);
+/// Maximum number of inclusion times to keep (reservoir sample)
+const MAX_INCLUSION_SAMPLES: usize = 10_000;
+
+/// Statistics collected by the tracker
+#[derive(Debug, Clone, Serialize)]
+pub struct Stats {
+    pub sent: u64,
+    pub confirmed: u64,
+    pub failed: u64,
+    pub failed_after_retry: u64,
+    pub timed_out: u64,
+    pub type_counts: HashMap<TxType, u64>,
+    pub failure_reasons: HashMap<String, u64>,
+    // Flashblock inclusion metrics (time from tx sent -> seen in flashblock via WS)
+    pub fb_inclusion_count: u64,
+    pub fb_inclusion_sum_ms: u64,
+    pub fb_inclusion_min_ms: u64,
+    pub fb_inclusion_max_ms: u64,
+    pub fb_inclusion_times: Vec<u64>,
+    // Block inclusion metrics (time from tx sent -> seen in block via RPC)
+    pub block_inclusion_count: u64,
+    pub block_inclusion_sum_ms: u64,
+    pub block_inclusion_min_ms: u64,
+    pub block_inclusion_max_ms: u64,
+    pub block_inclusion_times: Vec<u64>,
+    pub elapsed_secs: f64,
+}
+
+impl Default for Stats {
+    fn default() -> Self {
+        Self {
+            sent: 0,
+            confirmed: 0,
+            failed: 0,
+            failed_after_retry: 0,
+            timed_out: 0,
+            type_counts: HashMap::new(),
+            failure_reasons: HashMap::new(),
+            fb_inclusion_count: 0,
+            fb_inclusion_sum_ms: 0,
+            fb_inclusion_min_ms: u64::MAX,
+            fb_inclusion_max_ms: 0,
+            fb_inclusion_times: Vec::with_capacity(MAX_INCLUSION_SAMPLES),
+            block_inclusion_count: 0,
+            block_inclusion_sum_ms: 0,
+            block_inclusion_min_ms: u64::MAX,
+            block_inclusion_max_ms: 0,
+            block_inclusion_times: Vec::with_capacity(MAX_INCLUSION_SAMPLES),
+            elapsed_secs: 0.0,
+        }
+    }
+}
+
+impl Stats {
+    pub const fn pending(&self) -> u64 {
+        self.sent.saturating_sub(self.confirmed + self.timed_out)
+    }
+
+    // Flashblock inclusion metrics
+    pub fn fb_avg_inclusion_ms(&self) -> f64 {
+        if self.fb_inclusion_count == 0 {
+            0.0
+        } else {
+            self.fb_inclusion_sum_ms as f64 / self.fb_inclusion_count as f64
+        }
+    }
+
+    pub const fn fb_min_inclusion_ms(&self) -> u64 {
+        if self.fb_inclusion_count == 0 { 0 } else { self.fb_inclusion_min_ms }
+    }
+
+    pub const fn fb_max_inclusion_ms(&self) -> u64 {
+        self.fb_inclusion_max_ms
+    }
+
+    pub fn fb_percentile(&self, p: f64) -> u64 {
+        Self::compute_percentile(&self.fb_inclusion_times, p)
+    }
+
+    // Block inclusion metrics
+    pub fn block_avg_inclusion_ms(&self) -> f64 {
+        if self.block_inclusion_count == 0 {
+            0.0
+        } else {
+            self.block_inclusion_sum_ms as f64 / self.block_inclusion_count as f64
+        }
+    }
+
+    pub const fn block_min_inclusion_ms(&self) -> u64 {
+        if self.block_inclusion_count == 0 { 0 } else { self.block_inclusion_min_ms }
+    }
+
+    pub const fn block_max_inclusion_ms(&self) -> u64 {
+        self.block_inclusion_max_ms
+    }
+
+    pub fn block_percentile(&self, p: f64) -> u64 {
+        Self::compute_percentile(&self.block_inclusion_times, p)
+    }
+
+    fn compute_percentile(times: &[u64], p: f64) -> u64 {
+        if times.is_empty() {
+            return 0;
+        }
+        let mut sorted = times.to_vec();
+        sorted.sort_unstable();
+        let idx = ((p / 100.0) * (sorted.len() as f64 - 1.0)).round() as usize;
+        sorted[idx.min(sorted.len() - 1)]
+    }
+
+    pub fn tps(&self) -> f64 {
+        if self.elapsed_secs <= 0.0 { 0.0 } else { self.sent as f64 / self.elapsed_secs }
+    }
+
+    pub fn confirmed_tps(&self) -> f64 {
+        if self.elapsed_secs <= 0.0 { 0.0 } else { self.confirmed as f64 / self.elapsed_secs }
+    }
+}
+
+/// Helper to request stats from the tracker via a oneshot channel
+pub(crate) async fn get_stats(tracker_tx: &mpsc::UnboundedSender<TrackerEvent>) -> Option<Stats> {
+    let (tx, rx) = oneshot::channel();
+    tracker_tx.send(TrackerEvent::GetStats(tx)).ok()?;
+    rx.await.ok()
+}
+
+/// Runs the tracker task
+pub(crate) async fn run_tracker(mut rx: mpsc::UnboundedReceiver<TrackerEvent>) {
+    let mut pending: HashMap<B256, PendingTx> = HashMap::new();
+    let mut stats = Stats::default();
+    let test_start = Instant::now();
+    let mut sweep_interval = tokio::time::interval(SWEEP_INTERVAL);
+    let mut fb_rng = StdRng::from_entropy();
+    let mut block_rng = StdRng::from_entropy();
+
+    loop {
+        tokio::select! {
+            Some(event) = rx.recv() => {
+                match event {
+                    TrackerEvent::TxSent { tx_hash, tx_type } => {
+                        stats.sent += 1;
+                        *stats.type_counts.entry(tx_type).or_insert(0) += 1;
+                        pending.insert(tx_hash, PendingTx {
+                            sent_at: Instant::now(),
+                            fb_included_at: None,
+                        });
+                    }
+                    TrackerEvent::TxFailed { tx_type, reason, retried, .. } => {
+                        stats.failed += 1;
+                        if retried {
+                            stats.failed_after_retry += 1;
+                        }
+                        *stats.type_counts.entry(tx_type).or_insert(0) += 1;
+                        *stats.failure_reasons.entry(reason).or_insert(0) += 1;
+                    }
+                    TrackerEvent::FlashblockReceived { tx_hash } => {
+                        // Record FB inclusion time, but don't remove from pending
+                        // (we still want to track block inclusion)
+                        if let Some(tx) = pending.get_mut(&tx_hash) {
+                            // Only record if not already seen in a flashblock
+                            if tx.fb_included_at.is_none() {
+                                let now = Instant::now();
+                                tx.fb_included_at = Some(now);
+                                let fb_time = (now - tx.sent_at).as_millis() as u64;
+
+                                stats.fb_inclusion_count += 1;
+                                stats.fb_inclusion_sum_ms += fb_time;
+                                if fb_time < stats.fb_inclusion_min_ms {
+                                    stats.fb_inclusion_min_ms = fb_time;
+                                }
+                                if fb_time > stats.fb_inclusion_max_ms {
+                                    stats.fb_inclusion_max_ms = fb_time;
+                                }
+
+                                // Reservoir sampling for FB times
+                                let n = stats.fb_inclusion_count as usize;
+                                if n <= MAX_INCLUSION_SAMPLES {
+                                    stats.fb_inclusion_times.push(fb_time);
+                                } else {
+                                    let j = fb_rng.gen_range(0..n);
+                                    if j < MAX_INCLUSION_SAMPLES {
+                                        stats.fb_inclusion_times[j] = fb_time;
+                                    }
+                                }
+                            }
+                        }
+                    }
+                    TrackerEvent::BlockReceived { tx_hash } => {
+                        if let Some(tx) = pending.remove(&tx_hash) {
+                            let block_time = tx.sent_at.elapsed().as_millis() as u64;
+
+                            stats.block_inclusion_count += 1;
+                            stats.block_inclusion_sum_ms += block_time;
+                            if block_time < stats.block_inclusion_min_ms {
+                                stats.block_inclusion_min_ms = block_time;
+                            }
+                            if block_time > stats.block_inclusion_max_ms {
+                                stats.block_inclusion_max_ms = block_time;
+                            }
+
+                            // Reservoir sampling for block times
+                            let n = stats.block_inclusion_count as usize;
+                            if n <= MAX_INCLUSION_SAMPLES {
+                                stats.block_inclusion_times.push(block_time);
+                            } else {
+                                let j = block_rng.gen_range(0..n);
+                                if j < MAX_INCLUSION_SAMPLES {
+                                    stats.block_inclusion_times[j] = block_time;
+                                }
+                            }
+                            stats.confirmed += 1;
+                        }
+                    }
+                    TrackerEvent::GetStats(reply) => {
+                        stats.elapsed_secs = test_start.elapsed().as_secs_f64();
+                        if reply.send(stats.clone()).is_err() {
+                            warn!("Failed to send stats response - receiver dropped");
+                        }
+                    }
+                    TrackerEvent::Shutdown => {
+                        break;
+                    }
+                }
+            }
+            _ = sweep_interval.tick() => {
+                // Evict timed-out pending transactions
+                let before = pending.len();
+                pending.retain(|_hash, tx| {
+                    if tx.sent_at.elapsed() > TX_TIMEOUT {
+                        stats.timed_out += 1;
+                        false
+                    } else {
+                        true
+                    }
+                });
+                let evicted = before - pending.len();
+                if evicted > 0 {
+                    debug!(evicted, pending = pending.len(), "Swept timed-out transactions");
+                }
+            }
+        }
+    }
+}
diff --git a/crates/gobrr/src/wallet.rs b/crates/gobrr/src/wallet.rs
new file mode 100644
index 0000000..68d2ea6
--- /dev/null
+++ b/crates/gobrr/src/wallet.rs
@@ -0,0 +1,46 @@
+use alloy_primitives::Address;
+use alloy_signer_local::{LocalSigner, MnemonicBuilder, PrivateKeySigner, coins_bip39::English};
+use anyhow::{Context, Result};
+
+/// Derives `count` signers from the given mnemonic using BIP-44 path m/44'/60'/0'/0/i
+/// Starting from index `offset` (i.e., derives indices offset..offset+count)
+/// Skips any addresses in `skip_addresses` (e.g., the funder address)
+pub(crate) fn derive_signers(
+    mnemonic: &str,
+    count: u32,
+    offset: u32,
+    skip_addresses: &[Address],
+) -> Result<Vec<PrivateKeySigner>> {
+    let mut signers = Vec::with_capacity(count as usize);
+    let mut index = offset;
+
+    while signers.len() < count as usize {
+        let signer = MnemonicBuilder::<English>::default()
+            .phrase(mnemonic)
+            .index(index)?
+            .build()
+            .with_context(|| format!("Failed to derive signer at index {index}"))?;
+
+        if skip_addresses.contains(&signer.address()) {
+            tracing::warn!(index, address = %signer.address(), "Skipping derived address");
+            index += 1;
+            continue;
+        }
+
+        signers.push(signer);
+        index += 1;
+    }
+
+    Ok(signers)
+}
+
+/// Parses a private key from hex string (with or without 0x prefix)
+pub(crate) fn parse_funder_key(hex_key: &str) -> Result<PrivateKeySigner> {
+    let key = hex_key.strip_prefix("0x").unwrap_or(hex_key);
+    key.parse::<PrivateKeySigner>().context("Failed to parse funder private key")
+}
+
+/// Get addresses from signers
+pub(crate) fn get_addresses(signers: &[PrivateKeySigner]) -> Vec<Address> {
+    signers.iter().map(LocalSigner::address).collect()
+}

From c724da13a9b67704b7463563df72d63b8a4d6198 Mon Sep 17 00:00:00 2001
From: Danyal Prout
Date: Tue, 10 Feb 2026 12:11:31 -0600
Subject: [PATCH 2/2] yolo

---
 crates/gobrr/src/funder.rs | 139 +++++++++++++++++++++----------------
 crates/gobrr/src/runner.rs |   2 +-
 2 files changed, 82 insertions(+), 59 deletions(-)

diff --git a/crates/gobrr/src/funder.rs b/crates/gobrr/src/funder.rs
index 5eba2a5..8fa516a 100644
--- a/crates/gobrr/src/funder.rs
+++ b/crates/gobrr/src/funder.rs
@@ -20,6 +20,8 @@ const RECEIPT_TIMEOUT: Duration = Duration::from_secs(60);
 const BATCH_SIZE: usize = 10;
 /// Number of batches to run concurrently
 const CONCURRENT_BATCHES: usize = 10;
+/// Maximum number of outstanding funding transactions before waiting for confirmation
+const MAX_OUTSTANDING_TXS: usize = 100;
 
 /// Handles funding sender accounts to a target balance
 pub(crate) struct Funder {
@@ -95,78 +97,99 @@ impl Funder {
             self.provider.estimate_eip1559_fees().await.context("Failed to estimate fees")?;
         let max_fee = fees.max_fee_per_gas;
 
-        // Step 3: Chunk into batches and send concurrently
-        info!(count = to_fund.len(), "Sending funding transactions in batches");
-        let chunks: Vec<Vec<(usize, Address)>> =
-            to_fund.chunks(BATCH_SIZE).map(|c| c.to_vec()).collect();
+        // Step 3: Process in waves of MAX_OUTSTANDING_TXS, waiting for confirmation between waves
+        let total = to_fund.len();
+        let waves: Vec<Vec<(usize, Address)>> =
+            to_fund.chunks(MAX_OUTSTANDING_TXS).map(|c| c.to_vec()).collect();
 
         let wallet = self.provider.wallet();
         let chain_id = self.chain_id;
         let provider = &self.provider;
+        let poll_interval = Duration::from_millis(500);
+
+        for (wave_idx, wave) in waves.iter().enumerate() {
+            info!(
+                wave = wave_idx + 1,
+                wave_size = wave.len(),
+                total,
+                "Sending funding wave"
+            );
 
-        let batch_results: Vec<Result<Vec<(usize, Address, B256)>>> = stream::iter(chunks)
-            .map(|chunk| async move {
-                send_funding_batch(provider, wallet, chain_id, max_fee, chunk).await
-            })
-            .buffer_unordered(CONCURRENT_BATCHES)
-            .collect()
-            .await;
-
-        // Collect all sent transactions
-        let mut sent_txs: Vec<(usize, Address, B256)> = Vec::new();
-        for result in batch_results {
-            match result {
-                Ok(txs) => sent_txs.extend(txs),
-                Err(e) => {
-                    warn!(error = %e, "Batch funding failed");
-                    return Err(e).context("Failed to send funding batch");
+            // Send this wave in batches concurrently
+            let chunks: Vec<Vec<(usize, Address)>> =
+                wave.chunks(BATCH_SIZE).map(|c| c.to_vec()).collect();
+
+            let batch_results: Vec<Result<Vec<(usize, Address, B256)>>> = stream::iter(chunks)
+                .map(|chunk| async move {
+                    send_funding_batch(provider, wallet, chain_id, max_fee, chunk).await
+                })
+                .buffer_unordered(CONCURRENT_BATCHES)
+                .collect()
+                .await;
+
+            // Collect sent transactions for this wave
+            let mut sent_txs: Vec<(usize, Address, B256)> = Vec::new();
+            for result in batch_results {
+                match result {
+                    Ok(txs) => sent_txs.extend(txs),
+                    Err(e) => {
+                        warn!(error = %e, "Batch funding failed");
+                        return Err(e).context("Failed to send funding batch");
+                    }
                 }
             }
-        }
 
-        if sent_txs.is_empty() {
-            info!("No funding transactions were sent");
-            return Ok(());
-        }
-
-        // Step 4: Wait for confirmations by polling for receipts
-        info!(count = sent_txs.len(), "Waiting for funding transactions to confirm");
-        let start = std::time::Instant::now();
-        let poll_interval = Duration::from_millis(500);
+            if sent_txs.is_empty() {
+                continue;
+            }
 
-        for (i, address, tx_hash) in sent_txs {
-            loop {
-                if start.elapsed() > RECEIPT_TIMEOUT {
-                    anyhow::bail!(
-                        "Timeout waiting for funding receipt for sender {i} ({}s)",
-                        RECEIPT_TIMEOUT.as_secs()
-                    );
-                }
+            // Wait for all transactions in this wave to confirm before sending the next wave
+            info!(
+                wave = wave_idx + 1,
+                count = sent_txs.len(),
+                "Waiting for funding wave to confirm"
+            );
+            let start = std::time::Instant::now();
+
+            for (i, address, tx_hash) in &sent_txs {
+                loop {
+                    if start.elapsed() > RECEIPT_TIMEOUT {
+                        anyhow::bail!(
+                            "Timeout waiting for funding receipt for sender {i} ({}s)",
+                            RECEIPT_TIMEOUT.as_secs()
+                        );
+                    }
 
-                match self.provider.get_transaction_receipt(tx_hash).await {
-                    Ok(Some(receipt)) => {
-                        if receipt.status() {
-                            info!(
-                                sender = i,
-                                address = %address,
-                                tx_hash = %tx_hash,
-                                "Funding confirmed"
-                            );
-                        } else {
-                            anyhow::bail!("Funding transaction failed for sender {i}");
+                    match self.provider.get_transaction_receipt(*tx_hash).await {
+                        Ok(Some(receipt)) => {
+                            if receipt.status() {
+                                info!(
+                                    sender = i,
+                                    address = %address,
+                                    tx_hash = %tx_hash,
+                                    "Funding confirmed"
+                                );
+                            } else {
+                                anyhow::bail!("Funding transaction failed for sender {i}");
+                            }
+                            break;
+                        }
+                        Ok(None) => {
+                            tokio::time::sleep(poll_interval).await;
+                        }
+                        Err(e) => {
+                            warn!(sender = i, tx_hash = %tx_hash, error = %e, "Error fetching receipt, retrying");
+                            tokio::time::sleep(poll_interval).await;
                         }
-                        break;
-                    }
-                    Ok(None) => {
-                        // Not yet mined, wait and retry
-                        tokio::time::sleep(poll_interval).await;
-                    }
-                    Err(e) => {
-                        warn!(sender = i, tx_hash = %tx_hash, error = %e, "Error fetching receipt, retrying");
-                        tokio::time::sleep(poll_interval).await;
                     }
                 }
             }
+
+            info!(
+                wave = wave_idx + 1,
+                count = sent_txs.len(),
+                "Funding wave confirmed"
+            );
         }
 
         info!("All senders funded successfully");
diff --git a/crates/gobrr/src/runner.rs b/crates/gobrr/src/runner.rs
index cf72849..25455f1 100644
--- a/crates/gobrr/src/runner.rs
+++ b/crates/gobrr/src/runner.rs
@@ -242,7 +242,7 @@ pub async fn start_load_test(config_path: &str) -> Result<LoadTestHandle> {
                     handles.push(sender_handle);
                 }
                 Ok(Err(e)) => {
-                    tracing::error!(error = %e, "Failed to create signer");
+                    tracing::error!(error = ?e, "Failed to create signer");
                 }
                 Err(e) => {
                     tracing::error!(error = %e, "Signer init task panicked");
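
Usage sketch (not part of the patch): a minimal example of driving the gobrr API outside the basectl TUI. `start_load_test`, `stats_poller`, `get_stats`, `shutdown`, and `wait_and_drain` are taken from the code above; the crate-root re-export, the config path, and the `main` scaffolding are assumptions.

    // Hypothetical embedding of the gobrr runner; assumes start_load_test is re-exported
    // from the crate root and that the config path below exists.
    use gobrr::start_load_test;

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // Setup phase: load config, fund wallets, spawn the signer/sender pipeline.
        let handle = start_load_test("loadtest.yaml").await?;

        // Poll aggregate stats while the test runs.
        if let Some(stats) = handle.stats_poller().get_stats().await {
            println!("sent: {} confirmed: {}", stats.sent, stats.confirmed);
        }

        // Stop generating traffic, then wait for in-flight transactions to confirm or time out.
        handle.shutdown();
        let final_stats = handle.wait_and_drain().await;
        println!("final tps: {:.1}", final_stats.tps());
        Ok(())
    }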