diff --git a/Cargo.lock b/Cargo.lock index 1b2d4d7b..0fcc57d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4149,6 +4149,20 @@ dependencies = [ "serde_repr", ] +[[package]] +name = "op-alloy-network" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d113b325527ba7da271a8793f1c14bdf7f035ce9e0611e668c36fc6812568c7f" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives 0.8.8", + "alloy-rpc-types-eth", + "op-alloy-consensus", + "op-alloy-rpc-types", +] + [[package]] name = "op-alloy-protocol" version = "0.4.0" @@ -4228,6 +4242,18 @@ dependencies = [ "sha2", ] +[[package]] +name = "op-succinct-fees" +version = "0.1.0" +dependencies = [ + "alloy-primitives 0.8.8", + "anyhow", + "clap", + "dotenv", + "op-succinct-host-utils", + "tokio", +] + [[package]] name = "op-succinct-host-utils" version = "0.1.0" @@ -4235,6 +4261,7 @@ dependencies = [ "alloy", "alloy-consensus", "alloy-primitives 0.8.8", + "alloy-rlp", "alloy-sol-types 0.8.8", "anyhow", "cargo_metadata", @@ -4244,7 +4271,10 @@ dependencies = [ "kona-primitives", "log", "num-format", + "op-alloy-consensus", "op-alloy-genesis", + "op-alloy-network", + "op-alloy-protocol", "op-alloy-rpc-types", "op-succinct-client-utils", "reqwest 0.12.8", diff --git a/Cargo.toml b/Cargo.toml index f5fc0f05..e92119a3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,7 @@ op-alloy-genesis = { version = "0.4.0", default-features = false, features = [ op-alloy-protocol = { version = "0.4.0", default-features = false } op-alloy-rpc-types = { version = "0.4.0", default-features = false } op-alloy-rpc-types-engine = { version = "0.4.0", default-features = false } +op-alloy-network = { version = "0.4.0", default-features = false } # sp1 sp1-lib = { version = "3.0.0-rc3", features = ["verify"] } diff --git a/configs/10/rollup.json b/configs/10/rollup.json index 9630f319..21cf8362 100644 --- a/configs/10/rollup.json +++ b/configs/10/rollup.json @@ -10,7 +10,7 @@ }, 
"l2_time": 1686068903, "system_config": { - "batcherAddr": "0x6887246668a3b87F54DeB3b94Ba47a6f63F32985", + "batcherAddr": "0x6887246668a3b87f54deb3b94ba47a6f63f32985", "overhead": "0xbc", "scalar": "0xa6fe0", "gasLimit": 30000000, @@ -41,8 +41,8 @@ "ecotone_time": 1710374401, "fjord_time": 1720627201, "granite_time": 1726070401, - "batch_inbox_address": "0xFF00000000000000000000000000000000000010", - "deposit_contract_address": "0xbEb5Fc579115071764c7423A4f12eDde41f106Ed", - "l1_system_config_address": "0x229047fed2591dbec1eF1118d64F7aF3dB9EB290", + "batch_inbox_address": "0xff00000000000000000000000000000000000010", + "deposit_contract_address": "0xbeb5fc579115071764c7423a4f12edde41f106ed", + "l1_system_config_address": "0x229047fed2591dbec1ef1118d64f7af3db9eb290", "protocol_versions_address": "0x0000000000000000000000000000000000000000" } \ No newline at end of file diff --git a/configs/11155420/rollup.json b/configs/11155420/rollup.json index 5970a758..6eb39e0e 100644 --- a/configs/11155420/rollup.json +++ b/configs/11155420/rollup.json @@ -10,7 +10,7 @@ }, "l2_time": 1691802540, "system_config": { - "batcherAddr": "0x8F23BB38F531600e5d8FDDaAEC41F13FaB46E98c", + "batcherAddr": "0x8f23bb38f531600e5d8fddaaec41f13fab46e98c", "overhead": "0xbc", "scalar": "0xa6fe0", "gasLimit": 30000000, @@ -42,7 +42,7 @@ "fjord_time": 1716998400, "granite_time": 1723478400, "batch_inbox_address": "0xff00000000000000000000000000000011155420", - "deposit_contract_address": "0x16Fc5058F25648194471939df75CF27A2fdC48BC", - "l1_system_config_address": "0x034edD2A225f7f429A63E0f1D2084B9E0A93b538", - "protocol_versions_address": "0x79ADD5713B383DAa0a138d3C4780C7A1804a8090" + "deposit_contract_address": "0x16fc5058f25648194471939df75cf27a2fdc48bc", + "l1_system_config_address": "0x034edd2a225f7f429a63e0f1d2084b9e0a93b538", + "protocol_versions_address": "0x79add5713b383daa0a138d3c4780c7a1804a8090" } \ No newline at end of file diff --git a/configs/8453/rollup.json b/configs/8453/rollup.json 
new file mode 100644 index 00000000..2ec64f99 --- /dev/null +++ b/configs/8453/rollup.json @@ -0,0 +1,48 @@ +{ + "genesis": { + "l1": { + "number": 17481768, + "hash": "0x5c13d307623a926cd31415036c8b7fa14572f9dac64528e857a470511fc30771" + }, + "l2": { + "number": 0, + "hash": "0xf712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd" + }, + "l2_time": 1686789347, + "system_config": { + "batcherAddr": "0x5050f69a9786f081509234f1a7f4684b5e5b76c9", + "overhead": "0xbc", + "scalar": "0xa6fe0", + "gasLimit": 30000000, + "baseFeeScalar": null, + "blobBaseFeeScalar": null, + "eip1559Denominator": null, + "eip1559Elasticity": null + } + }, + "block_time": 2, + "max_sequencer_drift": 600, + "seq_window_size": 3600, + "channel_timeout": 300, + "granite_channel_timeout": 50, + "l1_chain_id": 1, + "l2_chain_id": 8453, + "base_fee_params": { + "max_change_denominator": "0x32", + "elasticity_multiplier": "0x6" + }, + "canyon_base_fee_params": { + "max_change_denominator": "0xfa", + "elasticity_multiplier": "0x6" + }, + "regolith_time": 0, + "canyon_time": 1704992401, + "delta_time": 1708560000, + "ecotone_time": 1710374401, + "fjord_time": 1720627201, + "granite_time": 1726070401, + "batch_inbox_address": "0xff00000000000000000000000000000000008453", + "deposit_contract_address": "0x49048044d57e1c92a77f79988d21fa8faf74e97e", + "l1_system_config_address": "0x73a79fab69143498ed3712e519a88a918e1f4072", + "protocol_versions_address": "0x0000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/scripts/fees/Cargo.toml b/scripts/fees/Cargo.toml new file mode 100644 index 00000000..1af09f9d --- /dev/null +++ b/scripts/fees/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "op-succinct-fees" +version = "0.1.0" +license.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true + +[[bin]] +name = "l1-fee-scalar" +path = "bin/l1_fee_scalar.rs" + +[dependencies] +op-succinct-host-utils = { 
workspace = true } +clap = { workspace = true } +anyhow = { workspace = true } +tokio = { workspace = true } +dotenv = { workspace = true } +alloy-primitives = { workspace = true } diff --git a/scripts/fees/bin/l1_fee_scalar.rs b/scripts/fees/bin/l1_fee_scalar.rs new file mode 100644 index 00000000..96716163 --- /dev/null +++ b/scripts/fees/bin/l1_fee_scalar.rs @@ -0,0 +1,56 @@ +use std::time::Instant; + +use alloy_primitives::U256; +use anyhow::Result; +use clap::Parser; +use op_succinct_fees::aggregate_fee_data; +use op_succinct_host_utils::fetcher::OPSuccinctDataFetcher; + +#[derive(Parser)] +struct Args { + #[clap(long)] + start: u64, + #[clap(long)] + end: u64, + #[clap(long, default_value = None)] + l1_fee_scalar: Option, + #[clap(long, default_value = ".env")] + env_file: String, +} + +#[tokio::main] +async fn main() -> Result<()> { + let args = Args::parse(); + dotenv::from_filename(args.env_file).ok(); + let fetcher = OPSuccinctDataFetcher::default(); + + let start_time = Instant::now(); + let (fee_data, modified_fee_data) = tokio::join!( + fetcher.get_l2_fee_data_range(args.start, args.end), + fetcher.get_l2_fee_data_with_modified_l1_fee_scalar( + args.start, + args.end, + args.l1_fee_scalar + ) + ); + let duration = start_time.elapsed(); + println!("Done getting fee data. 
Time taken: {:?}", duration); + + let fee_data = fee_data?; + let modified_fee_data = modified_fee_data?; + + let total_aggregate_fee_data = aggregate_fee_data(fee_data)?; + let modified_total_aggregate_fee_data = aggregate_fee_data(modified_fee_data)?; + + println!("{modified_total_aggregate_fee_data}"); + println!("{total_aggregate_fee_data}"); + + assert_eq!( + total_aggregate_fee_data.total_l1_fee, + modified_total_aggregate_fee_data.total_l1_fee + ); + + println!("Success!"); + + Ok(()) +} diff --git a/scripts/fees/src/lib.rs b/scripts/fees/src/lib.rs new file mode 100644 index 00000000..09e4c456 --- /dev/null +++ b/scripts/fees/src/lib.rs @@ -0,0 +1,41 @@ +use std::fmt; + +use alloy_primitives::U256; +use anyhow::Result; +use op_succinct_host_utils::fetcher::FeeData; + +pub struct AggregateFeeData { + pub start: u64, + pub end: u64, + pub num_transactions: u64, + pub total_l1_fee: U256, +} + +impl fmt::Display for AggregateFeeData { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let eth = self.total_l1_fee / U256::from(10).pow(U256::from(18)); + let gwei = (self.total_l1_fee / U256::from(10).pow(U256::from(9))) + % U256::from(10).pow(U256::from(9)); + write!( + f, + "Start: {}, End: {}, Aggregate: {} transactions, {}.{:09} ETH L1 fee", + self.start, self.end, self.num_transactions, eth, gwei + ) + } +} + +pub fn aggregate_fee_data(fee_data: Vec) -> Result { + let mut aggregate_fee_data = AggregateFeeData { + start: fee_data[0].block_number, + end: fee_data[fee_data.len() - 1].block_number, + num_transactions: 0, + total_l1_fee: U256::ZERO, + }; + + for data in fee_data { + aggregate_fee_data.num_transactions += 1; + aggregate_fee_data.total_l1_fee += data.l1_gas_cost; + } + + Ok(aggregate_fee_data) +} diff --git a/scripts/prove/bin/agg.rs b/scripts/prove/bin/agg.rs index 08cbdbad..a25ca767 100644 --- a/scripts/prove/bin/agg.rs +++ b/scripts/prove/bin/agg.rs @@ -2,10 +2,7 @@ use anyhow::Result; use cargo_metadata::MetadataCommand; use clap::Parser; 
use op_succinct_client_utils::boot::BootInfoStruct; -use op_succinct_host_utils::{ - fetcher::{OPSuccinctDataFetcher, RPCMode}, - get_agg_proof_stdin, -}; +use op_succinct_host_utils::{fetcher::OPSuccinctDataFetcher, get_agg_proof_stdin}; use sp1_sdk::{utils, HashableKey, ProverClient, SP1Proof, SP1ProofWithPublicValues}; use std::fs; @@ -67,10 +64,10 @@ async fn main() -> Result<()> { let prover = ProverClient::new(); let fetcher = OPSuccinctDataFetcher::default(); - let l2_chain_id = fetcher.get_chain_id(RPCMode::L2).await?; + let l2_chain_id = fetcher.get_l2_chain_id().await?; let (proofs, boot_infos) = load_aggregation_proof_data(args.proofs, l2_chain_id); let latest_checkpoint_head = fetcher - .get_header_by_number(RPCMode::L1, args.latest_checkpoint_head_nb) + .get_l1_header(args.latest_checkpoint_head_nb.into()) .await? .hash_slow(); let headers = fetcher diff --git a/scripts/prove/bin/multi.rs b/scripts/prove/bin/multi.rs index eedb07cf..b62b4859 100644 --- a/scripts/prove/bin/multi.rs +++ b/scripts/prove/bin/multi.rs @@ -1,7 +1,7 @@ use anyhow::Result; use clap::Parser; use op_succinct_host_utils::{ - fetcher::{CacheMode, OPSuccinctDataFetcher, RPCMode}, + fetcher::{CacheMode, OPSuccinctDataFetcher}, get_proof_stdin, stats::ExecutionStats, witnessgen::WitnessGenExecutor, @@ -91,7 +91,7 @@ async fn main() -> Result<()> { // Create a proof directory for the chain ID if it doesn't exist. 
let proof_dir = format!( "data/{}/proofs", - data_fetcher.get_chain_id(RPCMode::L2).await.unwrap() + data_fetcher.get_l2_chain_id().await.unwrap() ); if !std::path::Path::new(&proof_dir).exists() { fs::create_dir_all(&proof_dir).unwrap(); @@ -108,7 +108,7 @@ async fn main() -> Result<()> { .unwrap(); let execution_duration = start_time.elapsed(); - let l2_chain_id = data_fetcher.get_chain_id(RPCMode::L2).await.unwrap(); + let l2_chain_id = data_fetcher.get_l2_chain_id().await.unwrap(); let report_path = format!( "execution-reports/multi/{}/{}-{}.csv", l2_chain_id, args.start, args.end diff --git a/scripts/prove/bin/single.rs b/scripts/prove/bin/single.rs index bcc08871..f3b85280 100644 --- a/scripts/prove/bin/single.rs +++ b/scripts/prove/bin/single.rs @@ -1,7 +1,7 @@ use anyhow::Result; use clap::Parser; use op_succinct_host_utils::{ - fetcher::{CacheMode, OPSuccinctDataFetcher, RPCMode}, + fetcher::{CacheMode, OPSuccinctDataFetcher}, get_proof_stdin, stats::ExecutionStats, witnessgen::WitnessGenExecutor, @@ -36,6 +36,7 @@ async fn main() -> Result<()> { utils::setup_logger(); let data_fetcher = OPSuccinctDataFetcher::default(); + let l2_chain_id = data_fetcher.get_l2_chain_id().await?; let l2_safe_head = args.l2_block - 1; @@ -71,10 +72,7 @@ async fn main() -> Result<()> { let proof = prover.prove(&pk, sp1_stdin).plonk().run().unwrap(); // Create a proof directory for the chain ID if it doesn't exist. 
- let proof_dir = format!( - "data/{}/proofs", - data_fetcher.get_chain_id(RPCMode::L2).await.unwrap() - ); + let proof_dir = format!("data/{}/proofs", l2_chain_id); if !std::path::Path::new(&proof_dir).exists() { std::fs::create_dir_all(&proof_dir)?; } @@ -86,7 +84,6 @@ async fn main() -> Result<()> { let (_, report) = prover.execute(SINGLE_BLOCK_ELF, sp1_stdin).run().unwrap(); let execution_duration = start_time.elapsed(); - let l2_chain_id = data_fetcher.get_chain_id(RPCMode::L2).await.unwrap(); let report_path = format!( "execution-reports/single/{}/{}.csv", l2_chain_id, args.l2_block diff --git a/scripts/utils/bin/cost_estimator.rs b/scripts/utils/bin/cost_estimator.rs index 5645d43d..f5d8f780 100644 --- a/scripts/utils/bin/cost_estimator.rs +++ b/scripts/utils/bin/cost_estimator.rs @@ -3,7 +3,7 @@ use clap::Parser; use kona_host::HostCli; use log::info; use op_succinct_host_utils::{ - fetcher::{CacheMode, OPSuccinctDataFetcher, RPCMode}, + fetcher::{CacheMode, OPSuccinctDataFetcher}, get_proof_stdin, stats::ExecutionStats, witnessgen::WitnessGenExecutor, @@ -91,7 +91,7 @@ async fn run_native_data_generation( data_fetcher: &OPSuccinctDataFetcher, split_ranges: &[SpanBatchRange], ) -> Vec { - const CONCURRENT_NATIVE_HOST_RUNNERS: usize = 20; + const CONCURRENT_NATIVE_HOST_RUNNERS: usize = 5; // Split the entire range into chunks of size CONCURRENT_NATIVE_HOST_RUNNERS and process chunks // serially. Generate witnesses within each chunk in parallel. 
This prevents the RPC from @@ -294,7 +294,7 @@ async fn main() -> Result<()> { let args = HostArgs::parse(); let data_fetcher = OPSuccinctDataFetcher::default(); - let l2_chain_id = data_fetcher.get_chain_id(RPCMode::L2).await?; + let l2_chain_id = data_fetcher.get_l2_chain_id().await?; let split_ranges = split_range(args.start, args.end, l2_chain_id); diff --git a/scripts/utils/bin/fetch_rollup_config.rs b/scripts/utils/bin/fetch_rollup_config.rs index 19f87400..b1874890 100644 --- a/scripts/utils/bin/fetch_rollup_config.rs +++ b/scripts/utils/bin/fetch_rollup_config.rs @@ -1,4 +1,4 @@ -use alloy::{hex, signers::local::PrivateKeySigner}; +use alloy::{eips::BlockId, hex, signers::local::PrivateKeySigner}; use alloy_primitives::B256; use anyhow::{bail, Result}; use op_succinct_client_utils::{boot::hash_rollup_config, types::u32_to_u8}; @@ -60,7 +60,7 @@ async fn update_l2oo_config() -> Result<()> { // If we are not using a cached starting block number, set it to 10 blocks before the latest block on L2. if env::var("USE_CACHED_STARTING_BLOCK").unwrap_or("false".to_string()) != "true" { // Set the starting block number to 10 blocks before the latest block on L2. 
- let latest_block = data_fetcher.get_head(RPCMode::L2).await?; + let latest_block = data_fetcher.get_l2_header(BlockId::latest()).await?; l2oo_config.starting_block_number = latest_block.number - 20; } diff --git a/utils/host/Cargo.toml b/utils/host/Cargo.toml index afa4e162..1d3f2745 100644 --- a/utils/host/Cargo.toml +++ b/utils/host/Cargo.toml @@ -8,6 +8,7 @@ edition.workspace = true # workspace alloy = { workspace = true } +alloy-rlp.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true @@ -34,6 +35,9 @@ op-succinct-client-utils.workspace = true # op-alloy op-alloy-genesis.workspace = true op-alloy-rpc-types.workspace = true +op-alloy-network.workspace = true +op-alloy-protocol.workspace = true +op-alloy-consensus.workspace = true # kona kona-host.workspace = true diff --git a/utils/host/src/fetcher.rs b/utils/host/src/fetcher.rs index 73f8eec1..f91bce10 100644 --- a/utils/host/src/fetcher.rs +++ b/utils/host/src/fetcher.rs @@ -1,24 +1,34 @@ use alloy::{ - eips::BlockNumberOrTag, + eips::{BlockId, BlockNumberOrTag}, primitives::{Address, B256}, providers::{Provider, ProviderBuilder, RootProvider}, - rpc::types::Block, transports::http::{reqwest::Url, Client, Http}, }; use alloy_consensus::Header; use alloy_sol_types::SolValue; +use anyhow::anyhow; use anyhow::Result; use cargo_metadata::MetadataCommand; use kona_host::HostCli; use op_alloy_genesis::RollupConfig; -use op_alloy_rpc_types::{output::OutputResponse, safe_head::SafeHeadResponse}; +use op_alloy_network::{ + primitives::{BlockTransactions, BlockTransactionsKind}, + Optimism, +}; +use op_alloy_protocol::calculate_tx_l1_cost_fjord; +use op_alloy_rpc_types::{ + output::OutputResponse, safe_head::SafeHeadResponse, OpTransactionReceipt, +}; use op_succinct_client_utils::boot::BootInfoStruct; use serde_json::{json, Value}; use sp1_sdk::block_on; -use std::{cmp::Ordering, env, fs, path::Path, str::FromStr, sync::Arc, time::Duration}; +use std::{ + cmp::Ordering, 
collections::HashMap, env, fs, path::Path, str::FromStr, sync::Arc, + time::Duration, +}; use tokio::time::sleep; -use alloy_primitives::keccak256; +use alloy_primitives::{keccak256, Bytes, U256}; use crate::{ rollup_config::{get_rollup_config_path, merge_rollup_config, save_rollup_config}, @@ -29,10 +39,11 @@ use crate::{ /// The OPSuccinctDataFetcher struct is used to fetch the L2 output data and L2 claim data for a /// given block number. It is used to generate the boot info for the native host program. /// TODO: Add retries for all requests (3 retries). +/// TODO: We can generify some of these methods based on the Network (Ethereum, Optimism, etc.) types. pub struct OPSuccinctDataFetcher { pub rpc_config: RPCConfig, pub l1_provider: Arc>>, - pub l2_provider: Arc>>, + pub l2_provider: Arc, Optimism>>, pub rollup_config: RollupConfig, pub l1_block_time_secs: u64, } @@ -43,7 +54,7 @@ impl Default for OPSuccinctDataFetcher { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct RPCConfig { pub l1_rpc: String, pub l1_beacon_rpc: String, @@ -83,6 +94,15 @@ pub struct BlockInfo { pub block_number: u64, pub transaction_count: u64, pub gas_used: u64, + pub l1_gas_cost: U256, +} + +/// The fee data for a block. +pub struct FeeData { + pub block_number: u64, + pub tx_index: u64, + pub tx_hash: B256, + pub l1_gas_cost: U256, } impl OPSuccinctDataFetcher { @@ -124,6 +144,274 @@ impl OPSuccinctDataFetcher { fetcher } + pub async fn get_l2_chain_id(&self) -> Result { + Ok(self.l2_provider.get_chain_id().await?) 
+ } + + pub async fn get_l2_head(&self) -> Header { + self.l2_provider + .get_block_by_number(BlockNumberOrTag::Latest, false) + .await + .unwrap() + .unwrap() + .header + .try_into() + .unwrap() + } + + pub async fn get_l2_header_by_number(&self, block_number: u64) -> Header { + self.l2_provider + .get_block_by_number(block_number.into(), false) + .await + .unwrap() + .unwrap() + .header + .try_into() + .unwrap() + } + + /// Manually calculate the L1 fee data for a range of blocks. Allows for modifying the L1 fee scalar. + pub async fn get_l2_fee_data_with_modified_l1_fee_scalar( + &self, + start: u64, + end: u64, + custom_l1_fee_scalar: Option, + ) -> Result> { + use futures::stream::{self, StreamExt}; + + // Fetch all transactions in parallel. + // Return a tuple of the block number and the transactions. + let transactions: Vec<(u64, Vec)> = stream::iter(start..=end) + .map(|block_number| async move { + let block = self + .l2_provider + .get_block(block_number.into(), BlockTransactionsKind::Hashes) + .await? + .unwrap(); + match block.transactions { + BlockTransactions::Hashes(txs) => Ok((block_number, txs)), + _ => Err(anyhow::anyhow!("Unsupported transaction type")), + } + }) + .buffered(100) + .collect::)>>>() + .await + .into_iter() + .filter_map(Result::ok) + .collect(); + + // Create a map of the block number to the transactions. + let block_number_to_transactions: HashMap> = + transactions.into_iter().collect(); + + // Fetch all of the L2 block receipts in parallel. + let block_receipts: Vec<(u64, Vec)> = stream::iter(start..=end) + .map(|block_number| async move { + ( + block_number, + self.l2_provider + .get_block_receipts(block_number.into()) + .await + .unwrap() + .unwrap(), + ) + }) + .buffered(100) + .collect::)>>() + .await; + + // Get all the encoded transactions for each block number in parallel. 
+ let block_number_to_encoded_transactions = stream::iter(block_number_to_transactions) + .map(|(block_number, transactions)| async move { + let encoded_transactions = stream::iter(transactions) + .map(|tx_hash| async move { + self.l2_provider + .client() + .request::<&[B256; 1], Bytes>("debug_getRawTransaction", &[tx_hash]) + .await + .map_err(|e| anyhow!("Error fetching transaction: {e}")) + .unwrap() + }) + .buffered(100) + .collect::>() + .await; + (block_number, encoded_transactions) + }) + .buffered(100) + .collect::>>() + .await; + + // Zip the block number to encoded transactions with the block number to receipts. + let block_number_to_receipts_and_transactions: HashMap< + u64, + (Vec, Vec), + > = block_receipts + .into_iter() + .filter_map(|(block_number, receipts)| { + block_number_to_encoded_transactions + .get(&block_number) + .map(|transactions| (block_number, (receipts, transactions.clone()))) + }) + .collect(); + + let mut fee_data = Vec::new(); + for (block_number, (receipts, transactions)) in block_number_to_receipts_and_transactions { + for (transaction, receipt) in transactions.iter().zip(receipts) { + let l1_fee_scalar = if let Some(custom_l1_fee_scalar) = custom_l1_fee_scalar { + custom_l1_fee_scalar + } else { + U256::from(receipt.l1_block_info.l1_base_fee_scalar.unwrap_or(0)) + }; + // Get the Fjord L1 cost of the transaction. + let l1_gas_cost = calculate_tx_l1_cost_fjord( + transaction.as_ref(), + U256::from(receipt.l1_block_info.l1_gas_price.unwrap_or(0)), + l1_fee_scalar, + U256::from(receipt.l1_block_info.l1_blob_base_fee.unwrap_or(0)), + U256::from(receipt.l1_block_info.l1_blob_base_fee_scalar.unwrap_or(0)), + ); + + fee_data.push(FeeData { + block_number, + tx_index: receipt.inner.transaction_index.unwrap(), + tx_hash: receipt.inner.transaction_hash, + l1_gas_cost, + }); + } + } + Ok(fee_data) + } + + /// Get the fee data for a range of blocks. Extracts the l1 fee data from the receipts. 
+ pub async fn get_l2_fee_data_range(&self, start: u64, end: u64) -> Result> { + let l2_provider = self.l2_provider.clone(); + + use futures::stream::{self, StreamExt}; + + // Only fetch 100 receipts at a time to better use system resources. Increases stability. + let fee_data = stream::iter(start..=end) + .map(|block_number| { + let l2_provider = l2_provider.clone(); + async move { + let receipt = l2_provider + .get_block_receipts(block_number.into()) + .await + .unwrap(); + let transactions = receipt.unwrap(); + let block_fee_data: Vec = transactions + .iter() + .enumerate() + .map(|(tx_index, tx)| FeeData { + block_number, + tx_index: tx_index as u64, + tx_hash: tx.inner.transaction_hash, + l1_gas_cost: U256::from(tx.l1_block_info.l1_fee.unwrap_or(0)), + }) + .collect(); + block_fee_data + } + }) + .buffered(100) + .collect::>() + .await + .into_iter() + .flatten() + .collect(); + Ok(fee_data) + } + + /// Get the aggregate block statistics for a range of blocks. + pub async fn get_l2_block_data_range(&self, start: u64, end: u64) -> Result> { + use futures::stream::{self, StreamExt}; + + let l2_provider = self.l2_provider.clone(); + let block_data = stream::iter(start..=end) + .map(|block_number| { + let l2_provider = l2_provider.clone(); + async move { + let block = l2_provider + .get_block_by_number(block_number.into(), false) + .await? + .unwrap(); + let receipts = l2_provider + .get_block_receipts(block_number.into()) + .await? + .unwrap(); + let l1_gas_cost: U256 = receipts + .iter() + .map(|tx| U256::from(tx.l1_block_info.l1_fee.unwrap_or(0))) + .sum(); + Ok(BlockInfo { + block_number, + transaction_count: block.transactions.len() as u64, + gas_used: block.header.gas_used, + l1_gas_cost, + }) + } + }) + .buffered(100) + .collect::>>() + .await; + + block_data.into_iter().collect() + } + + pub async fn get_l1_header(&self, block_number: BlockId) -> Result
{ + Ok(self + .l1_provider + .get_block(block_number, alloy::rpc::types::BlockTransactionsKind::Full) + .await? + .unwrap() + .header + .try_into() + .unwrap()) + } + + pub async fn get_l2_header(&self, block_number: BlockId) -> Result
{ + Ok(self + .l2_provider + .get_block(block_number, BlockTransactionsKind::Full) + .await? + .unwrap() + .header + .try_into() + .unwrap()) + } + + pub async fn find_l1_block_hash_by_timestamp(&self, target_timestamp: u64) -> Result { + let latest_block = self + .l1_provider + .get_block(BlockId::latest(), BlockTransactionsKind::Hashes) + .await? + .unwrap(); + let mut low = 0; + let mut high = latest_block.header.number; + + while low <= high { + let mid = (low + high) / 2; + let block = self + .l1_provider + .get_block(mid.into(), BlockTransactionsKind::Hashes) + .await? + .unwrap(); + let block_timestamp = block.header.timestamp; + + match block_timestamp.cmp(&target_timestamp) { + Ordering::Equal => return Ok(block.header.hash.0.into()), + Ordering::Less => low = mid + 1, + Ordering::Greater => high = mid - 1, + } + } + + // Return the block hash of the closest block after the target timestamp + let block = self + .l1_provider + .get_block(low.into(), BlockTransactionsKind::Hashes) + .await? + .unwrap(); + Ok(block.header.hash.0.into()) + } + /// Get the RPC URL for the given RPC mode. pub fn get_rpc_url(&self, rpc_mode: RPCMode) -> String { match rpc_mode { @@ -134,19 +422,6 @@ impl OPSuccinctDataFetcher { } } - /// Get the provider for the given RPC mode. Note: Will panic if the RPC mode is not L1 or L2. - /// Note: The provider can be dropped by the Tokio runtime if it is not used for a long time. Be - /// careful when using this function. - pub fn get_provider(&self, rpc_mode: RPCMode) -> Arc>> { - match rpc_mode { - RPCMode::L1 => self.l1_provider.clone(), - RPCMode::L2 => self.l2_provider.clone(), - RPCMode::L1Beacon | RPCMode::L2Node => { - panic!("L1Beacon and L2Node modes do not have associated providers") - } - } - } - /// Fetch the rollup config. Combines the rollup config from `optimism_rollupConfig` and the /// chain config from `debug_chainConfig`. 
pub async fn fetch_rollup_config(&self) -> Result { @@ -201,9 +476,7 @@ impl OPSuccinctDataFetcher { let mut earliest_l1_header: Option
= None; for boot_info in boot_infos { - let l1_block_header = self - .get_header_by_hash(RPCMode::L1, boot_info.l1Head) - .await?; + let l1_block_header = self.get_l1_header(boot_info.l1Head.into()).await?; if l1_block_header.number < earliest_block_num { earliest_block_num = l1_block_header.number; earliest_l1_header = Some(l1_block_header); @@ -222,8 +495,7 @@ impl OPSuccinctDataFetcher { while block_number <= end { let batch_end = block_number + batch_size - 1; let batch_headers: Vec
= futures::future::join_all( - (block_number..=batch_end.min(end)) - .map(|num| self.get_header_by_number(RPCMode::L1, num)), + (block_number..=batch_end.min(end)).map(|num| self.get_l1_header(num.into())), ) .await .into_iter() @@ -248,9 +520,7 @@ impl OPSuccinctDataFetcher { let start_header = self.get_earliest_l1_head_in_batch(boot_infos).await?; // Fetch the full header for the latest L1 Head (which is validated on chain). - let latest_header = self - .get_header_by_hash(RPCMode::L1, checkpoint_block_hash) - .await?; + let latest_header = self.get_l1_header(checkpoint_block_hash.into()).await?; // Create a vector of futures for fetching all headers let headers = self @@ -260,115 +530,6 @@ impl OPSuccinctDataFetcher { Ok(headers) } - pub async fn get_header_by_hash(&self, rpc_mode: RPCMode, block_hash: B256) -> Result
{ - let provider = self.get_provider(rpc_mode); - let header = provider - .get_block_by_hash(block_hash, alloy::rpc::types::BlockTransactionsKind::Full) - .await? - .unwrap() - .header; - Ok(header.try_into().unwrap()) - } - - pub async fn get_chain_id(&self, rpc_mode: RPCMode) -> Result { - let provider = self.get_provider(rpc_mode); - let chain_id = provider.get_chain_id().await?; - Ok(chain_id) - } - - pub async fn get_head(&self, rpc_mode: RPCMode) -> Result
{ - let provider = self.get_provider(rpc_mode); - let header = provider - .get_block_by_number(BlockNumberOrTag::Latest, false) - .await? - .unwrap() - .header; - Ok(header.try_into().unwrap()) - } - - pub async fn get_header_by_number( - &self, - rpc_mode: RPCMode, - block_number: u64, - ) -> Result
{ - let provider = self.get_provider(rpc_mode); - let header = provider - .get_block_by_number(block_number.into(), false) - .await? - .unwrap() - .header; - Ok(header.try_into().unwrap()) - } - - /// Get the block data for a range of blocks inclusive. - pub async fn get_block_data_range( - &self, - rpc_mode: RPCMode, - start: u64, - end: u64, - ) -> Result> { - let mut block_data = Vec::new(); - for block_number in start..=end { - let provider = self.get_provider(rpc_mode); - let block = provider - .get_block_by_number(block_number.into(), false) - .await? - .unwrap(); - block_data.push(BlockInfo { - block_number, - transaction_count: block.transactions.len() as u64, - gas_used: block.header.gas_used, - }); - } - Ok(block_data) - } - - pub async fn get_block_by_number(&self, rpc_mode: RPCMode, block_number: u64) -> Result { - let provider = self.get_provider(rpc_mode); - let block = provider - .get_block_by_number(block_number.into(), false) - .await? - .unwrap(); - Ok(block) - } - - /// Find the block with the closest timestamp to the target timestamp. - async fn find_block_hash_by_timestamp( - &self, - rpc_mode: RPCMode, - target_timestamp: u64, - ) -> Result { - let provider = self.get_provider(rpc_mode); - let latest_block = provider - .get_block_by_number(BlockNumberOrTag::Latest, false) - .await? - .unwrap(); - let mut low = 0; - let mut high = latest_block.header.number; - - while low <= high { - let mid = (low + high) / 2; - let block = provider - .get_block_by_number(mid.into(), false) - .await? - .unwrap(); - let block_timestamp = block.header.timestamp; - - match block_timestamp.cmp(&target_timestamp) { - Ordering::Equal => return Ok(block.header.hash.0.into()), - Ordering::Less => low = mid + 1, - Ordering::Greater => high = mid - 1, - } - } - - // Return the block hash of the closest block after the target timestamp - let block = provider - .get_block_by_number(low.into(), false) - .await? 
- .unwrap(); - Ok(block.header.hash.0.into()) - } - /// Get the L2 output data for a given block number and save the boot info to a file in the data /// directory with block_number. Return the arguments to be passed to the native host for /// datagen. @@ -509,20 +670,16 @@ impl OPSuccinctDataFetcher { /// Get the L1 block time in seconds. async fn get_l1_block_time(&self) -> Result { - let l1_provider = self.l1_provider.clone(); - let l1_head = self.get_head(RPCMode::L1).await?; + let l1_head = self.get_l1_header(BlockId::latest()).await?; let l1_head_minus_1 = l1_head.number - 1; - let l1_block_minus_1 = l1_provider - .get_block_by_number(l1_head_minus_1.into(), false) - .await? - .unwrap(); - Ok(l1_head.timestamp - l1_block_minus_1.header.timestamp) + let l1_block_minus_1 = self.get_l1_header(l1_head_minus_1.into()).await?; + Ok(l1_head.timestamp - l1_block_minus_1.timestamp) } /// Get the L1 block from which the `l2_end_block` can be derived. async fn get_l1_head_with_safe_head(&self, l2_end_block: u64) -> Result { - let latest_l1_header = self.get_head(RPCMode::L1).await?; + let latest_l1_header = self.get_l1_header(BlockId::latest()).await?; // Get the l1 origin of the l2 end block. let l2_end_block_hex = format!("0x{:x}", l2_end_block); @@ -586,14 +743,11 @@ impl OPSuccinctDataFetcher { }; // Get L1 head. - let l2_block_timestamp = self - .get_header_by_number(RPCMode::L2, l2_end_block) - .await? - .timestamp; + let l2_block_timestamp = self.get_l2_header(l2_end_block.into()).await?.timestamp; let target_timestamp = l2_block_timestamp + (max_batch_post_delay_minutes * 60); Ok(self - .find_block_hash_by_timestamp(RPCMode::L1, target_timestamp) + .find_l1_block_hash_by_timestamp(target_timestamp) .await?) 
} } @@ -601,14 +755,16 @@ impl OPSuccinctDataFetcher { #[cfg(test)] mod tests { - use crate::fetcher::{OPSuccinctDataFetcher, RPCMode}; + use crate::fetcher::OPSuccinctDataFetcher; #[tokio::test] #[cfg(test)] async fn test_get_l1_head() { + use alloy::eips::BlockId; + dotenv::dotenv().ok(); let fetcher = OPSuccinctDataFetcher::new().await; - let latest_l2_block = fetcher.get_head(RPCMode::L2).await.unwrap(); + let latest_l2_block = fetcher.get_l2_header(BlockId::latest()).await.unwrap(); // Get the L2 block number from 1 hour ago. let l2_end_block = latest_l2_block.number - ((60 * 60) / fetcher.rollup_config.block_time); diff --git a/utils/host/src/stats.rs b/utils/host/src/stats.rs index 05710432..b1b7b6b4 100644 --- a/utils/host/src/stats.rs +++ b/utils/host/src/stats.rs @@ -1,6 +1,7 @@ use std::fmt; -use crate::fetcher::{OPSuccinctDataFetcher, RPCMode}; +use crate::fetcher::OPSuccinctDataFetcher; +use alloy_primitives::U256; use num_format::{Locale, ToFormattedString}; use serde::{Deserialize, Serialize}; use sp1_sdk::{CostEstimator, ExecutionReport}; @@ -23,6 +24,7 @@ pub struct ExecutionStats { pub nb_blocks: u64, pub nb_transactions: u64, pub eth_gas_used: u64, + pub l1_fees: U256, pub cycles_per_block: u64, pub cycles_per_transaction: u64, pub transactions_per_block: u64, @@ -115,7 +117,7 @@ impl ExecutionStats { end: u64, ) { let block_data = data_fetcher - .get_block_data_range(RPCMode::L2, start, end) + .get_l2_block_data_range(start, end) .await .expect("Failed to fetch block data range."); @@ -123,6 +125,7 @@ impl ExecutionStats { self.batch_end = end; self.nb_transactions = block_data.iter().map(|b| b.transaction_count).sum(); self.eth_gas_used = block_data.iter().map(|b| b.gas_used).sum(); + self.l1_fees = block_data.iter().map(|b| b.l1_gas_cost).sum(); self.nb_blocks = end - start + 1; }