diff --git a/benches/gigagas.rs b/benches/gigagas.rs
index bc7d5b8..31c604a 100644
--- a/benches/gigagas.rs
+++ b/benches/gigagas.rs
@@ -62,11 +62,12 @@ fn bench(c: &mut Criterion, name: &str, db: InMemoryDB, txs: Vec<TxEnv>) {
             let root = Span::root(format!("{name} Grevm Parallel"), SpanContext::random());
             let _guard = root.set_local_parent();
             metrics::with_local_recorder(&recorder, || {
-                let executor = GrevmScheduler::new(
+                let mut executor = GrevmScheduler::new(
                     black_box(SpecId::LATEST),
                     black_box(env.clone()),
                     black_box(db.clone()),
                     black_box(txs.clone()),
+                    None,
                 );
 
                 let _ = executor.parallel_execute();
@@ -92,11 +93,12 @@ fn bench(c: &mut Criterion, name: &str, db: InMemoryDB, txs: Vec<TxEnv>) {
             let root = Span::root(format!("{name} Grevm Sequential"), SpanContext::random());
             let _guard = root.set_local_parent();
             metrics::with_local_recorder(&recorder, || {
-                let executor = GrevmScheduler::new(
+                let mut executor = GrevmScheduler::new(
                     black_box(SpecId::LATEST),
                     black_box(env.clone()),
                     black_box(db.clone()),
                     black_box(txs.clone()),
+                    None,
                 );
 
                 let _ = executor.force_sequential_execute();
diff --git a/benches/mainnet.rs b/benches/mainnet.rs
index 1673728..512a541 100644
--- a/benches/mainnet.rs
+++ b/benches/mainnet.rs
@@ -53,11 +53,12 @@ fn benchmark_mainnet(c: &mut Criterion) {
     group.bench_function("Grevm Parallel", |b| {
         b.iter(|| {
-            let executor = GrevmScheduler::new(
+            let mut executor = GrevmScheduler::new(
                 black_box(spec_id),
                 black_box(env.clone()),
                 black_box(db.clone()),
                 black_box(txs.clone()),
+                None,
             );
             executor.parallel_execute()
         })
@@ -65,11 +66,12 @@ fn benchmark_mainnet(c: &mut Criterion) {
     group.bench_function("Grevm Sequential", |b| {
         b.iter(|| {
-            let executor = GrevmScheduler::new(
+            let mut executor = GrevmScheduler::new(
                 black_box(spec_id),
                 black_box(env.clone()),
                 black_box(db.clone()),
                 black_box(txs.clone()),
+                None,
             );
             executor.force_sequential_execute()
         })
diff --git a/src/lib.rs b/src/lib.rs
index bebcedd..b2e76db 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -25,7 +25,8 @@ use tokio::runtime::{Builder, Runtime};
 mod hint;
 mod partition;
 mod scheduler;
-mod storage;
+/// Manages storage-related operations.
+pub mod storage;
 mod tx_dependency;
 
 use rayon::prelude::{IntoParallelIterator, ParallelIterator};
diff --git a/src/scheduler.rs b/src/scheduler.rs
index a80eefa..25f8d8f 100644
--- a/src/scheduler.rs
+++ b/src/scheduler.rs
@@ -2,7 +2,7 @@ use crate::{
     fork_join_util,
     hint::ParallelExecutionHints,
     partition::PartitionExecutor,
-    storage::{LazyUpdateValue, SchedulerDB},
+    storage::{LazyUpdateValue, SchedulerDB, State},
     tx_dependency::{DependentTxsVec, TxDependency},
     GrevmError, LocationAndType, ResultAndTransition, TransactionStatus, TxId, CPU_CORES,
     GREVM_RUNTIME, MAX_NUM_ROUND,
@@ -18,11 +18,10 @@ use std::{
 use ahash::{AHashMap as HashMap, AHashSet as HashSet};
 use metrics::{counter, gauge};
 use revm::{
-    db::{states::bundle_state::BundleRetention, BundleState},
     primitives::{
         AccountInfo, Address, Bytecode, EVMError, Env, ExecutionResult, SpecId, TxEnv, B256,
         U256,
     },
-    CacheState, DatabaseRef, EvmBuilder,
+    CacheState, DatabaseCommit, DatabaseRef, EvmBuilder,
 };
 use tracing::info;
@@ -70,8 +69,6 @@ struct ExecuteMetrics {
     commit_transition_time: metrics::Counter,
     /// Time taken to execute transactions in sequential(in nanoseconds).
     sequential_execute_time: metrics::Counter,
-    /// Time taken to build output(in nanoseconds).
-    build_output_time: metrics::Counter,
 }
 
 impl Default for ExecuteMetrics {
@@ -97,7 +94,6 @@ impl Default for ExecuteMetrics {
             merge_write_set_time: counter!("grevm.merge_write_set_time"),
             commit_transition_time: counter!("grevm.commit_transition_time"),
             sequential_execute_time: counter!("grevm.sequential_execute_time"),
-            build_output_time: counter!("grevm.build_output_time"),
         }
     }
 }
@@ -105,8 +101,6 @@ impl Default for ExecuteMetrics {
 /// The output of the execution of a block.
 #[derive(Debug)]
 pub struct ExecuteOutput {
-    /// The changed state of the block after execution.
-    pub state: BundleState,
     /// All the results of the transactions in the block.
     pub results: Vec<ExecutionResult>,
 }
@@ -161,7 +155,7 @@ where
     /// The database utilized by the scheduler.
     /// It is shared among the partition executors,
     /// allowing them to read the final state from previous rounds.
-    database: Arc<SchedulerDB<DB>>,
+    pub database: Arc<SchedulerDB<DB>>,
 
     /// The dependency relationship between transactions.
     /// Used to construct the next round of transaction partitions.
@@ -208,12 +202,14 @@ impl<Error> DatabaseRef for DatabaseWrapper<Error> {
     }
 }
-/// Creates a new GrevmScheduler instance.
+/// Creates a new GrevmScheduler instance using DB type without 'static constraint.
+/// If `state` is not None, it will be used as the initial state before the block execution.
 pub fn new_grevm_scheduler<DB>(
     spec_id: SpecId,
     env: Env,
     db: DB,
     txs: Vec<TxEnv>,
+    state: Option<Box<State>>,
 ) -> GrevmScheduler<DatabaseWrapper<DB::Error>>
 where
     DB: DatabaseRef + Send + Sync,
 {
@@ -230,7 +226,7 @@ where
         >(boxed)
     };
     let db: DatabaseWrapper<DB::Error> = DatabaseWrapper(db);
-    GrevmScheduler::new(spec_id, env, db, Arc::new(txs))
+    GrevmScheduler::new(spec_id, env, db, Arc::new(txs), state)
 }
 
 impl<DB> GrevmScheduler<DB>
 where
     DB: DatabaseRef + Send + Sync + 'static,
     DB::Error: Send + Sync,
 {
     /// Creates a new GrevmScheduler instance.
-    #[fastrace::trace]
-    pub fn new(spec_id: SpecId, env: Env, db: DB, txs: Arc<Vec<TxEnv>>) -> Self {
+    /// If `state` is not None, it will be used as the initial state before the block execution.
+    pub fn new(
+        spec_id: SpecId,
+        env: Env,
+        db: DB,
+        txs: Arc<Vec<TxEnv>>,
+        state: Option<Box<State>>,
+    ) -> Self {
         let coinbase = env.block.coinbase;
         let num_partitions = *CPU_CORES * 2 + 1; // 2 * cpu + 1 for initial partition number
         let num_txs = txs.len();
@@ -250,7 +252,7 @@ where
             env,
             coinbase,
             txs,
-            database: Arc::new(SchedulerDB::new(db)),
+            database: Arc::new(SchedulerDB::new(state.unwrap_or_default(), db)),
             tx_dependencies: TxDependency::new(num_txs),
             tx_states: Arc::new(vec![TxState::new(); num_txs]),
             num_partitions,
@@ -303,13 +305,18 @@ where
         }
 
         let start = Instant::now();
-        GREVM_RUNTIME.block_on(async {
-            let mut tasks = vec![];
-            for executor in &self.partition_executors {
-                let executor = executor.clone();
-                tasks.push(GREVM_RUNTIME.spawn(async move { executor.write().unwrap().execute() }));
-            }
-            futures::future::join_all(tasks).await;
+        // Do not block tokio runtime if we are in async context
+        tokio::task::block_in_place(|| {
+            GREVM_RUNTIME.block_on(async {
+                let mut tasks = vec![];
+                for executor in &self.partition_executors {
+                    let executor = executor.clone();
+                    tasks.push(
+                        GREVM_RUNTIME.spawn(async move { executor.write().unwrap().execute() }),
+                    );
+                }
+                futures::future::join_all(tasks).await;
+            })
         });
         self.metrics.parallel_execute_time.increment(start.elapsed().as_nanos() as u64);
@@ -501,7 +508,7 @@ where
         let database = Arc::get_mut(&mut self.database).unwrap();
         if self.num_finality_txs < self.txs.len() {
             // Merging these states is only useful when there is a next round of execution.
-            Self::merge_not_modified_state(&mut database.cache, partition_state);
+            Self::merge_not_modified_state(&mut database.state.cache, partition_state);
         }
 
         #[allow(invalid_reference_casting)]
@@ -599,21 +606,6 @@ where
         Ok(())
     }
 
-    #[fastrace::trace]
-    fn build_output(&mut self) -> ExecuteOutput {
-        let start = Instant::now();
-        // MUST drop the `PartitionExecutor::scheduler_db` before get mut
-        self.partition_executors.clear();
-        let database = Arc::get_mut(&mut self.database).unwrap();
-        database.merge_transitions(BundleRetention::Reverts);
-        let output = ExecuteOutput {
-            state: std::mem::take(&mut database.bundle_state),
-            results: std::mem::take(&mut self.results),
-        };
-        self.metrics.build_output_time.increment(start.elapsed().as_nanos() as u64);
-        output
-    }
-
     #[fastrace::trace]
     fn parse_hints(&mut self) {
         let start = Instant::now();
@@ -643,7 +635,7 @@ where
         if self.txs.len() < self.num_partitions && !force_parallel {
             self.execute_remaining_sequential()?;
-            return Ok(self.build_output());
+            return Ok(ExecuteOutput { results: std::mem::take(&mut self.results) });
         }
 
         if !force_sequential {
@@ -668,17 +660,17 @@ where
             self.execute_remaining_sequential()?;
         }
 
-        Ok(self.build_output())
+        Ok(ExecuteOutput { results: std::mem::take(&mut self.results) })
     }
 
     /// Execute transactions in parallel.
-    pub fn parallel_execute(mut self) -> Result<ExecuteOutput, GrevmError<DB::Error>> {
+    pub fn parallel_execute(&mut self) -> Result<ExecuteOutput, GrevmError<DB::Error>> {
         self.evm_execute(None, true, None)
     }
 
     /// Execute transactions parallelly with or without hints.
     pub fn force_parallel_execute(
-        mut self,
+        &mut self,
         with_hints: bool,
         num_partitions: Option<usize>,
     ) -> Result<ExecuteOutput, GrevmError<DB::Error>> {
@@ -686,7 +678,13 @@ where
     }
 
     /// Execute transactions sequentially.
-    pub fn force_sequential_execute(mut self) -> Result<ExecuteOutput, GrevmError<DB::Error>> {
+    pub fn force_sequential_execute(&mut self) -> Result<ExecuteOutput, GrevmError<DB::Error>> {
         self.evm_execute(Some(true), false, None)
     }
+
+    /// Take the state of the scheduler.
+    /// It is typically called after the execution.
+    pub fn take_state(self) -> Box<State> {
+        Arc::try_unwrap(self.database).ok().unwrap().state
+    }
 }
diff --git a/src/storage.rs b/src/storage.rs
index 50f1001..1abf47a 100644
--- a/src/storage.rs
+++ b/src/storage.rs
@@ -8,7 +8,7 @@ use revm::{
     },
     precompile::Address,
     primitives::{Account, AccountInfo, Bytecode, EvmState, B256, BLOCK_HASH_HISTORY, U256},
-    CacheState, Database, DatabaseRef, TransitionAccount, TransitionState,
+    CacheState, Database, DatabaseCommit, DatabaseRef, TransitionAccount, TransitionState,
 };
 use std::{
     collections::{btree_map, hash_map, BTreeMap, HashMap},
@@ -88,7 +88,11 @@ impl ParallelBundleState for BundleState {
         transitions: TransitionState,
         retention: BundleRetention,
     ) {
-        assert!(self.state.is_empty());
+        if !self.state.is_empty() {
+            self.apply_transitions_and_create_reverts(transitions, retention);
+            return;
+        }
+
         let include_reverts = retention.includes_reverts();
         // pessimistically pre-allocate assuming _all_ accounts changed.
         let reverts_capacity = if include_reverts { transitions.transitions.len() } else { 0 };
@@ -154,22 +158,13 @@ impl ParallelBundleState for BundleState {
     }
 }
 
-/// SchedulerDB is a database wrapper that manages state transitions and caching for the EVM.
-/// It maintains a cache of committed data, a transition state for ongoing transactions, and a
-/// bundle state for finalizing block state changes. It also tracks block hashes for quick access.
-///
-/// After each execution round, SchedulerDB caches the committed data of finalized
-/// transactions and the read-only data accessed during execution.
-/// This cached data serves as the initial state for the next round of partition executors.
-/// When reverting to sequential execution, these cached states will include both
-/// the changes from EVM execution and the cached/loaded accounts and storages.
-pub(crate) struct SchedulerDB<DB> {
+#[derive(Debug)]
+pub struct State {
     /// Cache the committed data of finality txns and the read-only data during execution after
     /// each round of execution. Used as the initial state for the next round of partition
     /// executors. When fall back to sequential execution, used as cached state contains both
     /// changed from evm execution and cached/loaded account/storages.
     pub cache: CacheState,
-    pub database: DB,
     /// Block state, it aggregates transactions transitions into one state.
     ///
     /// Build reverts and state that gets applied to the state.
     pub transition_state: Option<TransitionState>,
     pub bundle_state: BundleState,
     /// If EVM asks for block hash we will first check if they are found here.
     /// and then ask the database.
     pub block_hashes: BTreeMap<u64, B256>,
 }
 
@@ -188,29 +183,66 @@ pub(crate) struct SchedulerDB<DB> {
-impl<DB> SchedulerDB<DB> {
-    pub(crate) fn new(database: DB) -> Self {
+impl Default for State {
+    fn default() -> Self {
         Self {
             cache: CacheState::new(false),
-            database,
             transition_state: Some(TransitionState::default()),
             bundle_state: BundleState::default(),
             block_hashes: BTreeMap::new(),
         }
     }
+}
+
+impl State {
+    /// Takes the current bundle state.
+    /// It is typically called after the bundle state has been finalized.
+    pub fn take_bundle(&mut self) -> BundleState {
+        std::mem::take(&mut self.bundle_state)
+    }
+
+    /// Take all transitions and merge them inside bundle state.
+    /// This action will create final post state and all reverts so that
+    /// we at any time revert state of bundle to the state before transition is applied.
+    #[fastrace::trace]
+    pub fn merge_transitions(&mut self, retention: BundleRetention) {
+        if let Some(transition_state) = self.transition_state.as_mut().map(TransitionState::take) {
+            self.bundle_state
+                .parallel_apply_transitions_and_create_reverts(transition_state, retention);
+        }
+    }
+}
+
+/// SchedulerDB is a database wrapper that manages state transitions and caching for the EVM.
+/// It maintains a cache of committed data, a transition state for ongoing transactions, and a
+/// bundle state for finalizing block state changes. It also tracks block hashes for quick access.
+///
+/// After each execution round, SchedulerDB caches the committed data of finalized
+/// transactions and the read-only data accessed during execution.
+/// This cached data serves as the initial state for the next round of partition executors.
+/// When reverting to sequential execution, these cached states will include both
+/// the changes from EVM execution and the cached/loaded accounts and storages.
+#[allow(missing_debug_implementations)]
+pub struct SchedulerDB<DB> {
+    /// The cached state during execution.
+    pub state: Box<State>,
+
+    /// The underlying database that stores the state.
+    pub database: DB,
+}
+
+impl<DB> SchedulerDB<DB> {
+    /// Create new SchedulerDB with database
+    pub fn new(state: Box<State>, database: DB) -> Self {
+        Self { state, database }
+    }
 
     /// This function is used to cache the committed data of finality txns and the read-only data
     /// during execution. These data will be used as the initial state for the next round of
     /// partition executors. When falling back to sequential execution, these cached states will
     /// include both the changes from EVM execution and the cached/loaded accounts/storages.
     pub(crate) fn commit_transition(&mut self, transitions: Vec<(Address, TransitionAccount)>) {
-        apply_transition_to_cache(&mut self.cache, &transitions);
-        self.apply_transition(transitions);
-    }
-
-    /// Fall back to sequential execute
-    pub(crate) fn commit(&mut self, changes: HashMap<Address, Account>) {
-        let transitions = self.cache.apply_evm_state(changes);
+        apply_transition_to_cache(&mut self.state.cache, &transitions);
         self.apply_transition(transitions);
     }
@@ -218,21 +250,10 @@ impl<DB> SchedulerDB<DB> {
     /// Apply transition to transition state.
     /// This will be used to create final post state and reverts.
     fn apply_transition(&mut self, transitions: Vec<(Address, TransitionAccount)>) {
         // add transition to transition state.
-        if let Some(s) = self.transition_state.as_mut() {
+        if let Some(s) = self.state.transition_state.as_mut() {
             s.add_transitions(transitions)
         }
     }
-
-    /// Take all transitions and merge them inside bundle state.
-    /// This action will create final post state and all reverts so that
-    /// we at any time revert state of bundle to the state before transition is applied.
-    #[fastrace::trace]
-    pub(crate) fn merge_transitions(&mut self, retention: BundleRetention) {
-        if let Some(transition_state) = self.transition_state.as_mut().map(TransitionState::take) {
-            self.bundle_state
-                .parallel_apply_transitions_and_create_reverts(transition_state, retention);
-        }
-    }
 }
 
 impl<DB> SchedulerDB<DB>
 where
     DB: DatabaseRef,
 {
     /// Load account from cache or database.
     /// If account is not found in cache, it will be loaded from database.
     fn load_cache_account(&mut self, address: Address) -> Result<&mut CacheAccount, DB::Error> {
-        match self.cache.accounts.entry(address) {
+        match self.state.cache.accounts.entry(address) {
             hash_map::Entry::Vacant(entry) => {
                 let info = self.database.basic_ref(address)?;
                 Ok(entry.insert(into_cache_account(info)))
             }
@@ -273,7 +294,7 @@ where
             }
         }
         // append transition
-        if let Some(s) = self.transition_state.as_mut() {
+        if let Some(s) = self.state.transition_state.as_mut() {
             s.add_transitions(transitions)
         }
         Ok(())
@@ -377,7 +398,7 @@ where
     }
 
     fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
-        let res = match self.cache.contracts.entry(code_hash) {
+        let res = match self.state.cache.contracts.entry(code_hash) {
             hash_map::Entry::Occupied(entry) => Ok(entry.get().clone()),
             hash_map::Entry::Vacant(entry) => {
                 let code = self.database.code_by_hash_ref(code_hash)?;
@@ -389,18 +410,18 @@ where
     }
 
     fn storage(&mut self, address: Address, index: U256) -> Result<U256, Self::Error> {
-        load_storage(&mut self.cache, &self.database, address, index)
+        load_storage(&mut self.state.cache, &self.database, address, index)
     }
 
     fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> {
-        match self.block_hashes.entry(number) {
+        match self.state.block_hashes.entry(number) {
             btree_map::Entry::Occupied(entry) => Ok(*entry.get()),
             btree_map::Entry::Vacant(entry) => {
                 let ret = *entry.insert(self.database.block_hash_ref(number)?);
 
                 // prune all hashes that are older than BLOCK_HASH_HISTORY
                 let last_block = number.saturating_sub(BLOCK_HASH_HISTORY);
-                while let Some(entry) = self.block_hashes.first_entry() {
+                while let Some(entry) = self.state.block_hashes.first_entry() {
                     if *entry.key() < last_block {
                         entry.remove();
                     } else {
@@ -414,6 +435,14 @@ where
     }
 }
 
+impl<DB> DatabaseCommit for SchedulerDB<DB> {
+    /// Fall back to sequential execute
+    fn commit(&mut self, changes: HashMap<Address, Account>) {
+        let transitions = self.state.cache.apply_evm_state(changes);
+        self.apply_transition(transitions);
+    }
+}
+
 /// PartitionDB is used in PartitionExecutor to build EVM and hook the read operations.
 /// It maintains the partition internal cache, scheduler_db, and block_hashes.
 /// It also records the read set of the current transaction, which will be consumed after the
@@ -615,7 +644,7 @@ where
         let result = match self.cache.accounts.entry(address) {
             hash_map::Entry::Vacant(entry) => {
                 // 2. read initial state of this round from scheduler cache
-                if let Some(account) = self.scheduler_db.cache.accounts.get(&address) {
+                if let Some(account) = self.scheduler_db.state.cache.accounts.get(&address) {
                     Ok(entry.insert(account.clone()).account_info())
                 } else {
                     // 3. read from origin database
@@ -646,7 +675,7 @@ where
             hash_map::Entry::Occupied(entry) => Ok(entry.get().clone()),
             hash_map::Entry::Vacant(entry) => {
                 // 2. read initial state of this round from scheduler cache
-                if let Some(code) = self.scheduler_db.cache.contracts.get(&code_hash) {
+                if let Some(code) = self.scheduler_db.state.cache.contracts.get(&code_hash) {
                     return Ok(entry.insert(code.clone()).clone());
                 }
diff --git a/tests/common/execute.rs b/tests/common/execute.rs
index 095ed96..fbf2236 100644
--- a/tests/common/execute.rs
+++ b/tests/common/execute.rs
@@ -115,25 +115,34 @@ pub(crate) fn compare_evm_execute<DB>(
     let db = Arc::new(db);
     let txs = Arc::new(txs);
     let start = Instant::now();
-    let sequential = GrevmScheduler::new(SpecId::LATEST, env.clone(), db.clone(), txs.clone());
-    let sequential_result = sequential.force_sequential_execute();
-    println!("Grevm sequential execute time: {}ms", start.elapsed().as_millis());
+    let sequential_result = {
+        let mut executor =
+            GrevmScheduler::new(SpecId::LATEST, env.clone(), db.clone(), txs.clone(), None);
+        let sequential_result = executor.force_sequential_execute().unwrap();
+        println!("Grevm sequential execute time: {}ms", start.elapsed().as_millis());
+        let database = Arc::get_mut(&mut executor.database).unwrap();
+        database.state.merge_transitions(BundleRetention::Reverts);
+        (sequential_result, database.state.take_bundle())
+    };
 
     let parallel_result = metrics::with_local_recorder(&recorder, || {
         let start = Instant::now();
-        let parallel = GrevmScheduler::new(SpecId::LATEST, env.clone(), db.clone(), txs.clone());
+        let mut executor =
+            GrevmScheduler::new(SpecId::LATEST, env.clone(), db.clone(), txs.clone(), None);
         // set determined partitions
-        let parallel_result = parallel.force_parallel_execute(with_hints, Some(23));
+        let parallel_result = executor.force_parallel_execute(with_hints, Some(23)).unwrap();
         println!("Grevm parallel execute time: {}ms", start.elapsed().as_millis());
 
         let snapshot = recorder.snapshotter().snapshot();
-        for (key, unit, desc, value) in snapshot.into_vec() {
+        for (key, _, _, value) in snapshot.into_vec() {
             println!("metrics: {} => value: {:?}", key.key().name(), value);
             if let Some(metric) = parallel_metrics.get(key.key().name()) {
                 assert_eq!(*metric, value);
             }
         }
-        parallel_result
+        let database = Arc::get_mut(&mut executor.database).unwrap();
+        database.state.merge_transitions(BundleRetention::Reverts);
+        (parallel_result, database.state.take_bundle())
     });
 
     let start = Instant::now();
@@ -142,7 +151,7 @@ pub(crate) fn compare_evm_execute<DB>(
     let mut max_gas_spent = 0;
     let mut max_gas_used = 0;
-    for result in &reth_result.as_ref().unwrap().results {
+    for result in &reth_result.as_ref().unwrap().0.results {
         match result {
             ExecutionResult::Success { gas_used, gas_refunded, .. } => {
                 max_gas_spent = max_gas_spent.max(gas_used + gas_refunded);
                 max_gas_used = max_gas_used.max(*gas_used);
             }
@@ -154,22 +163,13 @@ pub(crate) fn compare_evm_execute<DB>(
     println!("max_gas_spent: {}, max_gas_used: {}", max_gas_spent, max_gas_used);
 
     compare_execution_result(
-        &reth_result.as_ref().unwrap().results,
-        &sequential_result.as_ref().unwrap().results,
-    );
-    compare_execution_result(
-        &reth_result.as_ref().unwrap().results,
-        &parallel_result.as_ref().unwrap().results,
+        &reth_result.as_ref().unwrap().0.results,
+        &sequential_result.0.results,
     );
+    compare_execution_result(&reth_result.as_ref().unwrap().0.results, &parallel_result.0.results);
 
-    compare_bundle_state(
-        &reth_result.as_ref().unwrap().state,
-        &sequential_result.as_ref().unwrap().state,
-    );
-    compare_bundle_state(
-        &reth_result.as_ref().unwrap().state,
-        &parallel_result.as_ref().unwrap().state,
-    );
+    compare_bundle_state(&reth_result.as_ref().unwrap().1, &sequential_result.1);
+    compare_bundle_state(&reth_result.as_ref().unwrap().1, &parallel_result.1);
 }
 
 /// Simulate the sequential execution of transactions in reth
@@ -178,7 +178,7 @@ pub(crate) fn execute_revm_sequential<DB>(
     spec_id: SpecId,
     env: Env,
     txs: &[TxEnv],
-) -> Result<ExecuteOutput, EVMError<DB::Error>>
+) -> Result<(ExecuteOutput, BundleState), EVMError<DB::Error>>
 where
     DB: DatabaseRef,
     DB::Error: Debug,
@@ -201,7 +201,7 @@ where
 
     evm.db_mut().merge_transitions(BundleRetention::Reverts);
 
-    Ok(ExecuteOutput { state: evm.db_mut().take_bundle(), results })
+    Ok((ExecuteOutput { results }, evm.db_mut().take_bundle()))
 }
 
 #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
diff --git a/tests/ethereum/main.rs b/tests/ethereum/main.rs
index 728af3d..e188d4b 100644
--- a/tests/ethereum/main.rs
+++ b/tests/ethereum/main.rs
@@ -136,18 +136,19 @@ fn run_test_unit(path: &Path, unit: TestUnit) {
                 tx: Default::default(),
             };
             let db = InMemoryDB::new(accounts.clone(), bytecodes, Default::default());
+            let mut executor = GrevmScheduler::new(spec_name.to_spec_id(), env, db, Arc::new(vec![tx_env.unwrap()]), None);
 
             match (
                 test.expect_exception.as_deref(),
-                GrevmScheduler::new(spec_name.to_spec_id(), env, db.clone(), Arc::new(vec![tx_env.unwrap()]))
-                    .parallel_execute(),
+                executor.parallel_execute(),
             ) {
                 // EIP-2681
                 (Some("TransactionException.NONCE_IS_MAX"), Ok(exec_results)) => {
                     assert_eq!(exec_results.results.len(), 1);
                     // This is overly strict as we only need the newly created account's code to be empty.
                     // Extracting such account is unjustified complexity so let's live with this for now.
-                    assert!(exec_results.state.state.values().all(|account| {
+                    let state = Arc::get_mut(&mut executor.database).unwrap().state.take_bundle();
+                    assert!(state.state.values().all(|account| {
                         match &account.info {
                             Some(account) => account.is_empty_code_hash(),
                             None => true,
@@ -187,7 +188,8 @@ fn run_test_unit(path: &Path, unit: TestUnit) {
                     // This is a good reference for a minimal state/DB commitment logic for
                     // pevm/revm to meet the Ethereum specs throughout the eras.
-                    for (address, bundle) in exec_results.state.state {
+                    let state = Arc::get_mut(&mut executor.database).unwrap().state.take_bundle();
+                    for (address, bundle) in state.state {
                         if bundle.info.is_some() {
                             let chain_state_account = accounts.entry(address).or_default();
                             for (index, slot) in bundle.storage.iter() {
diff --git a/tests/mainnet.rs b/tests/mainnet.rs
index 3d6820a..8f62f38 100644
--- a/tests/mainnet.rs
+++ b/tests/mainnet.rs
@@ -8,7 +8,10 @@ use alloy_rpc_types::{Block, BlockTransactions};
 use common::{compat, storage::InMemoryDB};
 use grevm::GrevmScheduler;
 use metrics_util::debugging::DebuggingRecorder;
-use revm::primitives::{Env, TxEnv};
+use revm::{
+    db::states::bundle_state::BundleRetention,
+    primitives::{Env, TxEnv},
+};
 
 fn test_execute_alloy(block: Block, db: InMemoryDB) {
     let spec_id = compat::get_block_spec(&block.header);
@@ -25,30 +28,28 @@ fn test_execute_alloy(block: Block, db: InMemoryDB) {
 
     let db = Arc::new(db);
-    let reth_result = common::execute_revm_sequential(db.clone(), spec_id, env.clone(), &*txs);
+    let reth_result =
+        common::execute_revm_sequential(db.clone(), spec_id, env.clone(), &*txs).unwrap();
 
     // create registry for metrics
     let recorder = DebuggingRecorder::new();
     let parallel_result = metrics::with_local_recorder(&recorder, || {
-        let executor = GrevmScheduler::new(spec_id, env, db, txs);
-        let parallel_result = executor.force_parallel_execute(true, Some(23));
+        let mut executor = GrevmScheduler::new(spec_id, env, db, txs, None);
+        let parallel_result = executor.force_parallel_execute(true, Some(23)).unwrap();
 
         let snapshot = recorder.snapshotter().snapshot();
-        for (key, unit, desc, value) in snapshot.into_vec() {
+        for (key, _, _, value) in snapshot.into_vec() {
            println!("metrics: {} => value: {:?}", key.key().name(), value);
         }
-        parallel_result
+
+        let database = Arc::get_mut(&mut executor.database).unwrap();
+        database.state.merge_transitions(BundleRetention::Reverts);
+        (parallel_result, database.state.take_bundle())
     });
 
-    common::compare_execution_result(
-        &reth_result.as_ref().unwrap().results,
-        &parallel_result.as_ref().unwrap().results,
-    );
+    common::compare_execution_result(&reth_result.0.results, &parallel_result.0.results);
 
-    common::compare_bundle_state(
-        &reth_result.as_ref().unwrap().state,
-        &parallel_result.as_ref().unwrap().state,
-    );
+    common::compare_bundle_state(&reth_result.1, &parallel_result.1);
 
     // TODO(gravity_nekomoto): compare the receipts root
 }
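
Note on the new calling convention (not part of the patch itself): the execute methods now borrow the scheduler mutably and return only per-transaction results, while building a `BundleState` becomes an explicit follow-up step through `take_state`/`merge_transitions`/`take_bundle`, as the test changes above demonstrate. A minimal sketch of that two-phase flow, written against the API exactly as it appears in this diff; the function name and the final printout are illustrative only:

use std::sync::Arc;

use grevm::GrevmScheduler;
use revm::{
    db::states::bundle_state::BundleRetention,
    primitives::{Env, SpecId, TxEnv},
    DatabaseRef,
};

/// Execute one block, then settle its state. Bounds mirror the scheduler's impl block.
fn execute_and_settle<DB>(db: DB, env: Env, txs: Vec<TxEnv>)
where
    DB: DatabaseRef + Send + Sync + 'static,
    DB::Error: Send + Sync,
{
    // `None` starts from a fresh `State`, exactly as the benches now pass.
    let mut executor = GrevmScheduler::new(SpecId::LATEST, env, db, Arc::new(txs), None);

    // Phase 1: execution yields only the per-transaction `ExecutionResult`s.
    let output = executor.parallel_execute();

    // Phase 2: consume the scheduler, merge the accumulated transitions,
    // and take the bundle — the work `build_output` used to do implicitly.
    let mut state = executor.take_state();
    state.merge_transitions(BundleRetention::Reverts);
    let bundle = state.take_bundle();

    if let Ok(output) = output {
        println!("{} tx results, {} changed accounts", output.results.len(), bundle.state.len());
    }
}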
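The `state: Option<Box<State>>` parameter is the other half of the same design: the un-merged `State` taken after one block can seed the scheduler for the next, deferring bundle construction to the end of a pipeline. The chaining below is an inference from `take_state` plus the "initial state before the block execution" doc comment, not a pattern this diff itself exercises; `DB: Clone` is an extra assumption of the sketch:

use std::sync::Arc;

use grevm::{storage::State, GrevmScheduler};
use revm::{
    db::states::bundle_state::BundleRetention,
    primitives::{Env, SpecId, TxEnv},
    DatabaseRef,
};

/// Hypothetical pipeline: carry the un-merged `State` across blocks, settle once.
fn execute_pipeline<DB>(db: DB, blocks: Vec<(Env, Vec<TxEnv>)>)
where
    DB: DatabaseRef + Clone + Send + Sync + 'static,
    DB::Error: Send + Sync,
{
    let mut carried: Option<Box<State>> = None;
    for (env, txs) in blocks {
        // Each scheduler starts from the state the previous block left behind.
        let mut executor =
            GrevmScheduler::new(SpecId::LATEST, env, db.clone(), Arc::new(txs), carried.take());
        // Error handling elided for brevity; a real caller would propagate this.
        let _ = executor.parallel_execute();
        carried = Some(executor.take_state());
    }
    // Settle everything at the end of the pipeline.
    if let Some(mut state) = carried {
        state.merge_transitions(BundleRetention::Reverts);
        let _bundle = state.take_bundle();
    }
}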