diff --git a/CHANGELOG.md b/CHANGELOG.md index 721137b3e..05a0db7f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ ### Enhancements +- Added cleanup of old account data from the in-memory forest ([#1175](https://github.com/0xMiden/miden-node/issues/1175)). - Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). - Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). - Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)). diff --git a/Cargo.lock b/Cargo.lock index 8f7601604..8b99bac18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2910,6 +2910,7 @@ dependencies = [ "hex", "indexmap 2.13.0", "libsqlite3-sys", + "lru 0.16.3", "miden-crypto", "miden-node-proto", "miden-node-proto-build", @@ -2922,6 +2923,7 @@ dependencies = [ "rand_chacha 0.9.0", "regex", "serde", + "tempfile", "termtree", "thiserror 2.0.18", "tokio", diff --git a/Cargo.toml b/Cargo.toml index d9af227ef..19a4ef53d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,6 +87,7 @@ rand = { version = "0.9" } rand_chacha = { version = "0.9" } rstest = { version = "0.26" } serde = { features = ["derive"], version = "1" } +tempfile = { version = "3.12" } thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 1c62c7ab7..7f1f6901b 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -25,6 +25,7 @@ fs-err = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } libsqlite3-sys = { workspace = true } +lru = { workspace = true } miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true } miden-node-proto = { workspace = true } 
miden-node-proto-build = { features = ["internal"], workspace = true } @@ -55,6 +56,7 @@ miden-protocol = { default-features = true, features = ["testing"], works miden-standards = { features = ["testing"], workspace = true } rand = { workspace = true } regex = { version = "1.11" } +tempfile = { workspace = true } termtree = { version = "0.5" } [features] diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7fc4a5cab..bbb786c05 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -1,4 +1,5 @@ use std::collections::{BTreeMap, BTreeSet, HashSet}; +use std::mem::size_of; use std::ops::RangeInclusive; use std::path::PathBuf; @@ -6,6 +7,7 @@ use anyhow::Context; use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection}; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; use miden_node_proto::generated as proto; +use miden_node_utils::limiter::MAX_RESPONSE_PAYLOAD_BYTES; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; @@ -600,13 +602,112 @@ impl Db { &self, account_id: AccountId, block_range: RangeInclusive, + entries_limit: Option, ) -> Result { + let entries_limit = entries_limit.unwrap_or_else(|| { + // TODO: These limits should be given by the protocol. + // See miden-base/issues/1770 for more details + pub const ROW_OVERHEAD_BYTES: usize = + 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx + MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES + }); + self.transact("select storage map sync values", move |conn| { - models::queries::select_account_storage_map_values(conn, account_id, block_range) + models::queries::select_account_storage_map_values_paged( + conn, + account_id, + block_range, + entries_limit, + ) }) .await } + /// Reconstructs storage map details from the database for a specific slot at a block. 
+ /// + /// Used as fallback when `InnerForest` cache misses (historical or evicted queries). + /// Rebuilds all entries by querying the DB and filtering to the specific slot. + /// + /// Returns: + /// - `::LimitExceeded` when too many entries are present + /// - `::AllEntries` if the size is sufficiently small + pub(crate) async fn reconstruct_storage_map_from_db( + &self, + account_id: AccountId, + slot_name: miden_protocol::account::StorageSlotName, + block_num: BlockNumber, + entries_limit: Option, + ) -> Result { + use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; + use miden_protocol::EMPTY_WORD; + + // TODO this remains expensive with a large history until we implement pruning for DB + // columns + let mut values = Vec::new(); + let mut block_range_start = BlockNumber::GENESIS; + let entries_limit = entries_limit.unwrap_or_else(|| { + // TODO: These limits should be given by the protocol. + // See miden-base/issues/1770 for more details + pub const ROW_OVERHEAD_BYTES: usize = + 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx + MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES + }); + + let mut page = self + .select_storage_map_sync_values( + account_id, + block_range_start..=block_num, + Some(entries_limit), + ) + .await?; + + values.extend(page.values); + + loop { + if page.last_block_included == block_num || page.last_block_included < block_range_start + { + break; + } + + block_range_start = page.last_block_included.child(); + page = self + .select_storage_map_sync_values( + account_id, + block_range_start..=block_num, + Some(entries_limit), + ) + .await?; + + values.extend(page.values); + } + + if page.last_block_included != block_num { + return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); + } + + // Filter to the specific slot and collect latest values per key + let mut latest_values = BTreeMap::::new(); + for value in values { + if value.slot_name == slot_name { + let 
raw_key = value.key; + latest_values.insert(raw_key, value.value); + } + } + + // Remove EMPTY_WORD entries (deletions) + latest_values.retain(|_, v| *v != EMPTY_WORD); + + if latest_values.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { + return Ok(AccountStorageMapDetails::limit_exceeded(slot_name)); + } + + let entries = Vec::from_iter(latest_values.into_iter()); + Ok(AccountStorageMapDetails { + slot_name, + entries: StorageMapEntries::AllEntries(entries), + }) + } + /// Emits size metrics for each table in the database, and the entire database. #[instrument(target = COMPONENT, skip_all, err)] pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> { diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index fef733cb6..af5fdbc94 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -653,19 +653,14 @@ impl StorageMapValue { /// /// * Response payload size: 0 <= size <= 2MB /// * Storage map values per response: 0 <= count <= (2MB / (2*Word + u32 + u8)) + 1 -pub(crate) fn select_account_storage_map_values( +pub(crate) fn select_account_storage_map_values_paged( conn: &mut SqliteConnection, account_id: AccountId, block_range: RangeInclusive, + limit: usize, ) -> Result { use schema::account_storage_map_values as t; - // TODO: These limits should be given by the protocol. 
- // See miden-base/issues/1770 for more details - pub const ROW_OVERHEAD_BYTES: usize = - 2 * size_of::() + size_of::() + size_of::(); // key + value + block_num + slot_idx - pub const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; - if !account_id.is_public() { return Err(DatabaseError::AccountNotPublic(account_id)); } @@ -686,13 +681,13 @@ pub(crate) fn select_account_storage_map_values( .and(t::block_num.le(block_range.end().to_raw_sql())), ) .order(t::block_num.asc()) - .limit(i64::try_from(MAX_ROWS + 1).expect("limit fits within i64")) + .limit(i64::try_from(limit + 1).expect("limit fits within i64")) .load(conn)?; // Discard the last block in the response (assumes more than one block may be present) let (last_block_included, values) = if let Some(&(last_block_num, ..)) = raw.last() - && raw.len() > MAX_ROWS + && raw.len() > limit { // NOTE: If the query contains at least one more row than the amount of storage map updates // allowed in a single block for an account, then the response is guaranteed to have at @@ -708,7 +703,9 @@ pub(crate) fn select_account_storage_map_values( } else { ( *block_range.end(), - raw.into_iter().map(StorageMapValue::from_raw_row).collect::>()?, + raw.into_iter() + .map(StorageMapValue::from_raw_row) + .collect::, _>>()?, ) }; diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 2749c9903..598e1b849 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -4,8 +4,9 @@ use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; +use assert_matches::assert_matches; use diesel::{Connection, SqliteConnection}; -use miden_node_proto::domain::account::AccountSummary; +use miden_node_proto::domain::account::{AccountSummary, StorageMapEntries}; use miden_node_utils::fee::{test_fee, test_fee_params}; use miden_protocol::account::auth::PublicKeyCommitment; use miden_protocol::account::delta::AccountUpdateDetails; @@ -36,6 +37,7 @@ use miden_protocol::block::{ }; use 
miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::crypto::merkle::smt::SmtProof; use miden_protocol::crypto::rand::RpoRandomCoin; use miden_protocol::note::{ Note, @@ -71,6 +73,7 @@ use miden_standards::code_builder::CodeBuilder; use miden_standards::note::{NetworkAccountTarget, create_p2id_note}; use pretty_assertions::assert_eq; use rand::Rng; +use tempfile::tempdir; use super::{AccountInfo, NoteRecord, NullifierInfo}; use crate::db::TransactionSummary; @@ -78,6 +81,7 @@ use crate::db::migrations::apply_migrations; use crate::db::models::queries::{StorageMapValue, insert_account_storage_map_value}; use crate::db::models::{Page, queries, utils}; use crate::errors::DatabaseError; +use crate::inner_forest::HISTORICAL_BLOCK_RETENTION; fn create_db() -> SqliteConnection { let mut conn = SqliteConnection::establish(":memory:").expect("In memory sqlite always works"); @@ -1069,9 +1073,13 @@ fn sql_account_storage_map_values_insertion() { AccountDelta::new(account_id, storage1, AccountVaultDelta::default(), Felt::ONE).unwrap(); insert_account_delta(conn, account_id, block1, &delta1); - let storage_map_page = - queries::select_account_storage_map_values(conn, account_id, BlockNumber::GENESIS..=block1) - .unwrap(); + let storage_map_page = queries::select_account_storage_map_values_paged( + conn, + account_id, + BlockNumber::GENESIS..=block1, + 1024, + ) + .unwrap(); assert_eq!(storage_map_page.values.len(), 2, "expect 2 initial rows"); // Update key1 at block 2 @@ -1084,9 +1092,13 @@ fn sql_account_storage_map_values_insertion() { .unwrap(); insert_account_delta(conn, account_id, block2, &delta2); - let storage_map_values = - queries::select_account_storage_map_values(conn, account_id, BlockNumber::GENESIS..=block2) - .unwrap(); + let storage_map_values = queries::select_account_storage_map_values_paged( + conn, + account_id, + BlockNumber::GENESIS..=block2, + 1024, + ) + .unwrap(); 
assert_eq!(storage_map_values.values.len(), 3, "three rows (with duplicate key)"); // key1 should now be value3 at block2; key2 remains value2 at block1 @@ -1180,10 +1192,11 @@ fn select_storage_map_sync_values() { ) .unwrap(); - let page = queries::select_account_storage_map_values( + let page = queries::select_account_storage_map_values_paged( &mut conn, account_id, BlockNumber::from(2)..=BlockNumber::from(3), + 1024, ) .unwrap(); @@ -1214,6 +1227,133 @@ fn select_storage_map_sync_values() { assert_eq!(page.values, expected, "should return latest values ordered by key"); } +#[test] +fn select_storage_map_sync_values_paginates_until_last_block() { + let mut conn = create_db(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let slot_name = StorageSlotName::mock(7); + + let block1 = BlockNumber::from(1); + let block2 = BlockNumber::from(2); + let block3 = BlockNumber::from(3); + + create_block(&mut conn, block1); + create_block(&mut conn, block2); + create_block(&mut conn, block3); + + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block1) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block2) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 2)], block3) + .unwrap(); + + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block1, + slot_name.clone(), + num_to_word(1), + num_to_word(11), + ) + .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block2, + slot_name.clone(), + num_to_word(2), + num_to_word(22), + ) + .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block3, + slot_name.clone(), + num_to_word(3), + num_to_word(33), + ) + .unwrap(); + + let page = queries::select_account_storage_map_values_paged( + &mut conn, + account_id, + BlockNumber::GENESIS..=block3, + 1, + ) + .unwrap(); + + 
assert_eq!(page.last_block_included, block1, "should truncate at block 1"); + assert_eq!(page.values.len(), 1, "should include block 1 only"); +} + +#[tokio::test] +#[miden_node_test_macro::enable_logging] +async fn reconstruct_storage_map_from_db_pages_until_latest() { + let temp_dir = tempdir().unwrap(); + let db_path = temp_dir.path().join("store.sqlite"); + + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let slot_name = StorageSlotName::mock(9); + + let block1 = BlockNumber::from(1); + let block2 = BlockNumber::from(2); + let block3 = BlockNumber::from(3); + + let db = crate::db::Db::load(db_path).await.unwrap(); + let slot_name_for_db = slot_name.clone(); + db.query("insert paged values", move |db_conn| { + db_conn.transaction(|db_conn| { + apply_migrations(db_conn)?; + create_block(db_conn, block1); + create_block(db_conn, block2); + create_block(db_conn, block3); + + queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 0)], block1)?; + queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 1)], block2)?; + queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 2)], block3)?; + + queries::insert_account_storage_map_value( + db_conn, + account_id, + block1, + slot_name_for_db.clone(), + num_to_word(1), + num_to_word(10), + )?; + queries::insert_account_storage_map_value( + db_conn, + account_id, + block2, + slot_name_for_db.clone(), + num_to_word(2), + num_to_word(20), + )?; + queries::insert_account_storage_map_value( + db_conn, + account_id, + block3, + slot_name_for_db.clone(), + num_to_word(3), + num_to_word(30), + )?; + Ok::<_, DatabaseError>(()) + }) + }) + .await + .unwrap(); + + let details = db + .reconstruct_storage_map_from_db(account_id, slot_name.clone(), block3, Some(1)) + .await + .unwrap(); + + assert_matches!(details.entries, StorageMapEntries::AllEntries(entries) => { + assert_eq!(entries.len(), 3); + }); +} + // UTILITIES // 
------------------------------------------------------------------------------------------- fn num_to_word(n: u64) -> Word { @@ -2117,10 +2257,11 @@ fn db_roundtrip_storage_map_values() { .unwrap(); // Retrieve - let page = queries::select_account_storage_map_values( + let page = queries::select_account_storage_map_values_paged( &mut conn, account_id, BlockNumber::GENESIS..=block_num, + 1024, ) .unwrap(); @@ -2244,7 +2385,7 @@ fn db_roundtrip_account_storage_with_maps() { #[test] #[miden_node_test_macro::enable_logging] -fn test_note_metadata_with_attachment_roundtrip() { +fn db_roundtrip_note_metadata_attachment() { let mut conn = create_db(); let block_num = BlockNumber::from(1); create_block(&mut conn, block_num); @@ -2295,3 +2436,878 @@ fn test_note_metadata_with_attachment_roundtrip() { "NetworkAccountTarget should have the correct target account ID" ); } + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_matches_db_storage_map_roots_across_updates() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + use miden_protocol::crypto::merkle::smt::Smt; + + use crate::inner_forest::InnerForest; + + /// Reconstructs storage map root from DB entries at a specific block. 
+ fn reconstruct_storage_map_root_from_db( + conn: &mut SqliteConnection, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + ) -> Option { + let storage_values = queries::select_account_storage_map_values_paged( + conn, + account_id, + BlockNumber::GENESIS..=block_num, + 1024, + ) + .unwrap(); + + // Filter to the specific slot and get most recent value for each key + let mut latest_values: BTreeMap = BTreeMap::new(); + for value in storage_values.values { + if value.slot_name == *slot_name { + latest_values.insert(value.key, value.value); + } + } + + if latest_values.is_empty() { + return None; + } + + // Build SMT from entries + let entries: Vec<(Word, Word)> = latest_values + .into_iter() + .filter_map(|(key, value)| { + if value == EMPTY_WORD { + None + } else { + // Keys are stored unhashed in DB, match InnerForest behavior + Some((key, value)) + } + }) + .collect(); + + if entries.is_empty() { + use miden_protocol::crypto::merkle::EmptySubtreeRoots; + use miden_protocol::crypto::merkle::smt::SMT_DEPTH; + return Some(*EmptySubtreeRoots::entry(SMT_DEPTH, 0)); + } + + let mut smt = Smt::default(); + for (key, value) in entries { + smt.insert(miden_protocol::account::StorageMap::hash_key(key), value).unwrap(); + } + + Some(smt.root()) + } + + let mut conn = create_db(); + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + let block1 = BlockNumber::from(1); + let block2 = BlockNumber::from(2); + let block3 = BlockNumber::from(3); + + create_block(&mut conn, block1); + create_block(&mut conn, block2); + create_block(&mut conn, block3); + + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block1) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block2) + .unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 2)], block3) + .unwrap(); + 
+ let slot_map = StorageSlotName::mock(1); + let slot_value = StorageSlotName::mock(2); + + let key1 = num_to_word(100); + let key2 = num_to_word(200); + let value1 = num_to_word(1000); + let value2 = num_to_word(2000); + let value3 = num_to_word(3000); + + // Block 1: Add storage map entries and a storage value + let mut map_delta_1 = StorageMapDelta::default(); + map_delta_1.insert(key1, value1); + map_delta_1.insert(key2, value2); + + let raw_1 = BTreeMap::from_iter([ + (slot_map.clone(), StorageSlotDelta::Map(map_delta_1)), + (slot_value.clone(), StorageSlotDelta::Value(value1)), + ]); + let storage_1 = AccountStorageDelta::from_raw(raw_1); + let delta_1 = + AccountDelta::new(account_id, storage_1.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + + insert_account_delta(&mut conn, account_id, block1, &delta_1); + forest.update_account(block1, &delta_1).unwrap(); + + // Verify forest matches DB for block 1 + let forest_root_1 = forest.get_storage_map_root(account_id, &slot_map, block1).unwrap(); + let db_root_1 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block1) + .expect("DB should have storage map root"); + + assert_eq!( + forest_root_1, db_root_1, + "Storage map root at block 1 should match between InnerForest and DB" + ); + + // Block 2: Delete storage map entry (set to EMPTY_WORD) and delete storage value + let mut map_delta_2 = StorageMapDelta::default(); + map_delta_2.insert(key1, EMPTY_WORD); + + let raw_2 = BTreeMap::from_iter([ + (slot_map.clone(), StorageSlotDelta::Map(map_delta_2)), + (slot_value.clone(), StorageSlotDelta::Value(EMPTY_WORD)), + ]); + let storage_2 = AccountStorageDelta::from_raw(raw_2); + let delta_2 = AccountDelta::new( + account_id, + storage_2.clone(), + AccountVaultDelta::default(), + Felt::new(2), + ) + .unwrap(); + + insert_account_delta(&mut conn, account_id, block2, &delta_2); + forest.update_account(block2, &delta_2).unwrap(); + + // Verify forest matches DB for block 2 + let 
forest_root_2 = forest.get_storage_map_root(account_id, &slot_map, block2).unwrap(); + let db_root_2 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block2) + .expect("DB should have storage map root"); + + assert_eq!( + forest_root_2, db_root_2, + "Storage map root at block 2 should match between InnerForest and DB" + ); + + // Block 3: Re-add same value as block 1 and add different map entry + let mut map_delta_3 = StorageMapDelta::default(); + map_delta_3.insert(key2, value3); // Update existing key + + let raw_3 = BTreeMap::from_iter([ + (slot_map.clone(), StorageSlotDelta::Map(map_delta_3)), + (slot_value.clone(), StorageSlotDelta::Value(value1)), // Same as block 1 + ]); + let storage_3 = AccountStorageDelta::from_raw(raw_3); + let delta_3 = AccountDelta::new( + account_id, + storage_3.clone(), + AccountVaultDelta::default(), + Felt::new(3), + ) + .unwrap(); + + insert_account_delta(&mut conn, account_id, block3, &delta_3); + forest.update_account(block3, &delta_3).unwrap(); + + // Verify forest matches DB for block 3 + let forest_root_3 = forest.get_storage_map_root(account_id, &slot_map, block3).unwrap(); + let db_root_3 = reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block3) + .expect("DB should have storage map root"); + + assert_eq!( + forest_root_3, db_root_3, + "Storage map root at block 3 should match between InnerForest and DB" + ); + + // Verify we can query historical roots + let forest_root_1_check = forest.get_storage_map_root(account_id, &slot_map, block1).unwrap(); + let db_root_1_check = + reconstruct_storage_map_root_from_db(&mut conn, account_id, &slot_map, block1) + .expect("DB should have storage map root"); + assert_eq!( + forest_root_1_check, db_root_1_check, + "Historical query for block 1 should match" + ); + + // Verify roots are different across blocks (since we modified the map) + assert_ne!(forest_root_1, forest_root_2, "Roots should differ after deletion"); + 
 assert_ne!(forest_root_2, forest_root_3, "Roots should differ after modification"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_shared_roots_not_deleted_prematurely() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + use miden_protocol::testing::account_id::{ + ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, + ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, + }; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); + let account3 = AccountId::try_from(ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE).unwrap(); + + let block01 = BlockNumber::from(1); + let block02 = BlockNumber::from(2); + let block50 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION); + let block51 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 1); + let block52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2); + let block53 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 3); + let slot_name = StorageSlotName::mock(1); + + let key1 = num_to_word(100); + let key2 = num_to_word(200); + let value1 = num_to_word(1000); + let value2 = num_to_word(2000); + + // All three accounts add identical storage maps (account1 at block 1; accounts 2 and 3 at block 2) + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key1, value1); + map_delta.insert(key2, value2); + + // Sets up a single slot with a map and two key-value-pairs + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta.clone()))]); + let storage = AccountStorageDelta::from_raw(raw); + + // Account 1 + let delta1 = + AccountDelta::new(account1, storage.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + forest.update_account(block01, &delta1).unwrap(); + + // Account 2 (same storage) + let delta2 = + 
AccountDelta::new(account2, storage.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + forest.update_account(block02, &delta2).unwrap(); + + // Account 3 (same storage) + let delta3 = + AccountDelta::new(account3, storage.clone(), AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + forest.update_account(block02, &delta3).unwrap(); + + // All three accounts should have the same root (structural sharing in SmtForest) + let root1 = forest.get_storage_map_root(account1, &slot_name, block01).unwrap(); + let root2 = forest.get_storage_map_root(account2, &slot_name, block02).unwrap(); + let root3 = forest.get_storage_map_root(account3, &slot_name, block02).unwrap(); + + // identical maps means identical roots + assert_eq!(root1, root2); + assert_eq!(root2, root3); + + // Verify we can get witnesses for all three accounts and verify them against roots + let witness1 = forest + .get_storage_map_witness(account1, &slot_name, block01, key1) + .expect("Account1 should have accessible storage map"); + let witness2 = forest + .get_storage_map_witness(account2, &slot_name, block02, key1) + .expect("Account2 should have accessible storage map"); + let witness3 = forest + .get_storage_map_witness(account3, &slot_name, block02, key1) + .expect("Account3 should have accessible storage map"); + + // Verify witnesses against storage map roots using SmtProof::compute_root + let proof1: SmtProof = witness1.into(); + assert_eq!(proof1.compute_root(), root1, "Witness1 must verify against root1"); + + let proof2: SmtProof = witness2.into(); + assert_eq!(proof2.compute_root(), root2, "Witness2 must verify against root2"); + + let proof3: SmtProof = witness3.into(); + assert_eq!(proof3.compute_root(), root3, "Witness3 must verify against root3"); + + let (_, storage_roots_removed) = forest.prune(block50); + // nothing should be pruned yet, it's still in the window + assert_eq!(storage_roots_removed, 0); + + // Update accounts 1,2,3 + let mut map_delta_update = 
StorageMapDelta::default(); + map_delta_update.insert(key1, num_to_word(1001)); // Slight change + let raw_update = + BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta_update))]); + let storage_update = AccountStorageDelta::from_raw(raw_update); + let delta2_update = AccountDelta::new( + account2, + storage_update.clone(), + AccountVaultDelta::default(), + Felt::new(2), + ) + .unwrap(); + forest.update_account(block51, &delta2_update).unwrap(); + + let delta3_update = AccountDelta::new( + account3, + storage_update.clone(), + AccountVaultDelta::default(), + Felt::new(2), + ) + .unwrap(); + forest.update_account(block52, &delta3_update).unwrap(); + + // Prune at block 52 + let (_, storage_roots_removed) = forest.prune(block52); + // the root for account01 is the most recent, which is the same as the other two, so nothing + // should be pruned + assert_eq!(storage_roots_removed, 0); + + // ensure the root is still accessible + let account1_root_after_prune = forest.get_storage_map_root(account1, &slot_name, block01); + assert!(account1_root_after_prune.is_some()); + + let delta1_update = + AccountDelta::new(account1, storage_update, AccountVaultDelta::default(), Felt::new(2)) + .unwrap(); + forest.update_account(block53, &delta1_update).unwrap(); + + // Prune at block 53 + let (_, storage_roots_removed) = forest.prune(block53); + // the roots from block01 and block02 are now all obsolete; they are one shared root, so a + // single storage entry is removed + assert_eq!(storage_roots_removed, 1); + + // Account2 and Account3 should still be accessible at their recent blocks + let account1_root = forest.get_storage_map_root(account1, &slot_name, block53).unwrap(); + let account2_root = forest.get_storage_map_root(account2, &slot_name, block53).unwrap(); + let account3_root = forest.get_storage_map_root(account3, &slot_name, block53).unwrap(); + + // Verify we can still get witnesses for account2 and account3 and verify against roots + let witness1_after = forest + 
.get_storage_map_witness(account2, &slot_name, block51, key1) + .expect("Account2 should still have accessible storage map after pruning account1"); + let witness2_after = forest + .get_storage_map_witness(account2, &slot_name, block51, key1) + .expect("Account2 should still have accessible storage map after pruning account1"); + let witness3_after = forest + .get_storage_map_witness(account3, &slot_name, block52, key1) + .expect("Account3 should still have accessible storage map after pruning account1"); + + // Verify witnesses against storage map roots + let proof1: SmtProof = witness1_after.into(); + assert_eq!(proof1.compute_root(), account1_root,); + let proof2: SmtProof = witness2_after.into(); + assert_eq!(proof2.compute_root(), account2_root,); + let proof3: SmtProof = witness3_after.into(); + assert_eq!(proof3.compute_root(), account3_root,); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_retains_latest_after_100_blocks_and_pruning() { + use std::collections::BTreeMap; + + use miden_node_proto::domain::account::StorageMapEntries; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + use crate::inner_forest::{HISTORICAL_BLOCK_RETENTION, InnerForest}; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let slot_map = StorageSlotName::mock(1); + + let key1 = num_to_word(100); + let key2 = num_to_word(200); + let value1 = num_to_word(1000); + let value2 = num_to_word(2000); + + // Block 1: Apply initial update with vault and storage + let block_1 = BlockNumber::from(1); + + // Create storage map with two entries + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key1, value1); + map_delta.insert(key2, value2); + + let raw = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = 
AccountStorageDelta::from_raw(raw); + + // Create vault with one asset + let asset = FungibleAsset::new(faucet_id, 100).unwrap(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset.into()).unwrap(); + + let delta_1 = AccountDelta::new(account_id, storage_delta, vault_delta, Felt::ONE).unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + // Capture the roots from block 1 + let initial_vault_root = forest.get_vault_root(account_id, block_1).unwrap(); + let initial_storage_map_root = + forest.get_storage_map_root(account_id, &slot_map, block_1).unwrap(); + + // Blocks 2-100: Do nothing (no updates to this account) + // Simulate other activity by just advancing to block 100 + + let block_100 = BlockNumber::from(100); + + // Before pruning, verify we can still query block 1's data at block 100 + // (range query finds most recent at or before block 100) + let vault_root_before_prune = forest.get_vault_root(account_id, block_100); + assert_eq!( + vault_root_before_prune, + Some(initial_vault_root), + "Before pruning, should find block 1's vault root when querying at block 100" + ); + + let storage_root_before_prune = forest.get_storage_map_root(account_id, &slot_map, block_100); + assert_eq!( + storage_root_before_prune, + Some(initial_storage_map_root), + "Before pruning, should find block 1's storage root when querying at block 100" + ); + + // Prune at block 100 + // Block 1 is 99 blocks old, BUT it's the most recent entry for this account + // so it should NOT be pruned + let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); + + let cutoff_block = 100 - HISTORICAL_BLOCK_RETENTION; + assert_eq!(cutoff_block, 50, "Cutoff should be block 50 (100 - HISTORICAL_BLOCK_RETENTION)"); + assert_eq!( + vault_roots_removed, 0, + "Should NOT prune block 1 vault root (it's the most recent for this account)" + ); + assert_eq!( + storage_roots_removed, 0, + "Should NOT prune block 1 storage root (it's the most 
recent for this account/slot)" + ); + + // After pruning, we should STILL be able to access block 1's data + // because it's the most recent entry for this account + let vault_root_after_prune = forest.get_vault_root(account_id, block_100); + assert_eq!( + vault_root_after_prune, + Some(initial_vault_root), + "After pruning, should still find vault root (block 1 preserved as most recent)" + ); + + let storage_root_after_prune = forest.get_storage_map_root(account_id, &slot_map, block_100); + assert_eq!( + storage_root_after_prune, + Some(initial_storage_map_root), + "After pruning, should still find storage root (block 1 preserved as most recent)" + ); + + // Verify we can still get witnesses and entries and verify against root + let witness = forest + .get_storage_map_witness(account_id, &slot_map, block_100, key1) + .expect("Should be able to get witness for key1 after pruning"); + + let storage_root = forest.get_storage_map_root(account_id, &slot_map, block_100).unwrap(); + let proof: SmtProof = witness.into(); + assert_eq!(proof.compute_root(), storage_root, "Witness must verify against storage root"); + + let entries = forest + .get_storage_map_details_full_from_cache(account_id, slot_map.clone(), block_1) + .expect("Should have storage map entries after pruning"); + assert_matches!(&entries.entries, StorageMapEntries::AllEntries(entries) => { + assert_eq!(entries.len(), 2, "Should have 2 entries (key1 and key2)"); + assert!(entries.contains(&(key1, value1)), "Should contain key1 with value1"); + assert!(entries.contains(&(key2, value2)), "Should contain key2 with value2"); + }); + + // Now add an update at block 51 (within retention window) to test that old entries + // get pruned when newer entries exist + let block_51 = BlockNumber::from(51); + + // Update with new values + let value1_new = num_to_word(3000); + let mut map_delta_51 = StorageMapDelta::default(); + map_delta_51.insert(key1, value1_new); + + let raw_51 = BTreeMap::from_iter([(slot_map.clone(), 
StorageSlotDelta::Map(map_delta_51))]); + let storage_delta_51 = AccountStorageDelta::from_raw(raw_51); + + let asset_51 = FungibleAsset::new(faucet_id, 200).unwrap(); + let mut vault_delta_51 = AccountVaultDelta::default(); + vault_delta_51.add_asset(asset_51.into()).unwrap(); + + let delta_51 = + AccountDelta::new(account_id, storage_delta_51, vault_delta_51, Felt::new(51)).unwrap(); + + forest.update_account(block_51, &delta_51).unwrap(); + + // Prune again at block 100 + let (vault_roots_removed_2, storage_roots_removed_2) = forest.prune(block_100); + + // Now block 1 should be pruned because there's a newer entry at block 51 + assert_eq!(vault_roots_removed_2, 1, "Should prune block 1 vault root (block 51 is newer)"); + assert_eq!( + storage_roots_removed_2, 1, + "Should prune block 1 storage root (block 51 is newer)" + ); + + // Now verify we can access the account state at block 100 + // (should find block 51's entry via range query) + let vault_root_at_100 = forest + .get_vault_root(account_id, block_100) + .expect("Should find vault root at block 100 (from block 51 entry)"); + + let _storage_root_at_100 = forest + .get_storage_map_root(account_id, &slot_map, block_100) + .expect("Should find storage root at block 100 (from block 51 entry)"); + + // The roots should be different from initial (state changed at block 51) + assert_ne!( + vault_root_at_100, initial_vault_root, + "Vault root should differ from initial (updated at block 51)" + ); + + // Verify we can get witnesses and entries for the updated state and verify against root + let witness = forest + .get_storage_map_witness(account_id, &slot_map, block_100, key1) + .expect("Should be able to get witness for key1"); + + let storage_root = forest.get_storage_map_root(account_id, &slot_map, block_100).unwrap(); + let proof: SmtProof = witness.into(); + assert_eq!(proof.compute_root(), storage_root, "Witness must verify against storage root"); + + let entries = forest + 
.get_storage_map_details_full_from_cache(account_id, slot_map.clone(), block_51) + .expect("Should have storage map entries"); + + match &entries.entries { + StorageMapEntries::AllEntries(entries) => { + assert_eq!(entries.len(), 2, "Should have 2 entries (key1 updated, key2 from block 1)"); + assert!( + entries.contains(&(key1, value1_new)), + "Should contain key1 with updated value" + ); + assert!( + entries.contains(&(key2, value2)), + "Should contain key2 with original value from block 1" + ); + }, + _ => panic!("Expected AllEntries"), + } + + // Verify querying at block 51 still works + let vault_root_at_51 = forest + .get_vault_root(account_id, block_51) + .expect("Should have vault root at block 51"); + assert_eq!(vault_root_at_51, vault_root_at_100); + + // Verify block 1 is no longer accessible + let vault_root_at_1 = forest.get_vault_root(account_id, block_1); + assert!( + vault_root_at_1.is_none(), + "Block 1 should not be accessible after pruning (block 51 is newer)" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_preserves_most_recent_vault_only() { + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // Block 1: Create vault with asset + let block_1 = BlockNumber::from(1); + let asset = FungibleAsset::new(faucet_id, 500).unwrap(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset.into()).unwrap(); + + let delta_1 = + AccountDelta::new(account_id, AccountStorageDelta::default(), vault_delta, Felt::ONE) + .unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + let initial_vault_root = forest.get_vault_root(account_id, block_1).unwrap(); + + // Advance 100 blocks without any updates + let block_100 = BlockNumber::from(100); + + // Prune at block 100 + let 
(vault_roots_removed, storage_roots_removed) = forest.prune(block_100); + + // Vault from block 1 should NOT be pruned (it's the most recent) + assert_eq!( + vault_roots_removed, 0, + "Should NOT prune vault root (it's the most recent for this account)" + ); + assert_eq!(storage_roots_removed, 0, "No storage roots to prune"); + + // Verify vault is still accessible at block 100 + let vault_root_at_100 = forest + .get_vault_root(account_id, block_100) + .expect("Should still have vault root at block 100"); + assert_eq!(vault_root_at_100, initial_vault_root, "Vault root should be preserved"); + + // Verify we can get witnesses for the vault and verify against vault root + let witnesses = forest + .get_vault_asset_witnesses( + account_id, + block_100, + [AssetVaultKey::new_unchecked(asset.vault_key().into())].into(), + ) + .expect("Should be able to get vault witness after pruning"); + + assert_eq!(witnesses.len(), 1, "Should have one witness"); + let witness = &witnesses[0]; + let proof: SmtProof = witness.clone().into(); + assert_eq!( + proof.compute_root(), + vault_root_at_100, + "Vault witness must verify against vault root" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_preserves_most_recent_storage_map_only() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + let slot_map = StorageSlotName::mock(1); + let key1 = num_to_word(100); + let value1 = num_to_word(1000); + + // Block 1: Create storage map + let block_1 = BlockNumber::from(1); + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key1, value1); + + let raw = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta_1 = + 
AccountDelta::new(account_id, storage_delta, AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + let initial_storage_root = forest.get_storage_map_root(account_id, &slot_map, block_1).unwrap(); + + // Advance 100 blocks without any updates + let block_100 = BlockNumber::from(100); + + // Prune at block 100 + let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); + + // Storage map from block 1 should NOT be pruned (it's the most recent) + assert_eq!(vault_roots_removed, 0, "No vault roots to prune"); + assert_eq!( + storage_roots_removed, 0, + "Should NOT prune storage map root (it's the most recent for this account/slot)" + ); + + // Verify storage map is still accessible at block 100 + let storage_root_at_100 = forest + .get_storage_map_root(account_id, &slot_map, block_100) + .expect("Should still have storage root at block 100"); + assert_eq!(storage_root_at_100, initial_storage_root, "Storage root should be preserved"); + + // Verify we can get witnesses for the storage map and verify against storage root + let witness = forest + .get_storage_map_witness(account_id, &slot_map, block_100, key1) + .expect("Should be able to get storage witness after pruning"); + + let proof: SmtProof = witness.into(); + assert_eq!( + proof.compute_root(), + storage_root_at_100, + "Storage witness must verify against storage root" + ); + + // Verify we can get all entries + let entries = forest + .get_storage_map_details_full_from_cache(account_id, slot_map.clone(), block_1) + .expect("Should have storage entries after pruning"); + + match &entries.entries { + miden_node_proto::domain::account::StorageMapEntries::AllEntries(entries) => { + assert_eq!(entries.len(), 1, "Should have 1 entry"); + assert_eq!(entries[0], (key1, value1), "Entry should match"); + }, + _ => panic!("Expected AllEntries"), + } +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn 
inner_forest_preserves_most_recent_storage_value_slot() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::StorageSlotDelta; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + let slot_value = StorageSlotName::mock(1); + let value1 = num_to_word(5000); + + // Block 1: Create storage value slot + let block_1 = BlockNumber::from(1); + + let raw = BTreeMap::from_iter([(slot_value.clone(), StorageSlotDelta::Value(value1))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + + let delta_1 = + AccountDelta::new(account_id, storage_delta, AccountVaultDelta::default(), Felt::ONE) + .unwrap(); + + forest.update_account(block_1, &delta_1).unwrap(); + + // Note: Value slots don't have roots in InnerForest - they're just part of the + // account storage header. The InnerForest only tracks map slots. + // So there's nothing to verify for value slots in the forest. 
+ + // This test documents that value slots are NOT tracked in InnerForest + // (they don't need to be, since their digest is 1:1 with the value) + + // Advance 100 blocks without any updates + let block_100 = BlockNumber::from(100); + + // Prune at block 100 + let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100); + + // No roots should be pruned because there are no map slots + assert_eq!(vault_roots_removed, 0, "No vault roots in this test"); + assert_eq!( + storage_roots_removed, 0, + "Value slots don't create storage roots in InnerForest" + ); + + // Verify no storage map roots exist for this account + let storage_root = forest.get_storage_map_root(account_id, &slot_value, block_100); + assert!( + storage_root.is_none(), + "Value slots don't have storage map roots in InnerForest" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn inner_forest_preserves_mixed_slots_independently() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + use crate::inner_forest::InnerForest; + + let mut forest = InnerForest::new(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let slot_map_a = StorageSlotName::mock(1); + let slot_map_b = StorageSlotName::mock(2); + let slot_value = StorageSlotName::mock(3); + + let key1 = num_to_word(100); + let value1 = num_to_word(1000); + let value_slot_data = num_to_word(5000); + + // Block 1: Create vault + two map slots + one value slot + let block_1 = BlockNumber::from(1); + + let asset = FungibleAsset::new(faucet_id, 100).unwrap(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(asset.into()).unwrap(); + + let mut map_delta_a = StorageMapDelta::default(); + map_delta_a.insert(key1, value1); + + let mut map_delta_b = StorageMapDelta::default(); + map_delta_b.insert(key1, 
value1);
+
+    let raw = BTreeMap::from_iter([
+        (slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a)),
+        (slot_map_b.clone(), StorageSlotDelta::Map(map_delta_b)),
+        (slot_value.clone(), StorageSlotDelta::Value(value_slot_data)),
+    ]);
+    let storage_delta = AccountStorageDelta::from_raw(raw);
+
+    let delta_1 = AccountDelta::new(account_id, storage_delta, vault_delta, Felt::ONE).unwrap();
+
+    forest.update_account(block_1, &delta_1).unwrap();
+
+    let initial_vault_root = forest.get_vault_root(account_id, block_1).unwrap();
+    let initial_map_a_root = forest.get_storage_map_root(account_id, &slot_map_a, block_1).unwrap();
+    let initial_map_b_root = forest.get_storage_map_root(account_id, &slot_map_b, block_1).unwrap();
+
+    // Block 51: Update only map_a (within retention window)
+    let block_51 = BlockNumber::from(51);
+    let value2 = num_to_word(2000);
+
+    let mut map_delta_a_update = StorageMapDelta::default();
+    map_delta_a_update.insert(key1, value2);
+
+    let raw_51 =
+        BTreeMap::from_iter([(slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a_update))]);
+    let storage_delta_51 = AccountStorageDelta::from_raw(raw_51);
+
+    let delta_51 = AccountDelta::new(
+        account_id,
+        storage_delta_51,
+        AccountVaultDelta::default(),
+        Felt::new(51),
+    )
+    .unwrap();
+
+    forest.update_account(block_51, &delta_51).unwrap();
+
+    // Advance to block 100
+    let block_100 = BlockNumber::from(100);
+
+    // Prune at block 100
+    let (vault_roots_removed, storage_roots_removed) = forest.prune(block_100);
+
+    // The returned counts are forest roots actually freed (i.e. refcount reached zero):
+    // Vault: block 1 holds the most recent vault state for this account, so it is preserved.
+    // Map A: the stale block 1 entry is dropped (block 51 is newer), but its root is
+    //   identical to Map B's block 1 root (both maps are the same single entry), so the
+    //   shared root's refcount only drops to 1 and no root is freed.
+    // Map B: block 1 is the most recent entry for that slot and is preserved.
+    assert_eq!(
+        vault_roots_removed, 0,
+        "Vault root from block 1 should NOT be pruned (most recent)"
+    );
+    assert_eq!(
+        storage_roots_removed, 0,
+        "No storage root freed: Map A's stale block 1 root is shared with Map B's live root"
+    );
+
+    // Verify vault is still accessible
+    let vault_root_at_100 = forest
+        
.get_vault_root(account_id, block_100) + .expect("Vault should be accessible"); + assert_eq!(vault_root_at_100, initial_vault_root, "Vault should be from block 1"); + + // Verify map_a is accessible (from block 51) + let map_a_root_at_100 = forest + .get_storage_map_root(account_id, &slot_map_a, block_100) + .expect("Map A should be accessible"); + assert_ne!( + map_a_root_at_100, initial_map_a_root, + "Map A should be from block 51, not block 1" + ); + + // Verify map_b is still accessible (from block 1) + let map_b_root_at_100 = forest + .get_storage_map_root(account_id, &slot_map_b, block_100) + .expect("Map B should be accessible"); + assert_eq!( + map_b_root_at_100, initial_map_b_root, + "Map B should still be from block 1 (most recent)" + ); + + // Verify map_a block 1 is no longer accessible + let map_a_root_at_1 = forest.get_storage_map_root(account_id, &slot_map_a, block_1); + assert!(map_a_root_at_1.is_none(), "Map A block 1 should be pruned"); + + // Verify map_b block 1 IS still accessible + let map_b_root_at_1 = forest.get_storage_map_root(account_id, &slot_map_b, block_1); + assert!(map_b_root_at_1.is_some(), "Map B block 1 should NOT be pruned (most recent)"); +} diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 042986406..5c86ef1aa 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -1,5 +1,7 @@ -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::num::NonZeroUsize; +use lru::LruCache; use miden_node_proto::domain::account::{AccountStorageMapDetails, StorageMapEntries}; use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; use miden_protocol::account::{ @@ -16,10 +18,24 @@ use miden_protocol::crypto::merkle::{EmptySubtreeRoots, MerkleError}; use miden_protocol::errors::{AssetError, StorageMapError}; use miden_protocol::{EMPTY_WORD, Word}; use thiserror::Error; 
+use tracing::instrument; + +use crate::COMPONENT; #[cfg(test)] mod tests; +// CONSTANTS +// ================================================================================================ + +/// Number of historical blocks to retain in the in-memory forest. +/// Entries older than `chain_tip - HISTORICAL_BLOCK_RETENTION` will be pruned. +pub const HISTORICAL_BLOCK_RETENTION: u32 = 50; + +/// Default size for the LRU cache of latest storage map entries. +/// Used to serve `SlotData::All` queries for the most recent block. +const DEFAULT_STORAGE_CACHE_ENTRIES_SIZE: usize = 5; + // ERRORS // ================================================================================================ @@ -52,6 +68,12 @@ pub enum WitnessError { // INNER FOREST // ================================================================================================ +/// Snapshot of storage map entries at a specific block. +struct StorageSnapshot { + block_num: BlockNumber, + entries: BTreeMap, +} + /// Container for forest-related state that needs to be updated atomically. pub(crate) struct InnerForest { /// `SmtForest` for efficient account storage reconstruction. @@ -60,15 +82,34 @@ pub(crate) struct InnerForest { /// Maps (`account_id`, `slot_name`, `block_num`) to SMT root. /// Populated during block import for all storage map slots. + /// + /// Used for `SlotData::MapKeys` queries (SMT proof generation). + /// Works for all historical blocks within retention window. + /// + /// Attention: Must be a `BTreeMap`, since not every block contains a value here, so we need to + /// be able to query the previous blocks cheaply. storage_map_roots: BTreeMap<(AccountId, StorageSlotName, BlockNumber), Word>, - /// Maps (`account_id`, `slot_name`, `block_num`) to all key-value entries in that storage map. - /// Accumulated from deltas - each block's entries include all entries up to that point. 
- storage_entries: BTreeMap<(AccountId, StorageSlotName, BlockNumber), BTreeMap>, + /// LRU cache of latest storage map entries for `SlotData::All` queries. + /// Only stores the most recent snapshot per (account, slot). + /// Historical queries fall back to DB. + storage_entries_per_account_per_slot: LruCache<(AccountId, StorageSlotName), StorageSnapshot>, + + vault_refcount: HashMap, + storage_slots_refcount: HashMap, /// Maps (`account_id`, `block_num`) to vault SMT root. /// Tracks asset vault versions across all blocks with structural sharing. + /// + /// Attention: Must be a `BTreeMap`, since not every block contains a value here, so we need to + /// be able to query the previous blocks cheaply. vault_roots: BTreeMap<(AccountId, BlockNumber), Word>, + + /// Tracks vault roots by block number for pruning. + vault_roots_by_block: BTreeMap>, + + /// Tracks storage map roots by block number for pruning. + storage_slots_by_block: BTreeMap>, } impl InnerForest { @@ -76,8 +117,14 @@ impl InnerForest { Self { forest: SmtForest::new(), storage_map_roots: BTreeMap::new(), - storage_entries: BTreeMap::new(), + storage_entries_per_account_per_slot: LruCache::new( + NonZeroUsize::new(DEFAULT_STORAGE_CACHE_ENTRIES_SIZE).unwrap(), + ), + vault_refcount: HashMap::new(), + storage_slots_refcount: HashMap::new(), vault_roots: BTreeMap::new(), + vault_roots_by_block: BTreeMap::new(), + storage_slots_by_block: BTreeMap::new(), } } @@ -89,6 +136,25 @@ impl InnerForest { *EmptySubtreeRoots::entry(SMT_DEPTH, 0) } + /// Decrement the reference count in the given map. + /// + /// Returns `true` if the refcount reached zero. 
+ fn decrement_refcount(map: &mut HashMap, root: Word) -> bool { + let Some(count) = map.get_mut(&root) else { + return false; + }; + if *count == 1 { + map.remove(&root); + true + } else { + *count -= 1; + false + } + } + + // ACCESSORS + // -------------------------------------------------------------------------------------------- + /// Retrieves a vault root for the specified account at or before the specified block. pub(crate) fn get_vault_root( &self, @@ -117,6 +183,9 @@ impl InnerForest { .map(|(_, root)| *root) } + // WITNESSES and PROOFS + // -------------------------------------------------------------------------------------------- + /// Retrieves a storage map witness for the specified account and storage slot. /// /// Finds the most recent witness at or before the specified block number. @@ -163,7 +232,7 @@ impl InnerForest { /// /// Returns `None` if no storage root is tracked for this account/slot/block combination. /// Returns a `MerkleError` if the forest doesn't contain sufficient data for the proofs. - pub(crate) fn open_storage_map( + pub(crate) fn get_storage_map_details_for_keys( &self, account_id: AccountId, slot_name: StorageSlotName, @@ -172,7 +241,6 @@ impl InnerForest { ) -> Option> { let root = self.get_storage_map_root(account_id, &slot_name, block_num)?; - // Collect SMT proofs for each key let proofs = Result::from_iter(raw_keys.iter().map(|raw_key| { let key = StorageMap::hash_key(*raw_key); self.forest.open(root, key) @@ -181,38 +249,52 @@ impl InnerForest { Some(proofs.map(|proofs| AccountStorageMapDetails::from_proofs(slot_name, proofs))) } - /// Returns all key-value entries for a specific account storage slot at or before a block. + /// Returns all key-value entries for a specific account storage slot at the latest cached + /// block. Historical queries fall back to DB reconstruction. 
+ /// + /// Returns `None` if: + /// - No entries exist for this account/slot + /// - Query is for a historical block (not the most recent) /// - /// Uses range query semantics: finds the most recent entries at or before `block_num`. - /// Returns `None` if no entries exist for this account/slot up to the given block. /// Returns `LimitExceeded` if there are too many entries to return. - pub(crate) fn storage_map_entries( - &self, + pub(crate) fn get_storage_map_details_full_from_cache( + &mut self, account_id: AccountId, slot_name: StorageSlotName, block_num: BlockNumber, ) -> Option { - // Find the most recent entries at or before block_num - let entries = self - .storage_entries - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), block_num), - ) - .next_back() - .map(|(_, entries)| entries)?; + // Get cached snapshot + let snapshot = self + .storage_entries_per_account_per_slot + .get(&(account_id, slot_name.clone()))?; + + if snapshot.block_num != block_num { + return None; + } - if entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { + if snapshot.entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { return Some(AccountStorageMapDetails { slot_name, entries: StorageMapEntries::LimitExceeded, }); } - let entries = Vec::from_iter(entries.iter().map(|(k, v)| (*k, *v))); + let entries = Vec::from_iter(snapshot.entries.iter().map(|(k, v)| (*k, *v))); Some(AccountStorageMapDetails::from_forest_entries(slot_name, entries)) } + pub(crate) fn cache_storage_map_entries( + &mut self, + account_id: AccountId, + slot_name: StorageSlotName, + block_num: BlockNumber, + entries: Vec<(Word, Word)>, + ) { + let entries = BTreeMap::from_iter(entries); + self.storage_entries_per_account_per_slot + .put((account_id, slot_name), StorageSnapshot { block_num, entries }); + } + // PUBLIC INTERFACE // -------------------------------------------------------------------------------------------- @@ -229,6 +311,7 @@ impl 
InnerForest { /// # Errors /// /// Returns an error if applying a vault delta results in a negative balance. + #[instrument(target = COMPONENT, skip_all, fields(block.number = %block_num))] pub(crate) fn apply_block_updates( &mut self, block_num: BlockNumber, @@ -245,6 +328,9 @@ impl InnerForest { "Updated forest with account delta" ); } + + let _ = self.prune(block_num); + Ok(()) } @@ -311,7 +397,7 @@ impl InnerForest { // so that the map has entries for all accounts, and then return (i.e., no need to insert // anything into the forest) if delta.is_empty() { - self.vault_roots.insert((account_id, block_num), prev_root); + self.track_vault_root(block_num, account_id, prev_root); return; } @@ -339,7 +425,7 @@ impl InnerForest { .batch_insert(prev_root, entries) .expect("forest insertion should succeed"); - self.vault_roots.insert((account_id, block_num), new_root); + self.track_vault_root(block_num, account_id, new_root); tracing::debug!( target: crate::COMPONENT, @@ -350,6 +436,12 @@ impl InnerForest { ); } + fn track_vault_root(&mut self, block_num: BlockNumber, account_id: AccountId, new_root: Word) { + self.vault_roots.insert((account_id, block_num), new_root); + self.vault_roots_by_block.entry(block_num).or_default().push(account_id); + *self.vault_refcount.entry(new_root).or_insert(0) += 1; + } + /// Updates the forest with vault changes from a delta. The vault delta is assumed to be /// non-empty. 
/// @@ -416,7 +508,6 @@ impl InnerForest { entries.push((asset.vault_key().into(), value)); } - assert!(!entries.is_empty(), "non-empty delta should contain entries"); let num_entries = entries.len(); let new_root = self @@ -424,7 +515,7 @@ impl InnerForest { .batch_insert(prev_root, entries) .expect("forest insertion should succeed"); - self.vault_roots.insert((account_id, block_num), new_root); + self.track_vault_root(block_num, account_id, new_root); tracing::debug!( target: crate::COMPONENT, @@ -455,23 +546,6 @@ impl InnerForest { .map_or_else(Self::empty_smt_root, |(_, root)| *root) } - /// Retrieves the most recent entries in the specified storage map. If no storage map exists - /// returns an empty map. - fn get_latest_storage_map_entries( - &self, - account_id: AccountId, - slot_name: &StorageSlotName, - ) -> BTreeMap { - self.storage_entries - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), - ) - .next_back() - .map(|(_, entries)| entries.clone()) - .unwrap_or_default() - } - /// Inserts all storage maps from the provided storage delta into the forest. /// /// Assumes that storage maps for the provided account are not in the forest already. 
@@ -502,22 +576,26 @@ impl InnerForest { .collect(); // if the delta is empty, make sure we create an entry in the storage map roots map - // and storage entries map (so storage_map_entries() queries work) + // and update the cache if raw_map_entries.is_empty() { - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), prev_root); - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), BTreeMap::new()); + self.track_storage_map_slot_root(block_num, account_id, slot_name, prev_root); + + // Update cache with empty map + self.storage_entries_per_account_per_slot.put( + (account_id, slot_name.clone()), + StorageSnapshot { block_num, entries: BTreeMap::new() }, + ); continue; } - // hash the keys before inserting into the forest, matching how `StorageMap` - // hashes keys before inserting into the SMT. - let hashed_entries: Vec<(Word, Word)> = raw_map_entries + let hashed_entries = raw_map_entries .iter() - .map(|(key, value)| (StorageMap::hash_key(*key), *value)) - .collect(); + .map(|(raw_key, value)| { + let hashed_key = StorageMap::hash_key(*raw_key); + (hashed_key, *value) + }) + .collect::>(); // insert the updates into the forest and update storage map roots map let new_root = self @@ -525,18 +603,15 @@ impl InnerForest { .batch_insert(prev_root, hashed_entries.iter().copied()) .expect("forest insertion should succeed"); - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), new_root); + self.track_storage_map_slot_root(block_num, account_id, slot_name, new_root); - assert!(!raw_map_entries.is_empty(), "a non-empty delta should have entries"); let num_entries = raw_map_entries.len(); - // keep track of the state of storage map entries (using raw keys for delta merging) - // TODO: this is a temporary solution until the LargeSmtForest is implemented as - // tracking multiple versions of all storage maps will be prohibitively expensive - let map_entries = BTreeMap::from_iter(raw_map_entries); - 
self.storage_entries - .insert((account_id, slot_name.clone(), block_num), map_entries); + // Keep track of the state of storage map entries (using raw keys for delta merging) + // Update cache with the entries from this insertion + let entries = BTreeMap::from_iter(raw_map_entries); + self.storage_entries_per_account_per_slot + .put((account_id, slot_name.clone()), StorageSnapshot { block_num, entries }); tracing::debug!( target: crate::COMPONENT, @@ -549,6 +624,22 @@ impl InnerForest { } } + fn track_storage_map_slot_root( + &mut self, + block_num: BlockNumber, + account_id: AccountId, + slot_name: &StorageSlotName, + new_root: Word, + ) { + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), new_root); + self.storage_slots_by_block + .entry(block_num) + .or_default() + .push((account_id, slot_name.clone())); + *self.storage_slots_refcount.entry(new_root).or_insert(0) += 1; + } + /// Updates the forest with storage map changes from a delta. /// /// Processes storage map slot deltas, building SMTs for each modified slot and tracking the @@ -559,8 +650,6 @@ impl InnerForest { account_id: AccountId, delta: &AccountStorageDelta, ) { - assert!(!delta.is_empty(), "expected the delta not to be empty"); - for (slot_name, map_delta) in delta.maps() { // map delta shouldn't be empty, but if it is for some reason, there is nothing to do if map_delta.is_empty() { @@ -572,35 +661,28 @@ impl InnerForest { let delta_entries: Vec<(Word, Word)> = map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)).collect(); - // Hash the keys before inserting into the forest, matching how StorageMap - // hashes keys before inserting into the SMT. 
- let hashed_entries: Vec<(Word, Word)> = delta_entries + let hashed_entries = delta_entries .iter() - .map(|(key, value)| (StorageMap::hash_key(*key), *value)) - .collect(); + .map(|(raw_key, value)| { + let hashed_key = StorageMap::hash_key(*raw_key); + (hashed_key, *value) + }) + .collect::>(); let new_root = self .forest .batch_insert(prev_root, hashed_entries.iter().copied()) .expect("forest insertion should succeed"); - self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), new_root); - - // merge the delta with the latest entries in the map (using raw keys) - // TODO: this is a temporary solution until the LargeSmtForest is implemented as - // tracking multiple versions of all storage maps will be prohibitively expensive - let mut latest_entries = self.get_latest_storage_map_entries(account_id, slot_name); - for (key, value) in &delta_entries { - if *value == EMPTY_WORD { - latest_entries.remove(key); - } else { - latest_entries.insert(*key, *value); - } - } + self.track_storage_map_slot_root(block_num, account_id, slot_name, new_root); - self.storage_entries - .insert((account_id, slot_name.clone(), block_num), latest_entries); + self.update_storage_map_slot_cache_entry( + block_num, + account_id, + slot_name, + &delta_entries, + prev_root, + ); tracing::debug!( target: crate::COMPONENT, @@ -612,4 +694,171 @@ impl InnerForest { ); } } + + /// Update the storage map using the given set of key-value-entries. 
+    fn update_storage_map_slot_cache_entry(
+        &mut self,
+        block_num: BlockNumber,
+        account_id: AccountId,
+        slot_name: &StorageSlotName,
+        delta_entries: &Vec<(Word, Word)>,
+        prev_root: Word,
+    ) {
+        // Update cache by merging delta with latest entries
+        let key = (account_id, slot_name.clone());
+        let Some(mut latest_entries) = self
+            .storage_entries_per_account_per_slot
+            .get(&key)
+            .map(|snapshot| snapshot.entries.clone())
+            .or_else(|| {
+                if prev_root == Self::empty_smt_root() {
+                    Some(BTreeMap::new())
+                } else {
+                    None
+                }
+            })
+        else {
+            return;
+        };
+
+        for (k, v) in delta_entries {
+            if *v == EMPTY_WORD {
+                latest_entries.remove(k);
+            } else {
+                latest_entries.insert(*k, *v);
+            }
+        }
+
+        self.storage_entries_per_account_per_slot
+            .put(key, StorageSnapshot { block_num, entries: latest_entries });
+    }
+
+    // PRUNING
+    // --------------------------------------------------------------------------------------------
+
+    /// Prunes old entries from the in-memory forest data structures.
+    ///
+    /// Only iterates over blocks in the pruning window (at or before the cutoff block). For each
+    /// affected account or slot, checks if there's a newer entry before pruning - preserving the
+    /// most recent state.
+    ///
+    /// The `SmtForest` itself is not pruned directly as it uses structural sharing and old roots
+    /// are naturally garbage-collected when they become unreachable.
+    ///
+    /// Note: Returns (`vault_roots_removed`, `storage_roots_removed`). Storage entries count is
+    /// no longer tracked since we use an LRU cache.
+    #[instrument(target = COMPONENT, skip_all, fields(block.number = %chain_tip), ret)]
+    pub(crate) fn prune(&mut self, chain_tip: BlockNumber) -> (usize, usize) {
+        let cutoff_block =
+            BlockNumber::from(chain_tip.as_u32().saturating_sub(HISTORICAL_BLOCK_RETENTION));
+
+        let vault_roots_removed = self.prune_vault_roots(cutoff_block);
+        let storage_roots_removed = self.prune_storage_roots(cutoff_block);
+
+        // Cache is self-pruning via LRU eviction
+        (vault_roots_removed, storage_roots_removed)
+    }
+
+    /// Prunes vault roots at or before the cutoff block.
+    ///
+    /// Only iterates over blocks in the pruning window, then for each affected account checks
+    /// if there's a newer entry before pruning.
+    fn prune_vault_roots(&mut self, cutoff_block: BlockNumber) -> usize {
+        // Get blocks to prune (only blocks at or before the cutoff)
+        let blocks_to_check: Vec<BlockNumber> = self
+            .vault_roots_by_block
+            .range(..=cutoff_block)
+            .map(|(block, _)| *block)
+            .collect();
+
+        let mut roots_to_prune = HashSet::new();
+
+        for block in blocks_to_check {
+            let Some(accounts) = self.vault_roots_by_block.remove(&block) else {
+                continue;
+            };
+
+            let mut accounts_to_keep = Vec::new();
+
+            for account_id in accounts {
+                // Check if there's a newer entry for this account
+                let has_newer_entry = self
+                    .vault_roots
+                    .range((account_id, block.child())..=(account_id, BlockNumber::from(u32::MAX)))
+                    .next()
+                    .is_some();
+
+                if has_newer_entry {
+                    if let Some(root) = self.vault_roots.remove(&(account_id, block)) {
+                        if Self::decrement_refcount(&mut self.vault_refcount, root) {
+                            roots_to_prune.insert(root);
+                        }
+                    }
+                } else {
+                    accounts_to_keep.push(account_id);
+                }
+            }
+
+            if !accounts_to_keep.is_empty() {
+                self.vault_roots_by_block.insert(block, accounts_to_keep);
+            }
+        }
+
+        let roots_removed = roots_to_prune.len();
+        self.forest.pop_smts(roots_to_prune);
+        roots_removed
+    }
+
+    /// Prunes storage map roots at or before the cutoff block.
+    ///
+    /// Only iterates over blocks in the pruning window, then for each affected slot checks
+    /// if there's a newer entry before pruning.
+    fn prune_storage_roots(&mut self, cutoff_block: BlockNumber) -> usize {
+        // Get blocks to prune (only blocks at or before the cutoff)
+        let blocks_to_check: Vec<BlockNumber> = self
+            .storage_slots_by_block
+            .range(..=cutoff_block)
+            .map(|(block, _)| *block)
+            .collect();
+
+        let mut roots_to_prune = HashSet::new();
+
+        for block in blocks_to_check {
+            let Some(slots) = self.storage_slots_by_block.remove(&block) else {
+                continue;
+            };
+
+            let mut slots_to_keep = Vec::new();
+
+            for (account_id, slot_name) in slots {
+                // Check if there's a newer entry for this account/slot
+                let has_newer_entry = self
+                    .storage_map_roots
+                    .range(
+                        (account_id, slot_name.clone(), block.child())
+                            ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)),
+                    )
+                    .next()
+                    .is_some();
+
+                if has_newer_entry {
+                    let key = (account_id, slot_name.clone(), block);
+                    if let Some(root) = self.storage_map_roots.remove(&key) {
+                        if Self::decrement_refcount(&mut self.storage_slots_refcount, root) {
+                            roots_to_prune.insert(root);
+                        }
+                    }
+                } else {
+                    slots_to_keep.push((account_id, slot_name));
+                }
+            }
+
+            if !slots_to_keep.is_empty() {
+                self.storage_slots_by_block.insert(block, slots_to_keep);
+            }
+        }
+
+        let roots_removed = roots_to_prune.len();
+        self.forest.pop_smts(roots_to_prune);
+        roots_removed
+    }
+}
diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs
index 5fc0cc6c0..4a27701ed 100644
--- a/crates/store/src/inner_forest/tests.rs
+++ b/crates/store/src/inner_forest/tests.rs
@@ -1,8 +1,11 @@
+use assert_matches::assert_matches;
 use miden_protocol::account::AccountCode;
-use miden_protocol::asset::{Asset, AssetVault, FungibleAsset};
+use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset};
+use miden_protocol::crypto::merkle::smt::SmtProof;
 use miden_protocol::testing::account_id::{
ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, + ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, }; use miden_protocol::{Felt, FieldElement}; @@ -20,13 +23,16 @@ fn dummy_fungible_asset(faucet_id: AccountId, amount: u64) -> Asset { FungibleAsset::new(faucet_id, amount).unwrap().into() } +fn num_to_word(n: u64) -> Word { + [Felt::ZERO, Felt::ZERO, Felt::ZERO, Felt::new(n)].into() +} + /// Creates a partial `AccountDelta` (without code) for testing incremental updates. fn dummy_partial_delta( account_id: AccountId, vault_delta: AccountVaultDelta, storage_delta: AccountStorageDelta, ) -> AccountDelta { - // For partial deltas, nonce_delta must be > 0 if there are changes let nonce_delta = if vault_delta.is_empty() && storage_delta.is_empty() { Felt::ZERO } else { @@ -39,43 +45,41 @@ fn dummy_partial_delta( fn dummy_full_state_delta(account_id: AccountId, assets: &[Asset]) -> AccountDelta { use miden_protocol::account::{Account, AccountStorage}; - // Create a minimal account with the given assets let vault = AssetVault::new(assets).unwrap(); let storage = AccountStorage::new(vec![]).unwrap(); let code = AccountCode::mock(); let nonce = Felt::ONE; let account = Account::new(account_id, vault, storage, code, nonce, None).unwrap(); - - // Convert to delta - this will be a full-state delta because it has code AccountDelta::try_from(account).unwrap() } +// INITIALIZATION & BASIC OPERATIONS +// ================================================================================================ + #[test] -fn test_empty_smt_root_is_recognized() { +fn empty_smt_root_is_recognized() { use miden_protocol::crypto::merkle::smt::Smt; let empty_root = InnerForest::empty_smt_root(); - // Verify an empty SMT has the expected root assert_eq!(Smt::default().root(), empty_root); - // Test that SmtForest accepts this root in batch_insert let mut forest = SmtForest::new(); let entries = vec![(Word::from([1u32, 2, 3, 4]), Word::from([5u32, 6, 7, 8]))]; - 
assert!(forest.batch_insert(empty_root, entries).is_ok()); + assert_matches!(forest.batch_insert(empty_root, entries), Ok(_)); } #[test] -fn test_inner_forest_basic_initialization() { +fn inner_forest_basic_initialization() { let forest = InnerForest::new(); assert!(forest.storage_map_roots.is_empty()); assert!(forest.vault_roots.is_empty()); } #[test] -fn test_update_account_with_empty_deltas() { +fn update_account_with_empty_deltas() { let mut forest = InnerForest::new(); let account_id = dummy_account(); let block_num = BlockNumber::GENESIS.child(); @@ -88,37 +92,21 @@ fn test_update_account_with_empty_deltas() { forest.update_account(block_num, &delta).unwrap(); - // Empty deltas should not create entries assert!(!forest.vault_roots.contains_key(&(account_id, block_num))); assert!(forest.storage_map_roots.is_empty()); } -#[test] -fn test_update_vault_with_fungible_asset() { - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - let block_num = BlockNumber::GENESIS.child(); - - let asset = dummy_fungible_asset(faucet_id, 100); - let mut vault_delta = AccountVaultDelta::default(); - vault_delta.add_asset(asset).unwrap(); - - let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); - forest.update_account(block_num, &delta).unwrap(); - - let vault_root = forest.vault_roots[&(account_id, block_num)]; - assert_ne!(vault_root, EMPTY_WORD); -} +// VAULT TESTS +// ================================================================================================ #[test] -fn test_compare_partial_vs_full_state_delta_vault() { +fn vault_partial_vs_full_state_produces_same_root() { let account_id = dummy_account(); let faucet_id = dummy_faucet(); let block_num = BlockNumber::GENESIS.child(); let asset = dummy_fungible_asset(faucet_id, 100); - // Approach 1: Partial delta (simulates block application) + // Partial delta (block application) let mut forest_partial = InnerForest::new(); let 
mut vault_delta = AccountVaultDelta::default(); vault_delta.add_asset(asset).unwrap(); @@ -126,12 +114,11 @@ fn test_compare_partial_vs_full_state_delta_vault() { dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); forest_partial.update_account(block_num, &partial_delta).unwrap(); - // Approach 2: Full-state delta (simulates DB reconstruction) + // Full-state delta (DB reconstruction) let mut forest_full = InnerForest::new(); let full_delta = dummy_full_state_delta(account_id, &[asset]); forest_full.update_account(block_num, &full_delta).unwrap(); - // Both approaches must produce identical vault roots let root_partial = forest_partial.vault_roots.get(&(account_id, block_num)).unwrap(); let root_full = forest_full.vault_roots.get(&(account_id, block_num)).unwrap(); @@ -140,102 +127,20 @@ fn test_compare_partial_vs_full_state_delta_vault() { } #[test] -fn test_incremental_vault_updates() { - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - - // Block 1: 100 tokens - let block_1 = BlockNumber::GENESIS.child(); - let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); - let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); - forest.update_account(block_1, &delta_1).unwrap(); - let root_1 = forest.vault_roots[&(account_id, block_1)]; - - // Block 2: 150 tokens (update) - let block_2 = block_1.child(); - let mut vault_delta_2 = AccountVaultDelta::default(); - vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); - let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); - forest.update_account(block_2, &delta_2).unwrap(); - let root_2 = forest.vault_roots[&(account_id, block_2)]; - - assert_ne!(root_1, root_2); -} - -#[test] -fn test_vault_state_persists_across_blocks_without_changes() { - // Regression test for issue #7: 
vault state should persist across blocks - // where no changes occur, not reset to empty. +fn vault_incremental_updates_with_add_and_remove() { let mut forest = InnerForest::new(); let account_id = dummy_account(); let faucet_id = dummy_faucet(); - // Helper to query vault root at or before a block (range query) - let get_vault_root = |forest: &InnerForest, account_id: AccountId, block_num: BlockNumber| { - forest - .vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) - .next_back() - .map(|(_, root)| *root) - }; - // Block 1: Add 100 tokens let block_1 = BlockNumber::GENESIS.child(); let mut vault_delta_1 = AccountVaultDelta::default(); vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); forest.update_account(block_1, &delta_1).unwrap(); - let root_after_block_1 = forest.vault_roots[&(account_id, block_1)]; - - // Blocks 2-5: No changes to this account (simulated by not calling update_account) - // This means no entries are added to vault_roots for these blocks. - - // Block 6: Add 50 more tokens - // The previous root lookup should find block_1's root, not return empty. 
- let block_6 = BlockNumber::from(6); - let mut vault_delta_6 = AccountVaultDelta::default(); - vault_delta_6.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); // 100 + 50 = 150 - let delta_6 = dummy_partial_delta(account_id, vault_delta_6, AccountStorageDelta::default()); - forest.update_account(block_6, &delta_6).unwrap(); - - // The root at block 6 should be different from block 1 (we added more tokens) - let root_after_block_6 = forest.vault_roots[&(account_id, block_6)]; - assert_ne!(root_after_block_1, root_after_block_6); - - // Verify range query finds the correct previous root for intermediate blocks - // Block 3 should return block 1's root (most recent before block 3) - let root_at_block_3 = get_vault_root(&forest, account_id, BlockNumber::from(3)); - assert_eq!(root_at_block_3, Some(root_after_block_1)); - - // Block 5 should also return block 1's root - let root_at_block_5 = get_vault_root(&forest, account_id, BlockNumber::from(5)); - assert_eq!(root_at_block_5, Some(root_after_block_1)); - - // Block 6 should return block 6's root - let root_at_block_6 = get_vault_root(&forest, account_id, block_6); - assert_eq!(root_at_block_6, Some(root_after_block_6)); -} - -#[test] -fn test_partial_delta_applies_fungible_changes_correctly() { - // Regression test for issue #8: partial deltas should apply changes to previous balance, - // not treat amounts as absolute values. 
- let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - - // Block 1: Add 100 tokens (partial delta with +100) - let block_1 = BlockNumber::GENESIS.child(); - let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); - let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); - forest.update_account(block_1, &delta_1).unwrap(); let root_after_100 = forest.vault_roots[&(account_id, block_1)]; - // Block 2: Add 50 more tokens (partial delta with +50) - // Result should be 150 tokens, not 50 tokens + // Block 2: Add 50 more tokens (result: 150 tokens) let block_2 = block_1.child(); let mut vault_delta_2 = AccountVaultDelta::default(); vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 50)).unwrap(); @@ -243,11 +148,9 @@ fn test_partial_delta_applies_fungible_changes_correctly() { forest.update_account(block_2, &delta_2).unwrap(); let root_after_150 = forest.vault_roots[&(account_id, block_2)]; - // Roots should be different (100 tokens vs 150 tokens) assert_ne!(root_after_100, root_after_150); - // Block 3: Remove 30 tokens (partial delta with -30) - // Result should be 120 tokens + // Block 3: Remove 30 tokens (result: 120 tokens) let block_3 = block_2.child(); let mut vault_delta_3 = AccountVaultDelta::default(); vault_delta_3.remove_asset(dummy_fungible_asset(faucet_id, 30)).unwrap(); @@ -255,11 +158,9 @@ fn test_partial_delta_applies_fungible_changes_correctly() { forest.update_account(block_3, &delta_3).unwrap(); let root_after_120 = forest.vault_roots[&(account_id, block_3)]; - // Root should change again assert_ne!(root_after_150, root_after_120); - // Verify by creating a fresh forest with a full-state delta of 120 tokens - // The roots should match + // Verify by comparing to full-state delta let mut fresh_forest = InnerForest::new(); let full_delta = dummy_full_state_delta(account_id, 
&[dummy_fungible_asset(faucet_id, 120)]); fresh_forest.update_account(block_3, &full_delta).unwrap(); @@ -269,96 +170,59 @@ fn test_partial_delta_applies_fungible_changes_correctly() { } #[test] -fn test_partial_delta_across_long_block_range() { - // Validation test: partial deltas should work across 101+ blocks. - // - // This test passes now because InnerForest keeps all history. Once pruning is implemented - // (estimated ~50 blocks), this test will fail unless DB fallback is also implemented. - // When that happens, the test should be updated to use DB fallback or converted to an - // integration test that has DB access. +fn vault_state_persists_across_block_gaps() { let mut forest = InnerForest::new(); let account_id = dummy_account(); let faucet_id = dummy_faucet(); - // Block 1: Add 1000 tokens + let get_vault_root = |forest: &InnerForest, account_id: AccountId, block_num: BlockNumber| { + forest + .vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, block_num)) + .next_back() + .map(|(_, root)| *root) + }; + + // Block 1: Add 100 tokens let block_1 = BlockNumber::GENESIS.child(); let mut vault_delta_1 = AccountVaultDelta::default(); - vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 1000)).unwrap(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 100)).unwrap(); let delta_1 = dummy_partial_delta(account_id, vault_delta_1, AccountStorageDelta::default()); forest.update_account(block_1, &delta_1).unwrap(); - let root_after_1000 = forest.vault_roots[&(account_id, block_1)]; - - // Blocks 2-100: No changes to this account (simulating long gap) - - // Block 101: Add 500 more tokens (partial delta with +500) - // This requires looking up block 1's state across a 100-block gap. 
- let block_101 = BlockNumber::from(101); - let mut vault_delta_101 = AccountVaultDelta::default(); - vault_delta_101.add_asset(dummy_fungible_asset(faucet_id, 500)).unwrap(); - let delta_101 = - dummy_partial_delta(account_id, vault_delta_101, AccountStorageDelta::default()); - forest.update_account(block_101, &delta_101).unwrap(); - let root_after_1500 = forest.vault_roots[&(account_id, block_101)]; - - // Roots should be different (1000 tokens vs 1500 tokens) - assert_ne!(root_after_1000, root_after_1500); - - // Verify the final state matches a fresh forest with 1500 tokens - let mut fresh_forest = InnerForest::new(); - let full_delta = dummy_full_state_delta(account_id, &[dummy_fungible_asset(faucet_id, 1500)]); - fresh_forest.update_account(block_101, &full_delta).unwrap(); - let root_full_state_1500 = fresh_forest.vault_roots[&(account_id, block_101)]; - - assert_eq!(root_after_1500, root_full_state_1500); -} - -#[test] -fn test_update_storage_map() { - use std::collections::BTreeMap; - - use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; - - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let block_num = BlockNumber::GENESIS.child(); + let root_after_block_1 = forest.vault_roots[&(account_id, block_1)]; - let slot_name = StorageSlotName::mock(3); - let key = Word::from([1u32, 2, 3, 4]); - let value = Word::from([5u32, 6, 7, 8]); + // Blocks 2-5: No changes (simulated by not calling update_account) - let mut map_delta = StorageMapDelta::default(); - map_delta.insert(key, value); - let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); - let storage_delta = AccountStorageDelta::from_raw(raw); + // Block 6: Add 50 more tokens (total: 150) + let block_6 = BlockNumber::from(6); + let mut vault_delta_6 = AccountVaultDelta::default(); + vault_delta_6.add_asset(dummy_fungible_asset(faucet_id, 150)).unwrap(); + let delta_6 = dummy_partial_delta(account_id, vault_delta_6, 
AccountStorageDelta::default()); + forest.update_account(block_6, &delta_6).unwrap(); + let root_after_block_6 = forest.vault_roots[&(account_id, block_6)]; - let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); - forest.update_account(block_num, &delta).unwrap(); + assert_ne!(root_after_block_1, root_after_block_6); - // Verify storage root was created - assert!( - forest - .storage_map_roots - .contains_key(&(account_id, slot_name.clone(), block_num)) + // Verify range query finds correct previous roots + assert_eq!( + get_vault_root(&forest, account_id, BlockNumber::from(3)), + Some(root_after_block_1) + ); + assert_eq!( + get_vault_root(&forest, account_id, BlockNumber::from(5)), + Some(root_after_block_1) ); - let storage_root = forest.storage_map_roots[&(account_id, slot_name, block_num)]; - assert_ne!(storage_root, InnerForest::empty_smt_root()); + assert_eq!(get_vault_root(&forest, account_id, block_6), Some(root_after_block_6)); } #[test] -fn test_full_state_delta_with_empty_vault_records_root() { - // Regression test for issue #1581: full-state deltas with empty vaults must still record - // the vault root so that subsequent `get_vault_asset_witnesses` calls succeed. - // - // The network counter account from the network monitor has an empty vault (it only uses - // storage slots). Without this fix, `get_vault_asset_witnesses` fails with "root not found" - // because no vault root was ever recorded for the account. +fn vault_full_state_with_empty_vault_records_root() { use miden_protocol::account::{Account, AccountStorage}; let mut forest = InnerForest::new(); let account_id = dummy_account(); let block_num = BlockNumber::GENESIS.child(); - // Create a full-state delta with an empty vault (like the network counter account). 
let vault = AssetVault::new(&[]).unwrap(); let storage = AccountStorage::new(vec![]).unwrap(); let code = AccountCode::mock(); @@ -366,27 +230,19 @@ fn test_full_state_delta_with_empty_vault_records_root() { let account = Account::new(account_id, vault, storage, code, nonce, None).unwrap(); let full_delta = AccountDelta::try_from(account).unwrap(); - // Sanity check: the vault delta should be empty. assert!(full_delta.vault().is_empty()); assert!(full_delta.is_full_state()); forest.update_account(block_num, &full_delta).unwrap(); - // The vault root must be recorded even though the vault is empty. assert!( forest.vault_roots.contains_key(&(account_id, block_num)), "vault root should be recorded for full-state deltas with empty vaults" ); - // Verify the recorded root is the empty SMT root. let recorded_root = forest.vault_roots[&(account_id, block_num)]; - assert_eq!( - recorded_root, - InnerForest::empty_smt_root(), - "empty vault should have the empty SMT root" - ); + assert_eq!(recorded_root, InnerForest::empty_smt_root()); - // Verify `get_vault_asset_witnesses` succeeds (returns empty witnesses for empty keys). 
let witnesses = forest .get_vault_asset_witnesses(account_id, block_num, std::collections::BTreeSet::new()) .expect("get_vault_asset_witnesses should succeed for accounts with empty vaults"); @@ -394,7 +250,65 @@ fn test_full_state_delta_with_empty_vault_records_root() { } #[test] -fn test_storage_map_incremental_updates() { +fn vault_shared_root_retained_when_one_entry_pruned() { + let mut forest = InnerForest::new(); + let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); + let faucet_id = dummy_faucet(); + let block_1 = BlockNumber::GENESIS.child(); + let asset_amount = u64::from(HISTORICAL_BLOCK_RETENTION); + let amount_increment = asset_amount / u64::from(HISTORICAL_BLOCK_RETENTION); + let asset = dummy_fungible_asset(faucet_id, asset_amount); + let asset_key = AssetVaultKey::new_unchecked(asset.vault_key().into()); + + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(asset).unwrap(); + let delta_1 = dummy_partial_delta(account1, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, asset_amount)).unwrap(); + let delta_2 = dummy_partial_delta(account2, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_2).unwrap(); + + let root1 = forest.vault_roots[&(account1, block_1)]; + let root2 = forest.vault_roots[&(account2, block_1)]; + assert_eq!(root1, root2); + + let block_at_51 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 1); + let mut vault_delta_2_update = AccountVaultDelta::default(); + vault_delta_2_update + .add_asset(dummy_fungible_asset(faucet_id, amount_increment)) + .unwrap(); + let delta_2_update = + dummy_partial_delta(account2, vault_delta_2_update, AccountStorageDelta::default()); + 
forest.update_account(block_at_51, &delta_2_update).unwrap(); + + let block_at_52 = BlockNumber::from(HISTORICAL_BLOCK_RETENTION + 2); + let (vault_roots_removed, storage_roots_removed) = forest.prune(block_at_52); + + assert_eq!(vault_roots_removed, 0); + assert_eq!(storage_roots_removed, 0); + assert!(forest.vault_roots.contains_key(&(account1, block_1))); + assert!(!forest.vault_roots.contains_key(&(account2, block_1))); + assert_eq!(forest.vault_roots_by_block[&block_1], vec![account1]); + + let vault_root_at_52 = forest.get_vault_root(account1, block_at_52); + assert_eq!(vault_root_at_52, Some(root1)); + + let witnesses = forest + .get_vault_asset_witnesses(account1, block_at_52, [asset_key].into()) + .expect("Should be able to get vault witness after pruning"); + assert_eq!(witnesses.len(), 1); + let proof: SmtProof = witnesses[0].clone().into(); + assert_eq!(proof.compute_root(), root1); +} + +// STORAGE MAP TESTS +// ================================================================================================ + +#[test] +fn storage_map_incremental_updates() { use std::collections::BTreeMap; use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; @@ -419,7 +333,7 @@ fn test_storage_map_incremental_updates() { forest.update_account(block_1, &delta_1).unwrap(); let root_1 = forest.storage_map_roots[&(account_id, slot_name.clone(), block_1)]; - // Block 2: Insert key2 -> value2 (key1 should persist) + // Block 2: Insert key2 -> value2 let block_2 = block_1.child(); let mut map_delta_2 = StorageMapDelta::default(); map_delta_2.insert(key2, value2); @@ -439,14 +353,13 @@ fn test_storage_map_incremental_updates() { forest.update_account(block_3, &delta_3).unwrap(); let root_3 = forest.storage_map_roots[&(account_id, slot_name, block_3)]; - // All roots should be different assert_ne!(root_1, root_2); assert_ne!(root_2, root_3); assert_ne!(root_1, root_3); } #[test] -fn test_empty_storage_map_entries_query() { +fn 
storage_map_empty_entries_query() { use miden_protocol::account::auth::PublicKeyCommitment; use miden_protocol::account::{ AccountBuilder, @@ -463,7 +376,6 @@ fn test_empty_storage_map_entries_query() { let block_num = BlockNumber::GENESIS.child(); let slot_name = StorageSlotName::mock(0); - // Create an account with an empty storage map slot let storage_map = StorageMap::with_entries(vec![]).unwrap(); let component_storage = vec![StorageSlot::with_map(slot_name.clone(), storage_map)]; @@ -483,15 +395,11 @@ fn test_empty_storage_map_entries_query() { .unwrap(); let account_id = account.id(); - - // Convert to full-state delta (this triggers insert_account_storage path) let full_delta = AccountDelta::try_from(account).unwrap(); - assert!(full_delta.is_full_state(), "delta should be full-state"); + assert!(full_delta.is_full_state()); - // Apply the delta forest.update_account(block_num, &full_delta).unwrap(); - // Verify storage_map_roots has an entry assert!( forest .storage_map_roots @@ -499,11 +407,10 @@ fn test_empty_storage_map_entries_query() { "storage_map_roots should have an entry for the empty map" ); - // Verify storage_map_entries returns Some (not None) - this is the bug fix validation - let result = forest.storage_map_entries(account_id, slot_name.clone(), block_num); + let result = + forest.get_storage_map_details_full_from_cache(account_id, slot_name.clone(), block_num); assert!(result.is_some(), "storage_map_entries should return Some for empty maps"); - // Verify the entries are empty let details = result.unwrap(); assert_eq!(details.slot_name, slot_name); match details.entries { @@ -518,3 +425,502 @@ fn test_empty_storage_map_entries_query() { }, } } + +#[test] +fn storage_map_open_returns_proofs() { + use std::collections::BTreeMap; + + use assert_matches::assert_matches; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = 
StorageSlotName::mock(3); + let block_num = BlockNumber::GENESIS.child(); + + let mut map_delta = StorageMapDelta::default(); + for i in 0..20u32 { + let key = Word::from([i, 0, 0, 0]); + let value = Word::from([0, 0, 0, i]); + map_delta.insert(key, value); + } + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + let keys: Vec = (0..20u32).map(|i| Word::from([i, 0, 0, 0])).collect(); + let result = + forest.get_storage_map_details_for_keys(account_id, slot_name.clone(), block_num, &keys); + + let details = result.expect("Should return Some").expect("Should not error"); + assert_matches!(details.entries, StorageMapEntries::EntriesWithProofs(entries) => { + assert_eq!(entries.len(), keys.len()); + }); +} + +#[test] +fn storage_map_all_entries_uses_db_after_cache_eviction() { + use std::collections::BTreeMap; + + use assert_matches::assert_matches; + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + + for slot_index in 0..6u32 { + let slot_name = StorageSlotName::mock(slot_index as usize); + let block_num = BlockNumber::from(slot_index + 1); + let key = num_to_word(u64::from(slot_index + 1)); + let value = num_to_word(u64::from(slot_index + 1) * 10); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key, value); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + } + + let evicted_slot = StorageSlotName::mock(0); + assert!( + forest + 
.storage_entries_per_account_per_slot + .get(&(account_id, evicted_slot.clone())) + .is_none(), + "oldest slot should be evicted from LRU" + ); + + let db_entries = vec![(num_to_word(1), num_to_word(10))]; + forest.cache_storage_map_entries( + account_id, + evicted_slot.clone(), + BlockNumber::from(1), + db_entries.clone(), + ); + + let details = forest + .get_storage_map_details_full_from_cache( + account_id, + evicted_slot.clone(), + BlockNumber::from(1), + ) + .expect("cache should return details after fallback"); + + assert_matches!(details.entries, StorageMapEntries::AllEntries(entries) => { + assert_eq!(entries, db_entries); + }); +} + +// PRUNING TESTS +// ================================================================================================ + +const TEST_CHAIN_LENGTH: u32 = 100; +const TEST_AMOUNT_MULTIPLIER: u32 = 100; +const TEST_PRUNE_CHAIN_TIP: u32 = HISTORICAL_BLOCK_RETENTION + 5; + +#[test] +fn prune_handles_empty_forest() { + let mut forest = InnerForest::new(); + + let (vault_removed, storage_roots_removed) = forest.prune(BlockNumber::GENESIS); + + assert_eq!(vault_removed, 0); + assert_eq!(storage_roots_removed, 0); +} + +#[test] +fn prune_removes_smt_roots_from_forest() { + use miden_protocol::account::delta::StorageMapDelta; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let slot_name = StorageSlotName::mock(7); + + for i in 1..=TEST_PRUNE_CHAIN_TIP { + let block_num = BlockNumber::from(i); + + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(dummy_fungible_asset(faucet_id, (i * TEST_AMOUNT_MULTIPLIER).into())) + .unwrap(); + let storage_delta = if i.is_multiple_of(3) { + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(Word::from([1u32, 0, 0, 0]), Word::from([99u32, i, i * i, i * i * i])); + let asd = AccountStorageDelta::new(); + asd.add_updated_maps([(slot_name.clone(), map_delta)]) + } else { + 
AccountStorageDelta::default() + }; + + let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); + forest.update_account(block_num, &delta).unwrap(); + } + + let retained_block = BlockNumber::from(TEST_PRUNE_CHAIN_TIP); + let pruned_block = BlockNumber::from(3u32); + let vault_root_retained = forest.vault_roots[&(account_id, retained_block)]; + let vault_root_pruned = forest.vault_roots[&(account_id, pruned_block)]; + let storage_root_pruned = + forest.storage_map_roots[&(account_id, slot_name.clone(), pruned_block)]; + + let (vault_removed, storage_roots_removed) = forest.prune(retained_block); + + assert!(vault_removed > 0); + assert!(storage_roots_removed > 0); + assert!(forest.vault_roots.contains_key(&(account_id, retained_block))); + assert!(!forest.vault_roots.contains_key(&(account_id, pruned_block))); + assert!(!forest.storage_map_roots.contains_key(&(account_id, slot_name, pruned_block))); + + let asset_key: Word = FungibleAsset::new(faucet_id, 0).unwrap().vault_key().into(); + assert_matches!(forest.forest.open(vault_root_retained, asset_key), Ok(_)); + assert_matches!(forest.forest.open(vault_root_pruned, asset_key), Err(_)); + + let storage_key = StorageMap::hash_key(Word::from([1u32, 0, 0, 0])); + assert_matches!(forest.forest.open(storage_root_pruned, storage_key), Err(_)); +} + +#[test] +fn prune_respects_retention_boundary() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + for i in 1..=HISTORICAL_BLOCK_RETENTION { + let block_num = BlockNumber::from(i); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(dummy_fungible_asset(faucet_id, (i * TEST_AMOUNT_MULTIPLIER).into())) + .unwrap(); + let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); + forest.update_account(block_num, &delta).unwrap(); + } + + let (vault_removed, storage_roots_removed) = + 
forest.prune(BlockNumber::from(HISTORICAL_BLOCK_RETENTION)); + + assert_eq!(vault_removed, 0); + assert_eq!(storage_roots_removed, 0); + assert_eq!(forest.vault_roots.len(), HISTORICAL_BLOCK_RETENTION as usize); +} + +#[test] +fn prune_vault_roots_removes_old_entries() { + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + for i in 1..=TEST_CHAIN_LENGTH { + let block_num = BlockNumber::from(i); + let amount = (i * TEST_AMOUNT_MULTIPLIER).into(); + let mut vault_delta = AccountVaultDelta::default(); + vault_delta.add_asset(dummy_fungible_asset(faucet_id, amount)).unwrap(); + let delta = dummy_partial_delta(account_id, vault_delta, AccountStorageDelta::default()); + forest.update_account(block_num, &delta).unwrap(); + } + + assert_eq!(forest.vault_roots.len(), TEST_CHAIN_LENGTH as usize); + + let (vault_removed, ..) = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + + let expected_removed = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; + assert_eq!(vault_removed, expected_removed); + + let expected_remaining = HISTORICAL_BLOCK_RETENTION as usize; + assert_eq!(forest.vault_roots.len(), expected_remaining); + + let remaining_blocks = Vec::from_iter(forest.vault_roots.keys().map(|(_, b)| b.as_u32())); + let oldest_remaining = *remaining_blocks.iter().min().unwrap(); + let expected_oldest = TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION + 1; + assert_eq!(oldest_remaining, expected_oldest); +} + +#[test] +fn prune_storage_map_roots_removes_old_entries() { + use miden_protocol::account::delta::StorageMapDelta; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(3); + + for i in 1..=TEST_CHAIN_LENGTH { + let block_num = BlockNumber::from(i); + let key = Word::from([i, i * i, 5, 4]); + let value = Word::from([0, 0, i * i * i, 77]); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(key, value); + let asd = 
AccountStorageDelta::new().add_updated_maps([(slot_name.clone(), map_delta)]); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), asd); + forest.update_account(block_num, &delta).unwrap(); + } + + assert_eq!(forest.storage_map_roots.len(), TEST_CHAIN_LENGTH as usize); + + let (_, storage_roots_removed) = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + + let expected_removed = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; + assert_eq!(storage_roots_removed, expected_removed); + + let expected_remaining = HISTORICAL_BLOCK_RETENTION as usize; + assert_eq!(forest.storage_map_roots.len(), expected_remaining); + // Cache size: LRU may have evicted entries, just verify it's populated + assert!(!forest.storage_entries_per_account_per_slot.is_empty()); +} + +#[test] +fn prune_handles_multiple_accounts() { + let mut forest = InnerForest::new(); + let account1 = dummy_account(); + let account2 = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let faucet_id = dummy_faucet(); + + for i in 1..=TEST_CHAIN_LENGTH { + let block_num = BlockNumber::from(i); + let amount = (i * TEST_AMOUNT_MULTIPLIER).into(); + + let mut vault_delta1 = AccountVaultDelta::default(); + vault_delta1.add_asset(dummy_fungible_asset(faucet_id, amount)).unwrap(); + let delta1 = dummy_partial_delta(account1, vault_delta1, AccountStorageDelta::default()); + forest.update_account(block_num, &delta1).unwrap(); + + let mut vault_delta2 = AccountVaultDelta::default(); + vault_delta2.add_asset(dummy_fungible_asset(account2, amount * 2)).unwrap(); + let delta2 = dummy_partial_delta(account2, vault_delta2, AccountStorageDelta::default()); + forest.update_account(block_num, &delta2).unwrap(); + } + + assert_eq!(forest.vault_roots.len(), (TEST_CHAIN_LENGTH * 2) as usize); + + let (vault_removed, _) = forest.prune(BlockNumber::from(TEST_CHAIN_LENGTH)); + + let expected_removed_per_account = (TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION) as usize; + 
assert!(vault_removed > 0); + assert!(vault_removed <= expected_removed_per_account * 2); + + let expected_remaining_per_account = HISTORICAL_BLOCK_RETENTION as usize; + let account1_entries = forest.vault_roots.keys().filter(|(id, _)| *id == account1).count(); + let account2_entries = forest.vault_roots.keys().filter(|(id, _)| *id == account2).count(); + assert_eq!(account1_entries, expected_remaining_per_account); + assert_eq!(account2_entries, expected_remaining_per_account); +} + +#[test] +fn prune_handles_multiple_slots() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let slot_a = StorageSlotName::mock(1); + let slot_b = StorageSlotName::mock(2); + + for i in 1..=TEST_CHAIN_LENGTH { + let block_num = BlockNumber::from(i); + let mut map_delta_a = StorageMapDelta::default(); + map_delta_a.insert(Word::from([i, 0, 0, 0]), Word::from([i, 0, 0, 1])); + let mut map_delta_b = StorageMapDelta::default(); + map_delta_b.insert(Word::from([i, 0, 0, 2]), Word::from([i, 0, 0, 3])); + let raw = BTreeMap::from_iter([ + (slot_a.clone(), StorageSlotDelta::Map(map_delta_a)), + (slot_b.clone(), StorageSlotDelta::Map(map_delta_b)), + ]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + } + + assert_eq!(forest.storage_map_roots.len(), (TEST_CHAIN_LENGTH * 2) as usize); + + let chain_tip = BlockNumber::from(TEST_CHAIN_LENGTH); + let (_, storage_roots_removed) = forest.prune(chain_tip); + + let cutoff = TEST_CHAIN_LENGTH - HISTORICAL_BLOCK_RETENTION; + let expected_removed_per_slot = cutoff; + let expected_removed = expected_removed_per_slot * 2; + assert_eq!(storage_roots_removed, expected_removed as usize); + + let expected_remaining = HISTORICAL_BLOCK_RETENTION; + 
assert_eq!(forest.storage_map_roots.len(), (expected_remaining * 2) as usize); + // Cache contains an entry per slot + assert_eq!(forest.storage_entries_per_account_per_slot.len(), 2); +} + +#[test] +fn prune_preserves_most_recent_state_per_entity() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let slot_map_a = StorageSlotName::mock(1); + let slot_map_b = StorageSlotName::mock(2); + + // Block 1: Create vault + map_a + map_b + let block_1 = BlockNumber::from(1); + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(dummy_fungible_asset(faucet_id, 1000)).unwrap(); + + let mut map_delta_a = StorageMapDelta::default(); + map_delta_a.insert(Word::from([1u32, 0, 0, 0]), Word::from([100u32, 0, 0, 0])); + + let mut map_delta_b = StorageMapDelta::default(); + map_delta_b.insert(Word::from([2u32, 0, 0, 0]), Word::from([200u32, 0, 0, 0])); + + let raw = BTreeMap::from_iter([ + (slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a)), + (slot_map_b.clone(), StorageSlotDelta::Map(map_delta_b)), + ]); + let storage_delta_1 = AccountStorageDelta::from_raw(raw); + let delta_1 = dummy_partial_delta(account_id, vault_delta_1, storage_delta_1); + forest.update_account(block_1, &delta_1).unwrap(); + + // Block 51: Update only map_a + let block_at_51 = BlockNumber::from(51); + let mut map_delta_a_new = StorageMapDelta::default(); + map_delta_a_new.insert(Word::from([1u32, 0, 0, 0]), Word::from([999u32, 0, 0, 0])); + + let raw_at_51 = + BTreeMap::from_iter([(slot_map_a.clone(), StorageSlotDelta::Map(map_delta_a_new))]); + let storage_delta_at_51 = AccountStorageDelta::from_raw(raw_at_51); + let delta_at_51 = + dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta_at_51); + forest.update_account(block_at_51, &delta_at_51).unwrap(); + + // Block 100: Prune + let 
block_100 = BlockNumber::from(100); + let (vault_removed, storage_roots_removed) = forest.prune(block_100); + + // Vault at block 1 preserved (most recent) + assert_eq!(vault_removed, 0); + assert!(forest.vault_roots.contains_key(&(account_id, block_1))); + + // map_a: Block 51 preserved, block 1 pruned + assert!( + forest + .storage_map_roots + .contains_key(&(account_id, slot_map_a.clone(), block_at_51)) + ); + assert!(!forest.storage_map_roots.contains_key(&(account_id, slot_map_a, block_1))); + + // map_b: Block 1 preserved (most recent) + assert!(forest.storage_map_roots.contains_key(&(account_id, slot_map_b, block_1))); + + assert_eq!(storage_roots_removed, 1); +} + +#[test] +fn prune_preserves_entries_within_retention_window() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + let slot_map = StorageSlotName::mock(1); + + let blocks = [1, 25, 50, 75, 100]; + + for &block_num in &blocks { + let block = BlockNumber::from(block_num); + + let mut vault_delta = AccountVaultDelta::default(); + vault_delta + .add_asset(dummy_fungible_asset(faucet_id, u64::from(block_num) * 100)) + .unwrap(); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(Word::from([block_num, 0, 0, 0]), Word::from([block_num * 10, 0, 0, 0])); + + let raw = BTreeMap::from_iter([(slot_map.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, vault_delta, storage_delta); + forest.update_account(block, &delta).unwrap(); + } + + // Block 100: Prune (retention window = 50 blocks, cutoff = 50) + let block_100 = BlockNumber::from(100); + let (vault_removed, storage_roots_removed) = forest.prune(block_100); + + // Blocks 1, 25, and 50 pruned (outside retention, have newer entries) + assert_eq!(vault_removed, 3); + 
assert_eq!(storage_roots_removed, 3); + + // Verify preserved entries + assert!(!forest.vault_roots.contains_key(&(account_id, BlockNumber::from(1)))); + assert!(!forest.vault_roots.contains_key(&(account_id, BlockNumber::from(25)))); + assert!(!forest.vault_roots.contains_key(&(account_id, BlockNumber::from(50)))); + assert!(forest.vault_roots.contains_key(&(account_id, BlockNumber::from(75)))); + assert!(forest.vault_roots.contains_key(&(account_id, BlockNumber::from(100)))); +} + +/// Two accounts start with identical vault roots (same asset amount). When one account changes +/// in the next block, verify the unchanged account's vault root still works for lookups and +/// witness generation. +#[test] +fn shared_vault_root_retained_when_one_account_changes() { + let mut forest = InnerForest::new(); + let account1 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let account2 = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap(); + let faucet_id = dummy_faucet(); + + // Block 1: Both accounts have identical vaults (same asset) + let block_1 = BlockNumber::GENESIS.child(); + let initial_amount = 1000u64; + let asset = dummy_fungible_asset(faucet_id, initial_amount); + let asset_key = AssetVaultKey::new_unchecked(asset.vault_key().into()); + + let mut vault_delta_1 = AccountVaultDelta::default(); + vault_delta_1.add_asset(asset).unwrap(); + let delta_1 = dummy_partial_delta(account1, vault_delta_1, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_1).unwrap(); + + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2 + .add_asset(dummy_fungible_asset(faucet_id, initial_amount)) + .unwrap(); + let delta_2 = dummy_partial_delta(account2, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_1, &delta_2).unwrap(); + + // Both accounts should have the same vault root (structural sharing in SmtForest) + let root1_at_block1 = 
forest.vault_roots[&(account1, block_1)]; + let root2_at_block1 = forest.vault_roots[&(account2, block_1)]; + assert_eq!(root1_at_block1, root2_at_block1, "identical vaults should have identical roots"); + + // Block 2: Only account2 changes (adds more assets) + let block_2 = block_1.child(); + let mut vault_delta_2_update = AccountVaultDelta::default(); + vault_delta_2_update.add_asset(dummy_fungible_asset(faucet_id, 500)).unwrap(); + let delta_2_update = + dummy_partial_delta(account2, vault_delta_2_update, AccountStorageDelta::default()); + forest.update_account(block_2, &delta_2_update).unwrap(); + + // Account2 now has a different root + let root2_at_block2 = forest.vault_roots[&(account2, block_2)]; + assert_ne!(root2_at_block1, root2_at_block2, "account2 vault should have changed"); + + // Account1 has no entry at block 2, but lookup should still return block 1's root + assert!(!forest.vault_roots.contains_key(&(account1, block_2))); + let root1_lookup = forest.get_vault_root(account1, block_2); + assert_eq!( + root1_lookup, + Some(root1_at_block1), + "account1 should still resolve to block 1 root" + ); + + // Account1 should still be able to generate witnesses at block 2 (using block 1's data) + let witnesses = forest + .get_vault_asset_witnesses(account1, block_2, [asset_key].into()) + .expect("witness generation should succeed for unchanged account"); + assert_eq!(witnesses.len(), 1); + + // The proof should verify against the original root + let proof: SmtProof = witnesses[0].clone().into(); + assert_eq!(proof.compute_root(), root1_at_block1); +} diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 504ea0631..4171053fe 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -216,7 +216,7 @@ pub async fn load_mmr(db: &mut Db) -> Result = block - .body() + let duplicate_nullifiers: Vec<_> = body .created_nullifiers() .iter() .filter(|&nullifier| 
inner.nullifier_tree.get_block_num(nullifier).is_some()) @@ -304,11 +306,7 @@ impl State { let nullifier_tree_update = inner .nullifier_tree .compute_mutations( - block - .body() - .created_nullifiers() - .iter() - .map(|nullifier| (*nullifier, block_num)), + body.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), ) .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; @@ -325,9 +323,7 @@ impl State { let account_tree_update = inner .account_tree .compute_mutations( - block - .body() - .updated_accounts() + body.updated_accounts() .iter() .map(|update| (update.account_id(), update.final_state_commitment())), ) @@ -355,14 +351,13 @@ impl State { ) }; - // build note tree - let note_tree = block.body().compute_block_note_tree(); + // Build note tree + let note_tree = body.compute_block_note_tree(); if note_tree.root() != header.note_root() { return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); } - let notes = block - .body() + let notes = body .output_notes() .map(|(note_index, note)| { let (details, nullifier) = match note { @@ -401,12 +396,12 @@ impl State { // Extract public account updates with deltas before block is moved into async task. // Private accounts are filtered out since they don't expose their state changes. let account_deltas = - Vec::from_iter(block.body().updated_accounts().iter().filter_map(|update| { - match update.details() { + Vec::from_iter(body.updated_accounts().iter().filter_map( + |update| match update.details() { AccountUpdateDetails::Delta(delta) => Some(delta.clone()), AccountUpdateDetails::Private => None, - } - })); + }, + )); // The DB and in-memory state updates need to be synchronized and are partially // overlapping. 
Namely, the DB transaction only proceeds after this task acquires the @@ -471,7 +466,8 @@ impl State { .in_current_span() .await?; - self.forest.write().await.apply_block_updates(block_num, account_deltas)?; + let mut forest = self.forest.write().await; + forest.apply_block_updates(block_num, account_deltas)?; info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); @@ -1055,7 +1051,8 @@ impl State { /// /// For specific key queries (`SlotData::MapKeys`), the forest is used to provide SMT proofs. /// Returns an error if the forest doesn't have data for the requested slot. - /// All-entries queries (`SlotData::All`) use the forest to return all entries. + /// All-entries queries (`SlotData::All`) use the forest to request all entries from cache or + /// fall back to database reconstruction. async fn fetch_public_account_details( &self, account_id: AccountId, @@ -1108,25 +1105,62 @@ impl State { Vec::::with_capacity(storage_requests.len()); // Use forest for storage map queries - let forest_guard = self.forest.read().await; + let mut forest_guard = self.forest.write().await; for StorageMapRequest { slot_name, slot_data } in storage_requests { let details = match &slot_data { SlotData::MapKeys(keys) => forest_guard - .open_storage_map(account_id, slot_name.clone(), block_num, keys) + .get_storage_map_details_for_keys( + account_id, + slot_name.clone(), + block_num, + keys, + ) .ok_or_else(|| DatabaseError::StorageRootNotFound { account_id, slot_name: slot_name.to_string(), block_num, })? 
.map_err(DatabaseError::MerkleError)?, - SlotData::All => forest_guard - .storage_map_entries(account_id, slot_name.clone(), block_num) - .ok_or_else(|| DatabaseError::StorageRootNotFound { + SlotData::All => { + // Try cache first (latest block only) + if let Some(details) = forest_guard.get_storage_map_details_full_from_cache( account_id, - slot_name: slot_name.to_string(), + slot_name.clone(), block_num, - })?, + ) { + details + } else { + // we don't want to hold the forest guard for a prolonged time + drop(forest_guard); + // we collect all storage items if the account is small enough, or + // return `AccountStorageMapDetails::LimitExceeded` + let details = self + .db + .reconstruct_storage_map_from_db( + account_id, + slot_name.clone(), + block_num, + Some( + // TODO unify this with + // `AccountStorageMapDetails::MAX_RETURN_ENTRIES` + // and accumulate the limits + ::LIMIT, + ), + ) + .await?; + forest_guard = self.forest.write().await; + if let StorageMapEntries::AllEntries(entries) = details.entries.clone() { + forest_guard.cache_storage_map_entries( + account_id, + slot_name.clone(), + block_num, + entries, + ); + } + details + } + }, }; storage_map_details.push(details); @@ -1149,7 +1183,7 @@ impl State { account_id: AccountId, block_range: RangeInclusive, ) -> Result { - self.db.select_storage_map_sync_values(account_id, block_range).await + self.db.select_storage_map_sync_values(account_id, block_range, None).await } /// Loads a block from the block store. Returns `Ok(None)` if the block is not found.