1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -38,6 +38,7 @@

### Enhancements

- Added cleanup of old account data from the in-memory forest ([#1175](https://github.com/0xMiden/miden-node/issues/1175)).
- Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)).
- Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)).
- Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)).
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -87,6 +87,7 @@ rand = { version = "0.9" }
rand_chacha = { version = "0.9" }
rstest = { version = "0.26" }
serde = { features = ["derive"], version = "1" }
tempfile = { version = "3.12" }
thiserror = { default-features = false, version = "2.0" }
tokio = { features = ["rt-multi-thread"], version = "1.46" }
tokio-stream = { version = "0.1" }
2 changes: 2 additions & 0 deletions crates/store/Cargo.toml
@@ -25,6 +25,7 @@ fs-err = { workspace = true }
hex = { version = "0.4" }
indexmap = { workspace = true }
libsqlite3-sys = { workspace = true }
lru = { workspace = true }
miden-crypto = { features = ["concurrent", "hashmaps"], workspace = true }
miden-node-proto = { workspace = true }
miden-node-proto-build = { features = ["internal"], workspace = true }
@@ -55,6 +56,7 @@ miden-protocol = { default-features = true, features = ["testing"], works
miden-standards = { features = ["testing"], workspace = true }
rand = { workspace = true }
regex = { version = "1.11" }
tempfile = { workspace = true }
termtree = { version = "0.5" }

[features]
100 changes: 99 additions & 1 deletion crates/store/src/db/mod.rs
@@ -1,11 +1,13 @@
use std::collections::{BTreeMap, BTreeSet, HashSet};
use std::mem::size_of;
use std::ops::RangeInclusive;
use std::path::PathBuf;

use anyhow::Context;
use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection};
use miden_node_proto::domain::account::{AccountInfo, AccountSummary};
use miden_node_proto::generated as proto;
use miden_node_utils::limiter::MAX_RESPONSE_PAYLOAD_BYTES;
use miden_node_utils::tracing::OpenTelemetrySpanExt;
use miden_protocol::Word;
use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader};
@@ -600,13 +602,109 @@ impl Db {
&self,
account_id: AccountId,
block_range: RangeInclusive<BlockNumber>,
entries_limit: Option<usize>,
) -> Result<StorageMapValuesPage> {
let entries_limit = entries_limit.unwrap_or_else(|| {
// TODO: These limits should be given by the protocol.
// See miden-base/issues/1770 for more details
pub const ROW_OVERHEAD_BYTES: usize =
2 * size_of::<Word>() + size_of::<u32>() + size_of::<u8>(); // key + value + block_num + slot_idx
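// Back-of-envelope (an estimate, assuming a 32-byte Word, i.e. four 8-byte felts,
// and the ~2 MB MAX_RESPONSE_PAYLOAD_BYTES cap): 2 * 32 + 4 + 1 = 69 bytes per row,
// so the default page limit lands at roughly 30_000 entries.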
MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES
});

self.transact("select storage map sync values", move |conn| {
models::queries::select_account_storage_map_values(conn, account_id, block_range)
models::queries::select_account_storage_map_values_paged(
conn,
account_id,
block_range,
entries_limit,
)
})
.await
}

/// Reconstructs storage map details from the database for a specific slot at a block.
///
/// Used as fallback when `InnerForest` cache misses (historical or evicted queries).
/// Rebuilds all entries by querying the DB and filtering to the specific slot.
///
/// Returns:
/// - `::LimitExceeded` when too many entries are present
/// - `::AllEntries` if the size is sufficiently small
pub(crate) async fn reconstruct_storage_map_from_db(
&self,
account_id: AccountId,
slot_name: miden_protocol::account::StorageSlotName,
block_num: BlockNumber,
entries_limit: Option<usize>,
) -> Result<miden_node_proto::domain::account::AccountStorageMapDetails> {
use miden_node_proto::domain::account::AccountStorageMapDetails;
use miden_protocol::EMPTY_WORD;
use miden_protocol::account::StorageSlotName;

// TODO this remains expensive with a large history until we implement pruning for DB
// columns
let mut values = Vec::new();
let mut block_range_start = BlockNumber::GENESIS;
let entries_limit = entries_limit.unwrap_or_else(|| {
// TODO: These limits should be given by the protocol.
// See miden-base/issues/1770 for more details
pub const ROW_OVERHEAD_BYTES: usize =
2 * size_of::<Word>() + size_of::<u32>() + size_of::<u8>(); // key + value + block_num + slot_idx
MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES
});

let mut page = self
.select_storage_map_sync_values(
account_id,
block_range_start..=block_num,
Some(entries_limit),
)
.await?;

values.extend(page.values);

loop {
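// Each page reports the last block it fully covered: stop once that reaches
// `block_num`, or once a page makes no progress past the range start (a single
// block holding more rows than the limit); otherwise resume from its child block.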
if page.last_block_included == block_num || page.last_block_included < block_range_start
{
break;
}

block_range_start = page.last_block_included.child();
page = self
.select_storage_map_sync_values(
account_id,
block_range_start..=block_num,
Some(entries_limit),
)
.await?;

values.extend(page.values);
}

if page.last_block_included != block_num {
return Ok(AccountStorageMapDetails::limit_exceeded(StorageSlotName::mock(0)));
Copilot AI Feb 16, 2026

The function returns `AccountStorageMapDetails::limit_exceeded(StorageSlotName::mock(0))` when the limit is exceeded, but it uses a mocked slot name (slot 0) instead of the actual `slot_name` parameter that was requested. This could mislead clients about which slot exceeded limits. Use the actual `slot_name` parameter instead.
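A minimal sketch of the suggested change (assuming `limit_exceeded` accepts the requested `StorageSlotName`, as the surrounding calls imply):

```rust
// Return the slot that was actually queried so clients can attribute the
// limit error to the right storage map.
if page.last_block_included != block_num {
    return Ok(AccountStorageMapDetails::limit_exceeded(slot_name));
}
```

The same substitution would apply to the `MAX_RETURN_ENTRIES` check further down.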
}

// Filter to the specific slot and collect latest values per key
let mut latest_values = BTreeMap::<Word, Word>::new();
for value in values {
if value.slot_name == slot_name {
latest_values.insert(value.key, value.value);
}
}

// Remove EMPTY_WORD entries (deletions)
latest_values.retain(|_, v| *v != EMPTY_WORD);

if latest_values.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES {
return Ok(AccountStorageMapDetails::limit_exceeded(StorageSlotName::mock(0)));
Copilot AI Feb 16, 2026

The function returns `AccountStorageMapDetails::limit_exceeded(StorageSlotName::mock(0))` when the total entries exceed `MAX_RETURN_ENTRIES`, but it uses a mocked slot name (slot 0) instead of the actual `slot_name` parameter that was requested. This could mislead clients about which slot exceeded limits. Use the actual `slot_name` parameter instead.
}

let entries = Vec::from_iter(latest_values.into_iter());
Ok(AccountStorageMapDetails::from_forest_entries(slot_name, entries))
}

/// Emits size metrics for each table in the database, and the entire database.
#[instrument(target = COMPONENT, skip_all, err)]
pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> {
17 changes: 7 additions & 10 deletions crates/store/src/db/models/queries/accounts.rs
@@ -653,19 +653,14 @@ impl StorageMapValue {
///
/// * Response payload size: 0 <= size <= 2MB
/// * Storage map values per response: 0 <= count <= (2MB / (2*Word + u32 + u8)) + 1
pub(crate) fn select_account_storage_map_values(
pub(crate) fn select_account_storage_map_values_paged(
conn: &mut SqliteConnection,
account_id: AccountId,
block_range: RangeInclusive<BlockNumber>,
limit: usize,
) -> Result<StorageMapValuesPage, DatabaseError> {
use schema::account_storage_map_values as t;

// TODO: These limits should be given by the protocol.
// See miden-base/issues/1770 for more details
pub const ROW_OVERHEAD_BYTES: usize =
2 * size_of::<Word>() + size_of::<u32>() + size_of::<u8>(); // key + value + block_num + slot_idx
pub const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES;

if !account_id.is_public() {
return Err(DatabaseError::AccountNotPublic(account_id));
}
@@ -686,13 +681,13 @@ pub(crate) fn select_account_storage_map_values(
.and(t::block_num.le(block_range.end().to_raw_sql())),
)
.order(t::block_num.asc())
.limit(i64::try_from(MAX_ROWS + 1).expect("limit fits within i64"))
.limit(i64::try_from(limit + 1).expect("limit fits within i64"))
.load(conn)?;

// Discard the last block in the response (assumes more than one block may be present)

let (last_block_included, values) = if let Some(&(last_block_num, ..)) = raw.last()
&& raw.len() > MAX_ROWS
&& raw.len() > limit
{
// NOTE: If the query contains at least one more row than the amount of storage map updates
// allowed in a single block for an account, then the response is guaranteed to have at
Expand All @@ -708,7 +703,9 @@ pub(crate) fn select_account_storage_map_values(
} else {
(
*block_range.end(),
raw.into_iter().map(StorageMapValue::from_raw_row).collect::<Result<_, _>>()?,
raw.into_iter()
.map(StorageMapValue::from_raw_row)
.collect::<Result<Vec<_>, _>>()?,
)
};
