Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
- Validator now persists validated transactions ([#1614](https://github.com/0xMiden/miden-node/pull/1614)).
- [BREAKING] Remove `SynState` and introduce `SyncChainMmr` ([#1591](https://github.com/0xMiden/miden-node/issues/1591)).
- Introduce `SyncChainMmr` RPC endpoint to sync chain MMR deltas within specified block ranges ([#1591](https://github.com/0xMiden/miden-node/issues/1591)).
- Fixed `TransactionHeader` serialization for row insertion into the database and fixed the transaction cursor on retrievals ([#1701](https://github.com/0xMiden/miden-node/issues/1701)).

### Changes

Expand Down
34 changes: 21 additions & 13 deletions crates/store/src/db/models/queries/transactions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -125,11 +125,25 @@ impl TransactionSummaryRowInsert {
) -> Self {
const HEADER_BASE_SIZE: usize = 4 + 32 + 16 + 64; // block_num + tx_id + account_id + commitments

// Serialize input notes using binary format (store nullifiers)
let nullifiers_binary = transaction_header.input_notes().to_bytes();
// Extract nullifiers from input notes and serialize them.
// We only store the nullifiers (not the full `InputNoteCommitment`) since
// that's all that's needed when reading back `TransactionRecords`.
let nullifiers: Vec<Nullifier> = transaction_header
.input_notes()
.iter()
.map(miden_protocol::transaction::InputNoteCommitment::nullifier)
.collect();
let nullifiers_binary = nullifiers.to_bytes();

// Serialize output notes using binary format (store note IDs)
let output_notes_binary = transaction_header.output_notes().to_bytes();
// Extract note IDs from output note headers and serialize them.
// We only store the `NoteId`s (not the full `NoteHeader` with metadata) since
// that's all that's needed when reading back `TransactionRecords`.
let output_note_ids: Vec<NoteId> = transaction_header
.output_notes()
.iter()
.map(miden_protocol::note::NoteHeader::id)
.collect();
let output_notes_binary = output_note_ids.to_bytes();

// Manually calculate the estimated size of the transaction header to avoid
// the cost of serialization. The size estimation includes:
Expand Down Expand Up @@ -269,26 +283,20 @@ pub fn select_transactions_records(

// Add transactions from this chunk one by one until we hit the limit
let mut added_from_chunk = 0;
let mut last_added_tx: Option<TransactionRecordRaw> = None;

for tx in chunk {
if total_size + tx.size_in_bytes <= max_payload_bytes {
total_size += tx.size_in_bytes;
last_added_tx = Some(tx);
last_block_num = Some(tx.block_num);
last_transaction_id = Some(tx.transaction_id.clone());
all_transactions.push(tx);
added_from_chunk += 1;
} else {
// Can't fit this transaction, stop here
break;
}
}

// Update cursor position only for the last transaction that was actually added
if let Some(tx) = last_added_tx {
last_block_num = Some(tx.block_num);
last_transaction_id = Some(tx.transaction_id.clone());
all_transactions.push(tx);
}

// Break if chunk incomplete (size limit hit or data exhausted)
if added_from_chunk < NUM_TXS_PER_CHUNK {
break;
Expand Down
78 changes: 78 additions & 0 deletions crates/store/src/db/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2398,3 +2398,81 @@ fn test_prune_history() {
"is_latest=true entry should be retained even if old"
);
}

#[test]
#[miden_node_test_macro::enable_logging]
fn db_roundtrip_transactions() {
    // Set up a database with one block and one account to attach transactions to.
    let mut conn = create_db();
    let block_num = BlockNumber::from(1);
    create_block(&mut conn, block_num);

    let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap();
    queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num)
        .unwrap();

    // Two transaction headers carrying distinct data.
    let first_tx = mock_block_transaction(account_id, 1);
    let second_tx = mock_block_transaction(account_id, 2);
    let ordered =
        OrderedTransactionHeaders::new_unchecked(vec![first_tx.clone(), second_tx.clone()]);

    // Persist the headers.
    let inserted = queries::insert_transactions(&mut conn, block_num, &ordered).unwrap();
    assert_eq!(inserted, 2, "Should insert 2 transactions");

    // Read them back over the full block range.
    let (last_block, records) = queries::select_transactions_records(
        &mut conn,
        &[account_id],
        BlockNumber::GENESIS..=block_num,
    )
    .unwrap();
    assert_eq!(last_block, block_num, "Last block should match");
    assert_eq!(records.len(), 2, "Should retrieve 2 transactions");

    // Every retrieved record must reproduce its original header field-for-field.
    // Retrieval order is (block_num, transaction_id), so locate each original by ID.
    let originals = [&first_tx, &second_tx];
    for record in &records {
        let original = originals
            .iter()
            .find(|tx| tx.id() == record.transaction_id)
            .expect("Retrieved transaction should match one of the originals");

        assert_eq!(
            record.transaction_id,
            original.id(),
            "TransactionId DB roundtrip must be symmetric"
        );
        assert_eq!(
            record.account_id,
            original.account_id(),
            "AccountId DB roundtrip must be symmetric"
        );
        assert_eq!(record.block_num, block_num, "Block number must match");
        assert_eq!(
            record.initial_state_commitment,
            original.initial_state_commitment(),
            "Initial state commitment DB roundtrip must be symmetric"
        );
        assert_eq!(
            record.final_state_commitment,
            original.final_state_commitment(),
            "Final state commitment DB roundtrip must be symmetric"
        );

        // Input notes are persisted as nullifiers only.
        let expected_nullifiers = original
            .input_notes()
            .iter()
            .map(InputNoteCommitment::nullifier)
            .collect::<Vec<Nullifier>>();
        assert_eq!(
            record.nullifiers, expected_nullifiers,
            "Nullifiers (from input notes) DB roundtrip must be symmetric"
        );

        // Output notes are persisted as note IDs only.
        let expected_note_ids =
            original.output_notes().iter().map(NoteHeader::id).collect::<Vec<NoteId>>();
        assert_eq!(
            record.output_notes, expected_note_ids,
            "Output note IDs DB roundtrip must be symmetric"
        );
    }
}
Loading