From c1898302ecb28b60bf387f0a262820b42101eeba Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 6 Jan 2026 15:20:51 -0500 Subject: [PATCH 01/17] add blob header --- runtime/fuzz/fuzz_targets/buffer.rs | 19 +- runtime/src/deterministic.rs | 20 +- runtime/src/lib.rs | 257 +++++++++++++++-- runtime/src/storage/audited.rs | 46 ++- runtime/src/storage/iouring.rs | 219 ++++++++++++-- runtime/src/storage/memory.rs | 222 +++++++++++++- runtime/src/storage/metered.rs | 38 ++- runtime/src/storage/mod.rs | 78 +++-- runtime/src/storage/tokio/fallback.rs | 11 +- runtime/src/storage/tokio/mod.rs | 234 ++++++++++++++- runtime/src/storage/tokio/unix.rs | 11 +- runtime/src/tokio/runtime.rs | 9 +- runtime/src/utils/buffer/append.rs | 27 +- runtime/src/utils/buffer/mod.rs | 287 ++++++++++++++----- runtime/src/utils/buffer/pool.rs | 7 +- runtime/src/utils/buffer/read.rs | 5 +- runtime/src/utils/buffer/write.rs | 4 +- runtime/src/utils/cell.rs | 6 +- storage/src/archive/prunable/mod.rs | 5 +- storage/src/cache/mod.rs | 5 +- storage/src/freezer/mod.rs | 16 +- storage/src/freezer/storage.rs | 7 +- storage/src/journal/contiguous/fixed.rs | 61 ++-- storage/src/journal/segmented/variable.rs | 73 +++-- storage/src/metadata/mod.rs | 12 +- storage/src/metadata/storage.rs | 11 +- storage/src/mmr/journaled.rs | 9 +- storage/src/ordinal/mod.rs | 33 +-- storage/src/ordinal/storage.rs | 9 +- storage/src/qmdb/any/unordered/fixed/sync.rs | 8 +- 30 files changed, 1442 insertions(+), 307 deletions(-) diff --git a/runtime/fuzz/fuzz_targets/buffer.rs b/runtime/fuzz/fuzz_targets/buffer.rs index de596ae15b..d028a891fc 100644 --- a/runtime/fuzz/fuzz_targets/buffer.rs +++ b/runtime/fuzz/fuzz_targets/buffer.rs @@ -12,6 +12,9 @@ const MAX_SIZE: usize = 1024 * 1024; const SHARED_BLOB: &[u8] = b"buffer_blob"; const MAX_OPERATIONS: usize = 50; +/// Default version range for fuzz tests (application version 0). 
+const TEST_VERSIONS: std::ops::RangeInclusive<u16> = 0..=0;
+
 #[derive(Arbitrary, Debug)]
 struct FuzzInput {
     seed: u64,
@@ -87,8 +90,8 @@ enum FuzzOperation {
 fn fuzz(input: FuzzInput) {
     let executor = deterministic::Runner::default();
     executor.start(|context| async move {
-        let (blob, initial_size) = context
-            .open("test_partition", SHARED_BLOB)
+        let (blob, initial_size, _) = context
+            .open("test_partition", SHARED_BLOB, TEST_VERSIONS)
             .await
             .expect("cannot open context");
@@ -113,8 +116,8 @@ fn fuzz(input: FuzzInput) {
                 let blob_size = blob_size as u64;
                 let buffer_size = (buffer_size as usize).clamp(1, MAX_SIZE);
-                let (blob, size) = context
-                    .open("test_partition", b"read_blob")
+                let (blob, size, _) = context
+                    .open("test_partition", b"read_blob", TEST_VERSIONS)
                     .await
                     .expect("cannot open context");
@@ -134,8 +137,8 @@
             } => {
                 let capacity = (capacity as usize).clamp(1, MAX_SIZE);
-                let (blob, _) = context
-                    .open("test_partition", b"write_blob")
+                let (blob, _, _) = context
+                    .open("test_partition", b"write_blob", TEST_VERSIONS)
                     .await
                     .expect("cannot open context");
@@ -152,8 +155,8 @@
                 let pool_page_size = NZUsize!((pool_page_size as usize).clamp(1, MAX_SIZE));
                 let pool_capacity = NZUsize!((pool_capacity as usize).clamp(1, MAX_SIZE));
-                let (blob, _) = context
-                    .open("test_partition", b"append_blob")
+                let (blob, _, _) = context
+                    .open("test_partition", b"append_blob", TEST_VERSIONS)
                     .await
                     .expect("cannot open write blob");
diff --git a/runtime/src/deterministic.rs b/runtime/src/deterministic.rs
index 127e19fc09..cd94265749 100644
--- a/runtime/src/deterministic.rs
+++ b/runtime/src/deterministic.rs
@@ -1390,8 +1390,13 @@ impl CryptoRng for Context {}
 impl crate::Storage for Context {
     type Blob = ::Blob;
 
-    async fn open(&self, partition: &str, name: &[u8]) -> Result<(Self::Blob, u64), Error> {
-        self.storage.open(partition, name).await
+    async fn open(
+        &self,
+        partition: &str,
+        name: &[u8],
+        versions: std::ops::RangeInclusive<u16>,
+    ) -> Result<(Self::Blob, u64, u16), Error> {
+        self.storage.open(partition, name, versions).await
     }
 
     async fn remove(&self, partition: &str, name: Option<&[u8]>) -> Result<(), Error> {
@@ -1420,6 +1425,9 @@ mod tests {
     use futures::{channel::mpsc, SinkExt, StreamExt};
     use futures::{channel::oneshot, task::noop_waker};
 
+    /// Default version range for tests
+    const TEST_VERSIONS: std::ops::RangeInclusive<u16> = 0..=0;
+
     fn run_with_seed(seed: u64) -> (String, Vec) {
         let executor = deterministic::Runner::seeded(seed);
         run_tasks(5, executor)
@@ -1535,7 +1543,7 @@ mod tests {
         // Run some tasks, sync storage, and recover the runtime
         let (state, checkpoint) = executor1.start_and_recover(|context| async move {
-            let (blob, _) = context.open(partition, name).await.unwrap();
+            let (blob, _, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap();
             blob.write_at(Vec::from(data), 0).await.unwrap();
             blob.sync().await.unwrap();
             context.auditor().state()
         });
@@ -1547,7 +1555,7 @@
         // Check that synced storage persists after recovery
         let executor = Runner::from(checkpoint);
         executor.start(|context| async move {
-            let (blob, len) = context.open(partition, name).await.unwrap();
+            let (blob, len, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap();
             assert_eq!(len, data.len() as u64);
             let read = blob.read_at(vec![0; data.len()], 0).await.unwrap();
             assert_eq!(read.as_ref(), data);
         });
@@ -1581,7 +1589,7 @@
         // Run some tasks without syncing storage
         let (_, checkpoint) =
executor.start_and_recover(|context| async move {
             let context = context.clone();
-            let (blob, _) = context.open(partition, name).await.unwrap();
+            let (blob, _, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap();
             blob.write_at(data, 0).await.unwrap();
         });
@@ -1590,7 +1598,7 @@
         // Check that unsynced storage does not persist after recovery
         executor.start(|context| async move {
-            let (_, len) = context.open(partition, name).await.unwrap();
+            let (_, len, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap();
             assert_eq!(len, 0);
         });
     }
diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs
index e500e6fca5..56551348b2 100644
--- a/runtime/src/lib.rs
+++ b/runtime/src/lib.rs
@@ -98,6 +98,15 @@ pub enum Error {
     BlobSyncFailed(String, String, IoError),
     #[error("blob insufficient length")]
     BlobInsufficientLength,
+    #[error("blob magic mismatch: expected {:?}, found {found:?}", Header::MAGIC)]
+    BlobMagicMismatch { found: [u8; Header::MAGIC_LENGTH] },
+    #[error("blob header version mismatch: expected {expected}, found {found}")]
+    BlobHeaderVersionMismatch { expected: u16, found: u16 },
+    #[error("blob application version mismatch: expected one of {expected:?}, found {found}")]
+    BlobApplicationVersionMismatch {
+        expected: std::ops::RangeInclusive<u16>,
+        found: u16,
+    },
     #[error("offset overflow")]
     OffsetOverflow,
     #[error("io error: {0}")]
@@ -514,11 +523,25 @@ pub trait Storage: Clone + Send + Sync + 'static {
     /// writing to the same blob concurrently may lead to undefined behavior.
     ///
     /// An Ok result indicates the blob is durably created (or already exists).
+    ///
+    /// # Versions
+    ///
+    /// The blob's [Header] contains an application version.
+    /// `versions` specifies the range of acceptable application versions for an opened blob.
+    /// If the blob already exists and its version is not in `versions`, returns
+    /// [Error::BlobApplicationVersionMismatch].
+    /// If the blob does not exist, it is created with the application version set to the last
+    /// value in `versions`.
+    ///
+    /// # Returns
+    ///
+    /// A tuple of (blob, logical_size, application_version).
     fn open(
         &self,
         partition: &str,
         name: &[u8],
-    ) -> impl Future<Output = Result<(Self::Blob, u64), Error>> + Send;
+        versions: std::ops::RangeInclusive<u16>,
+    ) -> impl Future<Output = Result<(Self::Blob, u64, u16), Error>> + Send;
 
     /// Remove a blob from a given partition.
     ///
@@ -535,6 +558,87 @@ pub trait Storage: Clone + Send + Sync + 'static {
     fn scan(&self, partition: &str) -> impl Future<Output = Result<Vec<Vec<u8>>, Error>> + Send;
 }
 
+/// Fixed-size header at the start of each [Blob].
+///
+/// On-disk layout (8 bytes, big-endian):
+/// - Bytes 0-3: [Header::MAGIC]
+/// - Bytes 4-5: Header Version (u16)
+/// - Bytes 6-7: Application Version (u16)
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct Header {
+    magic: [u8; 4],
+    header_version: u16,
+    application_version: u16,
+}
+
+impl Header {
+    /// Size of the header in bytes.
+    pub const SIZE: usize = 8;
+
+    /// Size of the header as u64 for offset calculations.
+    pub const SIZE_U64: u64 = Self::SIZE as u64;
+
+    /// Length of magic bytes.
+    pub const MAGIC_LENGTH: usize = 4;
+
+    /// Length of version fields.
+    pub const VERSION_LENGTH: usize = 2;
+
+    /// Magic bytes identifying a valid commonware blob.
+    pub const MAGIC: [u8; Self::MAGIC_LENGTH] = *b"CWIC"; // Commonware Is CWIC
+
+    /// The current version of the header format.
+    pub const HEADER_VERSION: u16 = 0;
+
+    /// Creates a new header with the given application version.
+    pub const fn new(app_version: u16) -> Self {
+        Self {
+            magic: Self::MAGIC,
+            header_version: Self::HEADER_VERSION,
+            application_version: app_version,
+        }
+    }
+
+    /// Parses a header from bytes (big-endian format).
+    pub fn from_bytes(bytes: [u8; Self::SIZE]) -> Self {
+        Self {
+            magic: bytes[..4].try_into().unwrap(),
+            header_version: u16::from_be_bytes(bytes[4..6].try_into().unwrap()),
+            application_version: u16::from_be_bytes(bytes[6..8].try_into().unwrap()),
+        }
+    }
+
+    /// Serializes the header to bytes (big-endian format).
+    pub fn to_bytes(&self) -> [u8; Self::SIZE] {
+        let mut bytes = [0u8; Self::SIZE];
+        bytes[..4].copy_from_slice(&self.magic);
+        bytes[4..6].copy_from_slice(&self.header_version.to_be_bytes());
+        bytes[6..8].copy_from_slice(&self.application_version.to_be_bytes());
+        bytes
+    }
+
+    /// Validates the magic bytes, header version, and application version.
+    /// `app_versions` is the range of acceptable application versions.
+    pub fn validate(&self, app_versions: &std::ops::RangeInclusive<u16>) -> Result<(), Error> {
+        if self.magic != Self::MAGIC {
+            return Err(Error::BlobMagicMismatch { found: self.magic });
+        }
+        if self.header_version != Self::HEADER_VERSION {
+            return Err(Error::BlobHeaderVersionMismatch {
+                expected: Self::HEADER_VERSION,
+                found: self.header_version,
+            });
+        }
+        if !app_versions.contains(&self.application_version) {
+            return Err(Error::BlobApplicationVersionMismatch {
+                expected: app_versions.clone(),
+                found: self.application_version,
+            });
+        }
+        Ok(())
+    }
+}
+
 /// Interface to read and write to a blob.
 ///
 /// To support blob implementations that enable concurrent reads and
@@ -549,6 +653,12 @@ pub trait Storage: Clone + Send + Sync + 'static {
 /// When a blob is dropped, any unsynced changes may be discarded. Implementations
 /// may attempt to sync during drop but errors will go unhandled. Call `sync`
 /// before dropping to ensure all changes are durably persisted.
+///
+/// # Header
+///
+/// All blobs have a [`Header`] at the start. The header is read on open
+/// (for existing blobs) or written (for new blobs). All I/O operations use logical
+/// offsets that start after the header; the header offset is handled internally.
 #[allow(clippy::len_without_is_empty)]
 pub trait Blob: Clone + Send + Sync + 'static {
     /// Read from the blob at the given offset.
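To see how these pieces fit together, here is a brief illustrative sketch (not part of the patch) that exercises the `Header` API above; it assumes the types are exported from the runtime crate as `commonware_runtime::{Error, Header}`:

```rust
use commonware_runtime::{Error, Header};

fn header_roundtrip() -> Result<(), Error> {
    // A blob written by application version 3.
    let header = Header::new(3);

    // 8 bytes: magic (4) | header version (2, big-endian) | application version (2, big-endian).
    let bytes = header.to_bytes();
    assert_eq!(&bytes[..Header::MAGIC_LENGTH], &Header::MAGIC);

    // A reader accepting application versions 1 through 5 validates successfully...
    Header::from_bytes(bytes).validate(&(1..=5))?;

    // ...while one accepting only version 4 sees BlobApplicationVersionMismatch.
    assert!(matches!(
        Header::from_bytes(bytes).validate(&(4..=4)),
        Err(Error::BlobApplicationVersionMismatch { found: 3, .. })
    ));
    Ok(())
}
```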
@@ -604,6 +714,98 @@ mod tests { use tracing::{error, Level}; use utils::reschedule; + /// Default version range for tests + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + + #[test] + fn test_header_fields() { + let header = Header::new(*TEST_VERSIONS.end()); + assert_eq!(header.header_version, Header::HEADER_VERSION); + assert_eq!(header.application_version, *TEST_VERSIONS.end()); + + // Verify byte serialization + let bytes = header.to_bytes(); + assert_eq!(&bytes[..4], &Header::MAGIC); + assert_eq!(&bytes[4..6], &Header::HEADER_VERSION.to_be_bytes()); + assert_eq!(&bytes[6..8], &TEST_VERSIONS.end().to_be_bytes()); + + // Verify round-trip + let parsed = Header::from_bytes(bytes); + assert_eq!(parsed, header); + } + + #[test] + fn test_header_validate_success() { + let header = Header::new(*TEST_VERSIONS.end()); + assert!(header.validate(&TEST_VERSIONS).is_ok()); + } + + #[test] + fn test_header_validate_magic_wrong_bytes() { + let header = Header::from_bytes([0u8; Header::SIZE]); + let result = header.validate(&TEST_VERSIONS); + match result { + Err(Error::BlobMagicMismatch { found }) => { + assert_eq!(found, [0u8; 4]); + } + _ => panic!("expected BlobMagicMismatch error"), + } + + let mut bytes = Header::new(*TEST_VERSIONS.end()).to_bytes(); + bytes[0] = b'X'; // Corrupt first byte + let header = Header::from_bytes(bytes); + let result = header.validate(&TEST_VERSIONS); + match result { + Err(Error::BlobMagicMismatch { found }) => { + assert_eq!(found[0], b'X'); + } + _ => panic!("expected BlobMagicMismatch error"), + } + } + + #[test] + fn test_header_validate_app_version_mismatch() { + let header = Header::new(5); + let result = header.validate(&(10..=20)); + match result { + Err(Error::BlobApplicationVersionMismatch { expected, found }) => { + assert_eq!(expected, 10..=20); + assert_eq!(found, 5); + } + _ => panic!("expected BlobApplicationVersionMismatch error"), + } + } + + #[test] + fn test_header_bytes_round_trip() { + // Test round-trip with default version + let original = Header::new(0); + let bytes = original.to_bytes(); + let restored = Header::from_bytes(bytes); + assert_eq!(original, restored); + + // Test round-trip with non-zero version + let original = Header::new(42); + let bytes = original.to_bytes(); + let restored = Header::from_bytes(bytes); + assert_eq!(original, restored); + assert_eq!(restored.application_version, 42); + + // Test round-trip with max version + let original = Header::new(u16::MAX); + let bytes = original.to_bytes(); + let restored = Header::from_bytes(bytes); + assert_eq!(original, restored); + assert_eq!(restored.application_version, u16::MAX); + + // Verify byte layout explicitly + let header = Header::new(0x1234); + let bytes = header.to_bytes(); + assert_eq!(&bytes[..4], &Header::MAGIC); + assert_eq!(&bytes[4..6], &Header::HEADER_VERSION.to_be_bytes()); + assert_eq!(&bytes[6..8], &0x1234u16.to_be_bytes()); + } + fn test_error_future(runner: R) { async fn error_future() -> Result<&'static str, &'static str> { Err("An error occurred") @@ -891,11 +1093,17 @@ mod tests { let partition = "test_partition"; let name = b"test_blob"; - // Open a new blob - let (blob, _) = context - .open(partition, name) + // Open a new blob and verify returned version + let (blob, size, app_version) = context + .open(partition, name, TEST_VERSIONS) .await .expect("Failed to open blob"); + assert_eq!(size, 0, "new blob should have size 0"); + assert_eq!( + app_version, + *TEST_VERSIONS.end(), + "new blob should have app version from end of range" + ); // 
Write data to the blob let data = b"Hello, Storage!"; @@ -923,12 +1131,17 @@ mod tests { .expect("Failed to scan partition"); assert!(blobs.contains(&name.to_vec())); - // Reopen the blob - let (blob, len) = context - .open(partition, name) + // Reopen the blob and verify version persists + let (blob, len, app_version) = context + .open(partition, name, TEST_VERSIONS) .await .expect("Failed to reopen blob"); assert_eq!(len, data.len() as u64); + assert_eq!( + app_version, + *TEST_VERSIONS.end(), + "reopened blob should have same app version" + ); // Read data part of message back let read = blob @@ -974,8 +1187,8 @@ mod tests { let name = b"test_blob_rw"; // Open a new blob - let (blob, _) = context - .open(partition, name) + let (blob, _, _) = context + .open(partition, name, TEST_VERSIONS) .await .expect("Failed to open blob"); @@ -1030,8 +1243,8 @@ mod tests { let name = b"test_blob_resize"; // Open and write to a new blob - let (blob, _) = context - .open(partition, name) + let (blob, _, _) = context + .open(partition, name, TEST_VERSIONS) .await .expect("Failed to open blob"); @@ -1042,7 +1255,7 @@ mod tests { blob.sync().await.expect("Failed to sync after write"); // Re-open and check length - let (blob, len) = context.open(partition, name).await.unwrap(); + let (blob, len, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap(); assert_eq!(len, data.len() as u64); // Resize to extend the file @@ -1053,7 +1266,7 @@ mod tests { blob.sync().await.expect("Failed to sync after resize"); // Re-open and check length again - let (blob, len) = context.open(partition, name).await.unwrap(); + let (blob, len, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap(); assert_eq!(len, new_len); // Read original data @@ -1072,7 +1285,7 @@ mod tests { blob.sync().await.unwrap(); // Reopen to check truncation - let (blob, size) = context.open(partition, name).await.unwrap(); + let (blob, size, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap(); assert_eq!(size, data.len() as u64); // Read truncated data @@ -1094,8 +1307,8 @@ mod tests { for (additional, partition) in partitions.iter().enumerate() { // Open a new blob - let (blob, _) = context - .open(partition, name) + let (blob, _, _) = context + .open(partition, name, TEST_VERSIONS) .await .expect("Failed to open blob"); @@ -1113,8 +1326,8 @@ mod tests { for (additional, partition) in partitions.iter().enumerate() { // Open a new blob - let (blob, len) = context - .open(partition, name) + let (blob, len, _) = context + .open(partition, name, TEST_VERSIONS) .await .expect("Failed to open blob"); assert_eq!(len, (data1.len() + data2.len() + additional) as u64); @@ -1139,8 +1352,8 @@ mod tests { let name = b"test_blob_rw"; // Open a new blob - let (blob, _) = context - .open(partition, name) + let (blob, _, _) = context + .open(partition, name, TEST_VERSIONS) .await .expect("Failed to open blob"); @@ -1169,8 +1382,8 @@ mod tests { let name = b"test_blob_rw"; // Open a new blob - let (blob, _) = context - .open(partition, name) + let (blob, _, _) = context + .open(partition, name, TEST_VERSIONS) .await .expect("Failed to open blob"); diff --git a/runtime/src/storage/audited.rs b/runtime/src/storage/audited.rs index 3bdfb6b9ee..3adf531142 100644 --- a/runtime/src/storage/audited.rs +++ b/runtime/src/storage/audited.rs @@ -18,22 +18,31 @@ impl Storage { impl crate::Storage for Storage { type Blob = Blob; - async fn open(&self, partition: &str, name: &[u8]) -> Result<(Self::Blob, u64), Error> { + async fn open( + &self, + 
partition: &str, + name: &[u8], + versions: std::ops::RangeInclusive, + ) -> Result<(Self::Blob, u64, u16), Error> { self.auditor.event(b"open", |hasher| { hasher.update(partition.as_bytes()); hasher.update(name); }); - self.inner.open(partition, name).await.map(|(blob, len)| { - ( - Blob { - auditor: self.auditor.clone(), - inner: blob, - partition: partition.into(), - name: name.to_vec(), - }, - len, - ) - }) + self.inner + .open(partition, name, versions) + .await + .map(|(blob, len, app_version)| { + ( + Blob { + auditor: self.auditor.clone(), + inner: blob, + partition: partition.into(), + name: name.to_vec(), + }, + len, + app_version, + ) + }) } async fn remove(&self, partition: &str, name: Option<&[u8]>) -> Result<(), Error> { @@ -114,6 +123,9 @@ mod tests { }; use std::sync::Arc; + /// Default version range for tests + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + #[tokio::test] async fn test_audited_storage() { let inner = MemStorage::default(); @@ -138,8 +150,14 @@ mod tests { let storage2 = AuditedStorage::new(inner2, auditor2.clone()); // Perform a sequence of operations on both storages simultaneously - let (blob1, _) = storage1.open("partition", b"test_blob").await.unwrap(); - let (blob2, _) = storage2.open("partition", b"test_blob").await.unwrap(); + let (blob1, _, _) = storage1 + .open("partition", b"test_blob", TEST_VERSIONS) + .await + .unwrap(); + let (blob2, _, _) = storage2 + .open("partition", b"test_blob", TEST_VERSIONS) + .await + .unwrap(); // Write data to the blobs blob1.write_at(b"hello world".to_vec(), 0).await.unwrap(); diff --git a/runtime/src/storage/iouring.rs b/runtime/src/storage/iouring.rs index f32e81ed75..0bac52b72f 100644 --- a/runtime/src/storage/iouring.rs +++ b/runtime/src/storage/iouring.rs @@ -22,7 +22,7 @@ use crate::{ iouring::{self, should_retry}, - Blob as _, Error, + Error, Header, }; use commonware_utils::{from_hex, hex, StableBuf}; use futures::{ @@ -34,7 +34,8 @@ use io_uring::{opcode, types}; use prometheus_client::registry::Registry; use std::{ fs::{self, File}, - io::Error as IoError, + io::{Error as IoError, Read, Seek, SeekFrom, Write}, + ops::RangeInclusive, os::fd::AsRawFd, path::{Path, PathBuf}, sync::Arc, @@ -99,7 +100,12 @@ impl Storage { impl crate::Storage for Storage { type Blob = Blob; - async fn open(&self, partition: &str, name: &[u8]) -> Result<(Blob, u64), Error> { + async fn open( + &self, + partition: &str, + name: &[u8], + versions: RangeInclusive, + ) -> Result<(Blob, u64, u16), Error> { super::validate_partition_name(partition)?; // Construct the full path @@ -115,7 +121,7 @@ impl crate::Storage for Storage { fs::create_dir_all(parent).map_err(|_| Error::PartitionCreationFailed(partition.into()))?; // Open the file, creating it if it doesn't exist - let file = fs::OpenOptions::new() + let mut file = fs::OpenOptions::new() .read(true) .write(true) .create(true) @@ -124,27 +130,47 @@ impl crate::Storage for Storage { .map_err(|e| Error::BlobOpenFailed(partition.into(), hex(name), e))?; // Assume empty files are newly created. Existing empty files will be synced too; that's OK. 
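Every `Blob` implementation touched by this patch applies the same logical-to-raw offset translation that the hunk below introduces for io_uring. As a minimal standalone sketch, assuming only the `Header` and `Error` types added in `runtime/src/lib.rs` (the helper name is illustrative, not from the patch):

```rust
use commonware_runtime::{Error, Header};

/// Maps a logical offset (what callers of `Blob` pass) to the raw file offset.
/// The first `Header::SIZE_U64` bytes of the file are reserved for the header.
fn to_raw_offset(logical: u64) -> Result<u64, Error> {
    logical
        .checked_add(Header::SIZE_U64)
        .ok_or(Error::OffsetOverflow)
}
```

Conversely, a raw file of length `n >= Header::SIZE_U64` holds `n - Header::SIZE_U64` logical bytes, which is the size `open` returns.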
- let len = file.metadata().map_err(|_| Error::ReadFailed)?.len(); - let newly_created = len == 0; - - // Create the blob - let blob = Blob::new(partition.into(), name, file, self.io_sender.clone()); - - // Only sync if we created a new file - if newly_created { - // Sync the blob to ensure it is durably created - blob.sync().await?; + let raw_len = file.metadata().map_err(|_| Error::ReadFailed)?.len(); + + // Handle header: new/corrupted blobs get a fresh header written, + // existing blobs have their header read. + let (app_version, logical_len) = if raw_len < Header::SIZE_U64 { + // New (or corrupted) blob - truncate and write header with latest version + let app_version = *versions.end(); + let header = Header::new(app_version); + file.set_len(Header::SIZE_U64) + .map_err(|e| Error::BlobResizeFailed(partition.into(), hex(name), e))?; + file.seek(SeekFrom::Start(0)) + .map_err(|_| Error::WriteFailed)?; + file.write_all(&header.to_bytes()) + .map_err(|_| Error::WriteFailed)?; + file.sync_all() + .map_err(|e| Error::BlobSyncFailed(partition.into(), hex(name), e))?; + + // For new files, sync the parent directory to ensure the directory entry is durable. + if raw_len == 0 { + sync_dir(parent)?; + if !parent_existed { + sync_dir(&self.storage_directory)?; + } + } - // Sync the parent directory to ensure the directory entry is durable - sync_dir(parent)?; + (app_version, 0) + } else { + // Existing blob - read and validate header + file.seek(SeekFrom::Start(0)) + .map_err(|_| Error::ReadFailed)?; + let mut header_bytes = [0u8; Header::SIZE]; + file.read_exact(&mut header_bytes) + .map_err(|_| Error::ReadFailed)?; + let header = Header::from_bytes(header_bytes); + header.validate(&versions)?; - // Sync storage directory if parent directory did not exist - if !parent_existed { - sync_dir(&self.storage_directory)?; - } - } + (header.application_version, raw_len - Header::SIZE_U64) + }; - Ok((blob, len)) + let blob = Blob::new(partition.into(), name, file, self.io_sender.clone()); + Ok((blob, logical_len, app_version)) } async fn remove(&self, partition: &str, name: Option<&[u8]>) -> Result<(), Error> { @@ -243,6 +269,9 @@ impl crate::Blob for Blob { let mut bytes_read = 0; let buf_len = buf.len(); let mut io_sender = self.io_sender.clone(); + let offset = offset + .checked_add(Header::SIZE_U64) + .ok_or(Error::OffsetOverflow)?; while bytes_read < buf_len { // Figure out how much is left to read and where to read into. // @@ -299,6 +328,9 @@ impl crate::Blob for Blob { let mut bytes_written = 0; let buf_len = buf.len(); let mut io_sender = self.io_sender.clone(); + let offset = offset + .checked_add(Header::SIZE_U64) + .ok_or(Error::OffsetOverflow)?; while bytes_written < buf_len { // Figure out how much is left to write and where to write from. // @@ -348,6 +380,9 @@ impl crate::Blob for Blob { // TODO: Make this async. 
See https://github.com/commonwarexyz/monorepo/issues/831 async fn resize(&self, len: u64) -> Result<(), Error> { + let len = len + .checked_add(Header::SIZE_U64) + .ok_or(Error::OffsetOverflow)?; self.file.set_len(len).map_err(|e| { Error::BlobResizeFailed(self.partition.clone(), hex(&self.name), IoError::other(e)) }) @@ -405,10 +440,13 @@ impl crate::Blob for Blob { #[cfg(test)] mod tests { use super::*; - use crate::storage::tests::run_storage_tests; + use crate::{storage::tests::run_storage_tests, Blob, Header, Storage as _}; use rand::{Rng as _, SeedableRng as _}; use std::env; + /// Default version range for tests + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + // Helper for creating test storage fn create_test_storage() -> (Storage, PathBuf) { let mut rng = rand::rngs::StdRng::from_entropy(); @@ -431,4 +469,139 @@ mod tests { run_storage_tests(storage).await; let _ = std::fs::remove_dir_all(storage_directory); } + + #[tokio::test] + async fn test_blob_header_handling() { + let (storage, storage_directory) = create_test_storage(); + + // Test 1: New blob returns logical size 0 and correct application version + let (blob, size, app_version) = storage + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); + assert_eq!(size, 0, "new blob should have logical size 0"); + assert_eq!( + app_version, + *TEST_VERSIONS.end(), + "new blob should have app version 0" + ); + + // Verify raw file has 8 bytes (header only) + let file_path = storage_directory.join("partition").join(hex(b"test")); + let metadata = std::fs::metadata(&file_path).unwrap(); + assert_eq!( + metadata.len(), + Header::SIZE_U64, + "raw file should have 8-byte header" + ); + + // Test 2: Logical offset handling - write at offset 0 stores at raw offset 8 + let data = b"hello world"; + blob.write_at(data.to_vec(), 0).await.unwrap(); + blob.sync().await.unwrap(); + + // Verify raw file size + let metadata = std::fs::metadata(&file_path).unwrap(); + assert_eq!(metadata.len(), Header::SIZE_U64 + data.len() as u64); + + // Verify raw file layout + let raw_content = std::fs::read(&file_path).unwrap(); + assert_eq!(&raw_content[..Header::MAGIC_LENGTH], &Header::MAGIC); + // Header version (bytes 4-5) and App version (bytes 6-7) + assert_eq!( + &raw_content[Header::MAGIC_LENGTH..Header::MAGIC_LENGTH + Header::VERSION_LENGTH], + &Header::HEADER_VERSION.to_be_bytes() + ); + // Data should start at offset 8 + assert_eq!(&raw_content[Header::SIZE..], data); + + // Test 3: Read at logical offset 0 returns data from raw offset 8 + let read_buf = blob.read_at(vec![0u8; data.len()], 0).await.unwrap(); + assert_eq!(read_buf.as_ref(), data); + + // Test 4: Resize with logical length + blob.resize(5).await.unwrap(); + blob.sync().await.unwrap(); + let metadata = std::fs::metadata(&file_path).unwrap(); + assert_eq!( + metadata.len(), + Header::SIZE_U64 + 5, + "resize(5) should result in 13 raw bytes" + ); + + // resize(0) should leave only header + blob.resize(0).await.unwrap(); + blob.sync().await.unwrap(); + let metadata = std::fs::metadata(&file_path).unwrap(); + assert_eq!( + metadata.len(), + Header::SIZE_U64, + "resize(0) should leave only header" + ); + + // Test 5: Reopen existing blob preserves header and returns correct logical size + blob.write_at(b"test data".to_vec(), 0).await.unwrap(); + blob.sync().await.unwrap(); + drop(blob); + + let (blob2, size2, app_version2) = storage + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); + assert_eq!(size2, 9, "reopened blob should have logical size 9"); + 
assert_eq!(app_version2, *TEST_VERSIONS.end()); + let read_buf = blob2.read_at(vec![0u8; 9], 0).await.unwrap(); + assert_eq!(read_buf.as_ref(), b"test data"); + drop(blob2); + + // Test 6: Corrupted blob recovery (0 < raw_size < 8) + // Manually create a corrupted file with only 4 bytes + let corrupted_path = storage_directory.join("partition").join(hex(b"corrupted")); + std::fs::write(&corrupted_path, vec![0u8; 4]).unwrap(); + + // Opening should truncate and write fresh header + let (blob3, size3, app_version3) = storage + .open("partition", b"corrupted", TEST_VERSIONS) + .await + .unwrap(); + assert_eq!(size3, 0, "corrupted blob should return logical size 0"); + assert_eq!(app_version3, *TEST_VERSIONS.end()); + + // Verify raw file now has proper 8-byte header + let metadata = std::fs::metadata(&corrupted_path).unwrap(); + assert_eq!( + metadata.len(), + Header::SIZE_U64, + "corrupted blob should be reset to header-only" + ); + + // Cleanup + drop(blob3); + let _ = std::fs::remove_dir_all(&storage_directory); + } + + #[tokio::test] + async fn test_blob_magic_mismatch() { + let (storage, storage_directory) = create_test_storage(); + + // Create the partition directory + let partition_path = storage_directory.join("partition"); + std::fs::create_dir_all(&partition_path).unwrap(); + + // Manually create a file with invalid magic bytes + let bad_magic_path = partition_path.join(hex(b"bad_magic")); + std::fs::write(&bad_magic_path, vec![0u8; Header::SIZE]).unwrap(); + + // Opening should fail with magic mismatch error + let result = storage.open("partition", b"bad_magic", TEST_VERSIONS).await; + match result { + Err(crate::Error::BlobMagicMismatch { found }) => { + assert_eq!(found, [0u8; Header::MAGIC_LENGTH]); + } + Err(err) => panic!("expected BlobMagicMismatch error, got: {:?}", err), + Ok(_) => panic!("expected error, got Ok"), + } + + let _ = std::fs::remove_dir_all(&storage_directory); + } } diff --git a/runtime/src/storage/memory.rs b/runtime/src/storage/memory.rs index a92add9a5c..8d6332dc8d 100644 --- a/runtime/src/storage/memory.rs +++ b/runtime/src/storage/memory.rs @@ -1,6 +1,8 @@ +use crate::Header; use commonware_utils::{hex, StableBuf}; use std::{ collections::BTreeMap, + ops::RangeInclusive, sync::{Arc, Mutex, RwLock}, }; @@ -21,12 +23,36 @@ impl Default for Storage { impl crate::Storage for Storage { type Blob = Blob; - async fn open(&self, partition: &str, name: &[u8]) -> Result<(Self::Blob, u64), crate::Error> { + async fn open( + &self, + partition: &str, + name: &[u8], + versions: RangeInclusive, + ) -> Result<(Self::Blob, u64, u16), crate::Error> { super::validate_partition_name(partition)?; let mut partitions = self.partitions.lock().unwrap(); let partition_entry = partitions.entry(partition.into()).or_default(); let content = partition_entry.entry(name.into()).or_default(); + + let raw_len = content.len() as u64; + let (app_version, logical_len) = if raw_len < Header::SIZE_U64 { + // New or corrupted blob - truncate and write default header with latest version + let app_version = *versions.end(); + let header = Header::new(app_version); + content.clear(); + content.extend_from_slice(&header.to_bytes()); + (app_version, 0) + } else { + // Existing blob - read and validate header + let mut header_bytes = [0u8; Header::SIZE]; + header_bytes.copy_from_slice(&content[..Header::SIZE]); + let header = Header::from_bytes(header_bytes); + header.validate(&versions)?; + + (header.application_version, raw_len - Header::SIZE_U64) + }; + Ok(( Blob::new( self.partitions.clone(), 
@@ -34,7 +60,8 @@ impl crate::Storage for Storage { name, content.clone(), ), - content.len() as u64, + logical_len, + app_version, )) } @@ -109,6 +136,9 @@ impl crate::Blob for Blob { ) -> Result { let mut buf = buf.into(); let offset = offset + .checked_add(Header::SIZE_U64) + .ok_or(crate::Error::OffsetOverflow)?; + let offset: usize = offset .try_into() .map_err(|_| crate::Error::OffsetOverflow)?; let content = self.content.read().unwrap(); @@ -127,6 +157,9 @@ impl crate::Blob for Blob { ) -> Result<(), crate::Error> { let buf = buf.into(); let offset = offset + .checked_add(Header::SIZE_U64) + .ok_or(crate::Error::OffsetOverflow)?; + let offset: usize = offset .try_into() .map_err(|_| crate::Error::OffsetOverflow)?; let mut content = self.content.write().unwrap(); @@ -139,7 +172,10 @@ impl crate::Blob for Blob { } async fn resize(&self, len: u64) -> Result<(), crate::Error> { - let len = len.try_into().map_err(|_| crate::Error::OffsetOverflow)?; + let len = len + .checked_add(Header::SIZE_U64) + .ok_or(crate::Error::OffsetOverflow)?; + let len: usize = len.try_into().map_err(|_| crate::Error::OffsetOverflow)?; let mut content = self.content.write().unwrap(); content.resize(len, 0); Ok(()) @@ -168,11 +204,189 @@ impl crate::Blob for Blob { #[cfg(test)] mod tests { use super::*; - use crate::storage::tests::run_storage_tests; + use crate::{storage::tests::run_storage_tests, Blob, Header, Storage as _}; + + /// Default version range for tests (application version 0). + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; #[tokio::test] async fn test_memory_storage() { let storage = Storage::default(); run_storage_tests(storage).await; } + + #[tokio::test] + async fn test_blob_header_handling() { + let storage = Storage::default(); + + // Test 1: New blob returns logical size 0 and correct app version + let (blob, size, app_version) = storage + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); + assert_eq!(size, 0, "new blob should have logical size 0"); + assert_eq!( + app_version, + *TEST_VERSIONS.end(), + "new blob should have default app version" + ); + + // Verify raw storage has 8 bytes (header only) + { + let partitions = storage.partitions.lock().unwrap(); + let partition = partitions.get("partition").unwrap(); + let raw_content = partition.get(&b"test".to_vec()).unwrap(); + assert_eq!( + raw_content.len(), + Header::SIZE, + "raw storage should have 8-byte header" + ); + } + + // Test 2: Logical offset handling - write at offset 0 stores at raw offset 8 + let data = b"hello world"; + blob.write_at(data.to_vec(), 0).await.unwrap(); + blob.sync().await.unwrap(); + + // Verify raw storage layout + { + let partitions = storage.partitions.lock().unwrap(); + let partition = partitions.get("partition").unwrap(); + let raw_content = partition.get(&b"test".to_vec()).unwrap(); + assert_eq!(raw_content.len(), Header::SIZE + data.len()); + // First 4 bytes should be magic bytes + assert_eq!(&raw_content[..Header::MAGIC_LENGTH], &Header::MAGIC); + // Next 2 bytes should be header version + assert_eq!( + &raw_content[Header::MAGIC_LENGTH..Header::MAGIC_LENGTH + Header::VERSION_LENGTH], + &Header::HEADER_VERSION.to_be_bytes() + ); + // Data should start at offset 8 + assert_eq!(&raw_content[Header::SIZE..], data); + } + + // Test 3: Read at logical offset 0 returns data from raw offset 8 + let read_buf = blob.read_at(vec![0u8; data.len()], 0).await.unwrap(); + assert_eq!(read_buf.as_ref(), data); + + // Test 4: Resize with logical length + blob.resize(5).await.unwrap(); + 
blob.sync().await.unwrap(); + { + let partitions = storage.partitions.lock().unwrap(); + let partition = partitions.get("partition").unwrap(); + let raw_content = partition.get(&b"test".to_vec()).unwrap(); + assert_eq!( + raw_content.len(), + Header::SIZE + 5, + "resize(5) should result in 13 raw bytes" + ); + } + + // resize(0) should leave only header + blob.resize(0).await.unwrap(); + blob.sync().await.unwrap(); + { + let partitions = storage.partitions.lock().unwrap(); + let partition = partitions.get("partition").unwrap(); + let raw_content = partition.get(&b"test".to_vec()).unwrap(); + assert_eq!( + raw_content.len(), + Header::SIZE, + "resize(0) should leave only header" + ); + } + + // Test 5: Reopen existing blob preserves header and returns correct logical size + blob.write_at(b"test data".to_vec(), 0).await.unwrap(); + blob.sync().await.unwrap(); + drop(blob); + + let (blob2, size2, app_version2) = storage + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); + assert_eq!(size2, 9, "reopened blob should have logical size 9"); + assert_eq!(app_version2, *TEST_VERSIONS.end()); + let read_buf = blob2.read_at(vec![0u8; 9], 0).await.unwrap(); + assert_eq!(read_buf.as_ref(), b"test data"); + + // Test 6: Corrupted blob recovery (0 < raw_size < 8) + // Manually corrupt the raw storage to have only 2 bytes + { + let mut partitions = storage.partitions.lock().unwrap(); + let partition = partitions.get_mut("partition").unwrap(); + partition.insert(b"corrupted".to_vec(), vec![0u8; 2]); + } + + // Opening should truncate and write fresh header + let (_blob3, size3, app_version3) = storage + .open("partition", b"corrupted", TEST_VERSIONS) + .await + .unwrap(); + assert_eq!(size3, 0, "corrupted blob should return logical size 0"); + assert_eq!(app_version3, *TEST_VERSIONS.end()); + + // Verify raw storage now has proper 8-byte header + { + let partitions = storage.partitions.lock().unwrap(); + let partition = partitions.get("partition").unwrap(); + let raw_content = partition.get(&b"corrupted".to_vec()).unwrap(); + assert_eq!( + raw_content.len(), + Header::SIZE, + "corrupted blob should be reset to header-only" + ); + } + } + + #[tokio::test] + async fn test_blob_magic_mismatch() { + let storage = Storage::default(); + + // Manually insert a blob with invalid magic bytes + { + let mut partitions = storage.partitions.lock().unwrap(); + let partition = partitions.entry("partition".into()).or_default(); + // Create a blob with wrong magic bytes (all zeros) + partition.insert(b"bad_magic".to_vec(), vec![0u8; Header::SIZE]); + } + + // Opening should fail with magic mismatch error + let result = storage.open("partition", b"bad_magic", TEST_VERSIONS).await; + match result { + Err(crate::Error::BlobMagicMismatch { found }) => { + assert_eq!(found, [0u8; Header::MAGIC_LENGTH]); + } + Err(err) => panic!("expected BlobMagicMismatch error, got: {:?}", err), + Ok(_) => panic!("expected error, got Ok"), + } + } + + #[tokio::test] + async fn test_blob_version_mismatch() { + let storage = Storage::default(); + + // Create blob with version 1 + let (_, _, app_version) = storage.open("partition", b"v1", 1..=1).await.unwrap(); + assert_eq!(app_version, 1, "new blob should have version 1"); + + // Reopen with a range that includes version 1 + let (_, _, app_version) = storage.open("partition", b"v1", 0..=2).await.unwrap(); + assert_eq!(app_version, 1, "existing blob should retain version 1"); + + // Try to open with version range 2..=2 (should fail) + let result = storage.open("partition", b"v1", 
2..=2).await; + match result { + Err(crate::Error::BlobApplicationVersionMismatch { expected, found }) => { + assert_eq!(expected, 2..=2); + assert_eq!(found, 1); + } + Err(err) => panic!( + "expected BlobApplicationVersionMismatch error, got: {:?}", + err + ), + Ok(_) => panic!("expected error, got Ok"), + } + } } diff --git a/runtime/src/storage/metered.rs b/runtime/src/storage/metered.rs index b4a118acf4..540b860528 100644 --- a/runtime/src/storage/metered.rs +++ b/runtime/src/storage/metered.rs @@ -4,7 +4,10 @@ use prometheus_client::{ metrics::{counter::Counter, gauge::Gauge}, registry::Registry, }; -use std::{ops::Deref, sync::Arc}; +use std::{ + ops::{Deref, RangeInclusive}, + sync::Arc, +}; pub struct Metrics { pub open_blobs: Gauge, @@ -74,15 +77,21 @@ impl Storage { impl crate::Storage for Storage { type Blob = Blob; - async fn open(&self, partition: &str, name: &[u8]) -> Result<(Self::Blob, u64), Error> { + async fn open( + &self, + partition: &str, + name: &[u8], + versions: RangeInclusive, + ) -> Result<(Self::Blob, u64, u16), Error> { self.metrics.open_blobs.inc(); - let (inner, len) = self.inner.open(partition, name).await?; + let (inner, len, app_version) = self.inner.open(partition, name, versions).await?; Ok(( Blob { inner, metrics: Arc::new(MetricsHandle(self.metrics.clone())), }, len, + app_version, )) } @@ -161,6 +170,9 @@ mod tests { }; use prometheus_client::registry::Registry; + /// Default version range for tests + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + #[tokio::test] async fn test_metered_storage() { let mut registry = Registry::default(); @@ -178,7 +190,10 @@ mod tests { let storage = Storage::new(inner, &mut registry); // Open a blob - let (blob, _) = storage.open("partition", b"test_blob").await.unwrap(); + let (blob, _, _) = storage + .open("partition", b"test_blob", TEST_VERSIONS) + .await + .unwrap(); // Verify that the open_blobs metric is incremented let open_blobs = storage.metrics.open_blobs.get(); @@ -234,8 +249,14 @@ mod tests { let storage = Storage::new(inner, &mut registry); // Open multiple blobs - let (blob1, _) = storage.open("partition", b"blob1").await.unwrap(); - let (blob2, _) = storage.open("partition", b"blob2").await.unwrap(); + let (blob1, _, _) = storage + .open("partition", b"blob1", TEST_VERSIONS) + .await + .unwrap(); + let (blob2, _, _) = storage + .open("partition", b"blob2", TEST_VERSIONS) + .await + .unwrap(); // Verify that the open_blobs metric is incremented correctly let open_blobs = storage.metrics.open_blobs.get(); @@ -275,7 +296,10 @@ mod tests { let storage = Storage::new(inner, &mut registry); // Open a blob - let (blob, _) = storage.open("partition", b"test_blob").await.unwrap(); + let (blob, _, _) = storage + .open("partition", b"test_blob", TEST_VERSIONS) + .await + .unwrap(); // Verify that the open_blobs metric is incremented assert_eq!( diff --git a/runtime/src/storage/mod.rs b/runtime/src/storage/mod.rs index c7a8f5501e..83f70c7cdd 100644 --- a/runtime/src/storage/mod.rs +++ b/runtime/src/storage/mod.rs @@ -26,6 +26,9 @@ pub fn validate_partition_name(partition: &str) -> Result<(), crate::Error> { pub(crate) mod tests { use crate::{Blob, Storage}; + /// Default version range for tests + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + /// Runs the full suite of tests on the provided storage implementation. 
pub(crate) async fn run_storage_tests(storage: S) where @@ -55,7 +58,10 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, len) = storage.open("partition", b"test_blob").await.unwrap(); + let (blob, len, _) = storage + .open("partition", b"test_blob", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(len, 0); blob.write_at(Vec::from("hello world"), 0).await.unwrap(); @@ -74,7 +80,10 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - storage.open("partition", b"test_blob").await.unwrap(); + storage + .open("partition", b"test_blob", TEST_VERSIONS) + .await + .unwrap(); storage .remove("partition", Some(b"test_blob")) .await @@ -90,8 +99,14 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - storage.open("partition", b"blob1").await.unwrap(); - storage.open("partition", b"blob2").await.unwrap(); + storage + .open("partition", b"blob1", TEST_VERSIONS) + .await + .unwrap(); + storage + .open("partition", b"blob2", TEST_VERSIONS) + .await + .unwrap(); let blobs = storage.scan("partition").await.unwrap(); assert_eq!( @@ -115,7 +130,10 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _) = storage.open("partition", b"test_blob").await.unwrap(); + let (blob, _, _) = storage + .open("partition", b"test_blob", TEST_VERSIONS) + .await + .unwrap(); // Initialize blob with data of sufficient length first blob.write_at(b"concurrent write".to_vec(), 0) @@ -153,7 +171,10 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _) = storage.open("partition", b"large_blob").await.unwrap(); + let (blob, _, _) = storage + .open("partition", b"large_blob", TEST_VERSIONS) + .await + .unwrap(); let large_data = vec![42u8; 10 * 1024 * 1024]; // 10 MB blob.write_at(large_data.clone(), 0).await.unwrap(); @@ -169,8 +190,8 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _) = storage - .open("test_overwrite_data", b"test_blob") + let (blob, _, _) = storage + .open("test_overwrite_data", b"test_blob", TEST_VERSIONS) .await .unwrap(); @@ -196,8 +217,8 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _) = storage - .open("test_read_beyond_written_data", b"test_blob") + let (blob, _, _) = storage + .open("test_read_beyond_written_data", b"test_blob", TEST_VERSIONS) .await .unwrap(); @@ -219,8 +240,8 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _) = storage - .open("test_write_at_large_offset", b"test_blob") + let (blob, _, _) = storage + .open("test_write_at_large_offset", b"test_blob", TEST_VERSIONS) .await .unwrap(); @@ -244,8 +265,8 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _) = storage - .open("test_append_data", b"test_blob") + let (blob, _, _) = storage + .open("test_append_data", b"test_blob", TEST_VERSIONS) .await .unwrap(); @@ -266,7 +287,10 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _) = storage.open("partition", b"test_blob").await.unwrap(); + let (blob, _, _) = storage + .open("partition", b"test_blob", TEST_VERSIONS) + .await + .unwrap(); // Write data at different offsets blob.write_at(b"first".to_vec(), 0).await.unwrap(); @@ -286,8 +310,8 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _) = storage - .open("test_large_data_in_chunks", b"large_blob") + let (blob, _, _) = storage + 
.open("test_large_data_in_chunks", b"large_blob", TEST_VERSIONS) .await .unwrap(); @@ -316,8 +340,8 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _) = storage - .open("test_read_empty_blob", b"empty_blob") + let (blob, _, _) = storage + .open("test_read_empty_blob", b"empty_blob", TEST_VERSIONS) .await .unwrap(); @@ -334,8 +358,8 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _) = storage - .open("test_overlapping_writes", b"test_blob") + let (blob, _, _) = storage + .open("test_overlapping_writes", b"test_blob", TEST_VERSIONS) .await .unwrap(); @@ -358,8 +382,8 @@ pub(crate) mod tests { S::Blob: Send + Sync, { { - let (blob, _) = storage - .open("test_resize_then_open", b"test_blob") + let (blob, _, _) = storage + .open("test_resize_then_open", b"test_blob", TEST_VERSIONS) .await .unwrap(); @@ -374,8 +398,8 @@ pub(crate) mod tests { } // Reopen the blob - let (blob, len) = storage - .open("test_resize_then_open", b"test_blob") + let (blob, len, _) = storage + .open("test_resize_then_open", b"test_blob", TEST_VERSIONS) .await .unwrap(); assert_eq!(len, 5, "Blob length after resize is incorrect"); @@ -401,7 +425,7 @@ pub(crate) mod tests { ] { assert!( !matches!( - storage.open(valid, b"blob").await, + storage.open(valid, b"blob", TEST_VERSIONS).await, Err(crate::Error::PartitionNameInvalid(_)) ), "Valid partition name '{valid}' should be accepted by open" @@ -432,7 +456,7 @@ pub(crate) mod tests { ] { assert!( matches!( - storage.open(invalid, b"blob").await, + storage.open(invalid, b"blob", TEST_VERSIONS).await, Err(crate::Error::PartitionNameInvalid(_)) ), "Invalid partition name '{invalid}' should be rejected by open" diff --git a/runtime/src/storage/tokio/fallback.rs b/runtime/src/storage/tokio/fallback.rs index 7968ce44d8..ac273c8471 100644 --- a/runtime/src/storage/tokio/fallback.rs +++ b/runtime/src/storage/tokio/fallback.rs @@ -1,4 +1,4 @@ -use crate::Error; +use crate::{Error, Header}; use commonware_utils::{hex, StableBuf}; use std::{io::SeekFrom, sync::Arc}; use tokio::{ @@ -35,6 +35,9 @@ impl crate::Blob for Blob { ) -> Result { let mut file = self.file.lock().await; let mut buf = buf.into(); + let offset = offset + .checked_add(Header::SIZE_U64) + .ok_or(Error::OffsetOverflow)?; file.seek(SeekFrom::Start(offset)) .await .map_err(|_| Error::ReadFailed)?; @@ -46,6 +49,9 @@ impl crate::Blob for Blob { async fn write_at(&self, buf: impl Into + Send, offset: u64) -> Result<(), Error> { let mut file = self.file.lock().await; + let offset = offset + .checked_add(Header::SIZE_U64) + .ok_or(Error::OffsetOverflow)?; file.seek(SeekFrom::Start(offset)) .await .map_err(|_| Error::WriteFailed)?; @@ -57,6 +63,9 @@ impl crate::Blob for Blob { async fn resize(&self, len: u64) -> Result<(), Error> { let file = self.file.lock().await; + let len = len + .checked_add(Header::SIZE_U64) + .ok_or(Error::OffsetOverflow)?; file.set_len(len) .await .map_err(|e| Error::BlobResizeFailed(self.partition.clone(), hex(&self.name), e))?; diff --git a/runtime/src/storage/tokio/mod.rs b/runtime/src/storage/tokio/mod.rs index 7e882bd4d6..521a92a6a4 100644 --- a/runtime/src/storage/tokio/mod.rs +++ b/runtime/src/storage/tokio/mod.rs @@ -1,9 +1,13 @@ -use crate::Error; +use crate::{Error, Header}; use commonware_utils::{from_hex, hex}; #[cfg(unix)] use std::path::Path; -use std::{path::PathBuf, sync::Arc}; -use tokio::{fs, sync::Mutex}; +use std::{ops::RangeInclusive, path::PathBuf, sync::Arc}; +use tokio::{ + fs, + 
io::{AsyncReadExt, AsyncWriteExt}, + sync::Mutex, +}; #[cfg(not(unix))] mod fallback; @@ -67,7 +71,12 @@ impl crate::Storage for Storage { #[cfg(not(unix))] type Blob = fallback::Blob; - async fn open(&self, partition: &str, name: &[u8]) -> Result<(Self::Blob, u64), Error> { + async fn open( + &self, + partition: &str, + name: &[u8], + versions: RangeInclusive, + ) -> Result<(Self::Blob, u64, u16), Error> { super::validate_partition_name(partition)?; // Acquire the filesystem lock @@ -127,18 +136,54 @@ impl crate::Storage for Storage { // Set the maximum buffer size file.set_max_buf_size(self.cfg.maximum_buffer_size); + // Handle header: new/corrupted blobs get a fresh header written, + // existing blobs have their header read. + let (app_version, logical_size) = if len < Header::SIZE_U64 { + // New or corrupted blob - truncate and write header with latest version + let app_version = *versions.end(); + let header = Header::new(app_version); + file.set_len(Header::SIZE_U64) + .await + .map_err(|e| Error::BlobResizeFailed(partition.into(), hex(name), e))?; + file.write_all(&header.to_bytes()) + .await + .map_err(|e| Error::BlobSyncFailed(partition.into(), hex(name), e))?; + file.sync_all() + .await + .map_err(|e| Error::BlobSyncFailed(partition.into(), hex(name), e))?; + (app_version, 0) + } else { + // Existing blob - read and validate header + let mut header_bytes = [0u8; Header::SIZE]; + file.read_exact(&mut header_bytes) + .await + .map_err(|_| Error::ReadFailed)?; + let header = Header::from_bytes(header_bytes); + header.validate(&versions)?; + + (header.application_version, len - Header::SIZE_U64) + }; + #[cfg(unix)] { // Convert to a blocking std::fs::File let file = file.into_std().await; // Construct the blob - Ok((Self::Blob::new(partition.into(), name, file), len)) + Ok(( + Self::Blob::new(partition.into(), name, file), + logical_size, + app_version, + )) } #[cfg(not(unix))] { // Construct the blob - Ok((Self::Blob::new(partition.into(), name, file), len)) + Ok(( + Self::Blob::new(partition.into(), name, file), + logical_size, + app_version, + )) } } @@ -204,10 +249,13 @@ impl crate::Storage for Storage { #[cfg(test)] mod tests { use super::*; - use crate::storage::tests::run_storage_tests; + use crate::{storage::tests::run_storage_tests, Blob, Header, Storage as _}; use rand::{Rng as _, SeedableRng}; use std::env; + /// Default version range for tests + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + #[tokio::test] async fn test_storage() { let mut rng = rand::rngs::StdRng::from_entropy(); @@ -216,4 +264,176 @@ mod tests { let storage = Storage::new(config); run_storage_tests(storage).await; } + + #[tokio::test] + async fn test_blob_header_handling() { + let mut rng = rand::rngs::StdRng::from_entropy(); + let storage_directory = + env::temp_dir().join(format!("storage_tokio_header_{}", rng.gen::())); + let config = Config::new(storage_directory.clone(), 2 * 1024 * 1024); + let storage = Storage::new(config); + + // Test 1: New blob returns logical size 0 and correct app version + let (blob, size, app_version) = storage + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); + assert_eq!(size, 0, "new blob should have logical size 0"); + assert_eq!( + app_version, + *TEST_VERSIONS.end(), + "new blob should have app version 0" + ); + + // Verify raw file has 8 bytes (header only) + let file_path = storage_directory.join("partition").join(hex(b"test")); + let metadata = std::fs::metadata(&file_path).unwrap(); + assert_eq!( + metadata.len(), + Header::SIZE_U64, + 
"raw file should have 8-byte header" + ); + + // Test 2: Logical offset handling - write at offset 0 stores at raw offset 8 + let data = b"hello world"; + blob.write_at(data.to_vec(), 0).await.unwrap(); + blob.sync().await.unwrap(); + + // Verify raw file size + let metadata = std::fs::metadata(&file_path).unwrap(); + assert_eq!(metadata.len(), Header::SIZE_U64 + data.len() as u64); + + // Verify raw file layout + let raw_content = std::fs::read(&file_path).unwrap(); + assert_eq!(&raw_content[..Header::MAGIC_LENGTH], &Header::MAGIC); + // Header version (bytes 4-5) and App version (bytes 6-7) + assert_eq!( + &raw_content[Header::MAGIC_LENGTH..Header::MAGIC_LENGTH + Header::VERSION_LENGTH], + &Header::HEADER_VERSION.to_be_bytes() + ); + // Data should start at offset 8 + assert_eq!(&raw_content[Header::SIZE..], data); + + // Test 3: Read at logical offset 0 returns data from raw offset 8 + let read_buf = blob.read_at(vec![0u8; data.len()], 0).await.unwrap(); + assert_eq!(read_buf.as_ref(), data); + + // Test 4: Resize with logical length + blob.resize(5).await.unwrap(); + blob.sync().await.unwrap(); + let metadata = std::fs::metadata(&file_path).unwrap(); + assert_eq!( + metadata.len(), + Header::SIZE_U64 + 5, + "resize(5) should result in 13 raw bytes" + ); + + // resize(0) should leave only header + blob.resize(0).await.unwrap(); + blob.sync().await.unwrap(); + let metadata = std::fs::metadata(&file_path).unwrap(); + assert_eq!( + metadata.len(), + Header::SIZE_U64, + "resize(0) should leave only header" + ); + + // Test 5: Reopen existing blob preserves header and returns correct logical size + blob.write_at(b"test data".to_vec(), 0).await.unwrap(); + blob.sync().await.unwrap(); + drop(blob); + + let (blob2, size2, app_version2) = storage + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); + assert_eq!(size2, 9, "reopened blob should have logical size 9"); + assert_eq!(app_version2, *TEST_VERSIONS.end()); + let read_buf = blob2.read_at(vec![0u8; 9], 0).await.unwrap(); + assert_eq!(read_buf.as_ref(), b"test data"); + drop(blob2); + + // Test 6: Corrupted blob recovery (0 < raw_size < 8) + // Manually create a corrupted file with only 4 bytes + let corrupted_path = storage_directory.join("partition").join(hex(b"corrupted")); + std::fs::write(&corrupted_path, vec![0u8; 4]).unwrap(); + + // Opening should truncate and write fresh header + let (blob3, size3, app_version3) = storage + .open("partition", b"corrupted", TEST_VERSIONS) + .await + .unwrap(); + assert_eq!(size3, 0, "corrupted blob should return logical size 0"); + assert_eq!(app_version3, *TEST_VERSIONS.end()); + + // Verify raw file now has proper 8-byte header + let metadata = std::fs::metadata(&corrupted_path).unwrap(); + assert_eq!( + metadata.len(), + Header::SIZE_U64, + "corrupted blob should be reset to header-only" + ); + + // Cleanup + drop(blob3); + let _ = std::fs::remove_dir_all(&storage_directory); + } + + #[tokio::test] + async fn test_blob_magic_mismatch() { + let storage_directory = + env::temp_dir().join(format!("test_magic_mismatch_{}", rand::random::())); + let storage = Storage::new(Config { + storage_directory: storage_directory.clone(), + maximum_buffer_size: 1024 * 1024, + }); + + // Create the partition directory + let partition_path = storage_directory.join("partition"); + std::fs::create_dir_all(&partition_path).unwrap(); + + // Manually create a file with invalid magic bytes + let bad_magic_path = partition_path.join(hex(b"bad_magic")); + std::fs::write(&bad_magic_path, vec![0u8; 
Header::SIZE]).unwrap(); + + // Opening should fail with magic mismatch error + let result = storage.open("partition", b"bad_magic", TEST_VERSIONS).await; + match result { + Err(crate::Error::BlobMagicMismatch { found }) => { + assert_eq!(found, [0u8; Header::MAGIC_LENGTH]); + } + Err(err) => panic!("expected BlobMagicMismatch error, got: {:?}", err), + Ok(_) => panic!("expected error, got Ok"), + } + + let _ = std::fs::remove_dir_all(&storage_directory); + } + + #[tokio::test] + async fn test_blob_version_mismatch() { + let mut rng = rand::rngs::StdRng::from_entropy(); + let storage_directory = + env::temp_dir().join(format!("storage_tokio_version_{}", rng.gen::())); + let config = Config::new(storage_directory.clone(), 2 * 1024 * 1024); + let storage = Storage::new(config); + + // Create blob with version 1 + storage.open("partition", b"v1", 1..=1).await.unwrap(); + + // Try to open with version range 2..=2 + let result = storage.open("partition", b"v1", 2..=2).await; + match result { + Err(crate::Error::BlobApplicationVersionMismatch { expected, found }) => { + assert_eq!(expected, 2..=2); + assert_eq!(found, 1); + } + Err(err) => panic!( + "expected BlobApplicationVersionMismatch error, got: {:?}", + err + ), + Ok(_) => panic!("expected error, got Ok"), + } + + let _ = std::fs::remove_dir_all(&storage_directory); + } } diff --git a/runtime/src/storage/tokio/unix.rs b/runtime/src/storage/tokio/unix.rs index 422d5685ea..73ee686b09 100644 --- a/runtime/src/storage/tokio/unix.rs +++ b/runtime/src/storage/tokio/unix.rs @@ -1,4 +1,4 @@ -use crate::Error; +use crate::{Error, Header}; use commonware_utils::{hex, StableBuf}; use std::{fs::File, os::unix::fs::FileExt, sync::Arc}; use tokio::task; @@ -28,6 +28,9 @@ impl crate::Blob for Blob { ) -> Result { let mut buf = buf.into(); let file = self.file.clone(); + let offset = offset + .checked_add(Header::SIZE_U64) + .ok_or(Error::OffsetOverflow)?; task::spawn_blocking(move || { file.read_exact_at(buf.as_mut(), offset)?; Ok(buf) @@ -39,6 +42,9 @@ impl crate::Blob for Blob { async fn write_at(&self, buf: impl Into + Send, offset: u64) -> Result<(), Error> { let buf = buf.into(); let file = self.file.clone(); + let offset = offset + .checked_add(Header::SIZE_U64) + .ok_or(Error::OffsetOverflow)?; task::spawn_blocking(move || { file.write_all_at(buf.as_ref(), offset)?; Ok(()) @@ -49,6 +55,9 @@ impl crate::Blob for Blob { async fn resize(&self, len: u64) -> Result<(), Error> { let file = self.file.clone(); + let len = len + .checked_add(Header::SIZE_U64) + .ok_or(Error::OffsetOverflow)?; task::spawn_blocking(move || file.set_len(len)) .await .map_err(|e| e.into()) diff --git a/runtime/src/tokio/runtime.rs b/runtime/src/tokio/runtime.rs index 51085606fa..1d754127be 100644 --- a/runtime/src/tokio/runtime.rs +++ b/runtime/src/tokio/runtime.rs @@ -663,8 +663,13 @@ impl CryptoRng for Context {} impl crate::Storage for Context { type Blob = ::Blob; - async fn open(&self, partition: &str, name: &[u8]) -> Result<(Self::Blob, u64), Error> { - self.storage.open(partition, name).await + async fn open( + &self, + partition: &str, + name: &[u8], + versions: std::ops::RangeInclusive, + ) -> Result<(Self::Blob, u64, u16), Error> { + self.storage.open(partition, name, versions).await } async fn remove(&self, partition: &str, name: Option<&[u8]>) -> Result<(), Error> { diff --git a/runtime/src/utils/buffer/append.rs b/runtime/src/utils/buffer/append.rs index 27b35d7e17..c048d1f44d 100644 --- a/runtime/src/utils/buffer/append.rs +++ 
b/runtime/src/utils/buffer/append.rs @@ -329,6 +329,9 @@ mod tests { const PAGE_SIZE: usize = 1024; const BUFFER_SIZE: usize = PAGE_SIZE * 2; + /// Default version range for tests + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + #[test_traced] #[should_panic(expected = "not implemented")] fn test_append_blob_write_panics() { @@ -336,8 +339,8 @@ mod tests { let executor = deterministic::Runner::default(); // Start the test within the executor executor.start(|context| async move { - let (blob, size) = context - .open("test", "blob".as_bytes()) + let (blob, size, _) = context + .open("test", "blob".as_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); let pool_ref = PoolRef::new(NZUsize!(PAGE_SIZE), NZUsize!(10)); @@ -355,8 +358,8 @@ mod tests { let executor = deterministic::Runner::default(); // Start the test within the executor executor.start(|context| async move { - let (blob, size) = context - .open("test", "blob".as_bytes()) + let (blob, size, _) = context + .open("test", "blob".as_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert_eq!(size, 0); @@ -375,8 +378,8 @@ mod tests { blob.sync().await.expect("Failed to sync blob"); // Make sure blob has expected size when reopened. - let (blob, size) = context - .open("test", "blob".as_bytes()) + let (blob, size, _) = context + .open("test", "blob".as_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert_eq!(size, 11 * PAGE_SIZE as u64); @@ -390,8 +393,8 @@ mod tests { let executor = deterministic::Runner::default(); // Start the test within the executor executor.start(|context| async move { - let (blob, size) = context - .open("test", "blob".as_bytes()) + let (blob, size, _) = context + .open("test", "blob".as_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert_eq!(size, 0); @@ -485,8 +488,8 @@ mod tests { fn test_append_blob_tracks_physical_size() { let executor = deterministic::Runner::default(); executor.start(|context| async move { - let (blob, size) = context - .open("test", "blob".as_bytes()) + let (blob, size, _) = context + .open("test", "blob".as_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); @@ -524,8 +527,8 @@ mod tests { assert_eq!(blob.buffer.read().await.1, 250); // Close and reopen. - let (blob, size) = context - .open("test", "blob".as_bytes()) + let (blob, size, _) = context + .open("test", "blob".as_bytes(), TEST_VERSIONS) .await .expect("Failed to reopen blob"); diff --git a/runtime/src/utils/buffer/mod.rs b/runtime/src/utils/buffer/mod.rs index 87083648b8..4700190f0a 100644 --- a/runtime/src/utils/buffer/mod.rs +++ b/runtime/src/utils/buffer/mod.rs @@ -18,13 +18,19 @@ mod tests { use commonware_macros::test_traced; use commonware_utils::NZUsize; + /// Default version range for tests + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + #[test_traced] fn test_read_basic() { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test basic buffered reading functionality with sequential reads let data = b"Hello, world! 
This is a test."; - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -63,7 +69,10 @@ mod tests { executor.start(|context| async move { // Test reading data that spans multiple buffer refills let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -96,7 +105,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -130,7 +142,10 @@ mod tests { executor.start(|context| async move { // Test reader behavior with known blob size limits let data = b"This is a test with known size limitations."; - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -174,7 +189,10 @@ mod tests { // Test reading large amounts of data in chunks let data_size = 1024 * 256; // 256KB of data let data = vec![0x42; data_size]; - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); blob.write_at(data.clone(), 0).await.unwrap(); let size = data.len() as u64; @@ -222,7 +240,10 @@ mod tests { let data_size = buffer_size * 5 / 2; // 2.5 buffers let data = vec![0x37; data_size]; - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); blob.write_at(data.clone(), 0).await.unwrap(); let size = data.len() as u64; @@ -257,7 +278,10 @@ mod tests { executor.start(|context| async move { // Create a memory blob with some test data let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -309,7 +333,10 @@ mod tests { executor.start(|context| async move { // Create a memory blob with longer data let data = vec![0x41; 1000]; // 1000 'A' characters - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); blob.write_at(data.clone(), 0).await.unwrap(); let size = data.len() as u64; @@ -346,7 +373,10 @@ mod tests { executor.start(|context| async move { // Create a memory blob with some test data let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + 
.open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let data_len = data.len() as u64; @@ -359,7 +389,10 @@ mod tests { reader.resize(resize_len).await.unwrap(); // Reopen to check truncation - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, resize_len, "Blob should be resized to half size"); // Create a new buffer and read to verify truncation @@ -382,7 +415,10 @@ mod tests { new_reader.resize(data_len * 2).await.unwrap(); // Reopen to check resize - let (blob, new_size) = context.open("partition", b"test").await.unwrap(); + let (blob, new_size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(new_size, data_len * 2); // Create a new buffer and read to verify resize @@ -407,7 +443,10 @@ mod tests { // Create a memory blob with some test data let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; let data_len = data.len() as u64; - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); @@ -418,7 +457,10 @@ mod tests { reader.resize(0).await.unwrap(); // Reopen to check truncation - let (blob, size) = context.open("partition", b"test").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"test", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0, "Blob should be resized to zero"); // Create a new buffer and try to read (should fail) @@ -436,7 +478,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test basic buffered write and sync functionality - let (blob, size) = context.open("partition", b"write_basic").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"write_basic", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); let writer = Write::new(blob.clone(), size, NZUsize!(8)); @@ -446,7 +491,10 @@ mod tests { assert_eq!(writer.size().await, 5); // Verify data was written correctly - let (blob, size) = context.open("partition", b"write_basic").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"write_basic", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 5); let mut reader = Read::new(blob, size, NZUsize!(8)); let mut buf = [0u8; 5]; @@ -460,7 +508,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writes that cause buffer flushes due to capacity limits - let (blob, size) = context.open("partition", b"write_multi").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"write_multi", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); let writer = Write::new(blob.clone(), size, NZUsize!(4)); @@ -471,7 +522,10 @@ mod tests { writer.sync().await.unwrap(); // Verify the final result - let (blob, size) = context.open("partition", b"write_multi").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"write_multi", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 7); let mut reader = Read::new(blob, size, NZUsize!(4)); let mut buf = [0u8; 7]; @@ -485,7 +539,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writing data larger than buffer capacity (direct write) - let (blob, size) 
= context.open("partition", b"write_large").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"write_large", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 0); let writer = Write::new(blob.clone(), size, NZUsize!(4)); @@ -500,7 +557,10 @@ mod tests { assert_eq!(writer.size().await, 26); // Verify the complete data - let (blob, size) = context.open("partition", b"write_large").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"write_large", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 26); let mut reader = Read::new(blob, size, NZUsize!(4)); let mut buf = [0u8; 26]; @@ -514,7 +574,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test sequential appends that exceed buffer capacity - let (blob, size) = context.open("partition", b"append_buf").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"append_buf", TEST_VERSIONS) + .await + .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Write data that fits in buffer @@ -527,7 +590,10 @@ mod tests { assert_eq!(writer.size().await, 11); // Verify the complete result - let (blob, size) = context.open("partition", b"append_buf").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"append_buf", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 11); let mut reader = Read::new(blob, size, NZUsize!(10)); let mut buf = vec![0u8; 11]; @@ -541,7 +607,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test overwriting data within the buffer and extending it - let (blob, size) = context.open("partition", b"middle_buf").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"middle_buf", TEST_VERSIONS) + .await + .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(20)); // Initial write @@ -554,7 +623,10 @@ mod tests { writer.sync().await.unwrap(); // Verify overwrite result - let (blob, size) = context.open("partition", b"middle_buf").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"middle_buf", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 10); let mut reader = Read::new(blob, size, NZUsize!(10)); let mut buf = vec![0u8; 10]; @@ -569,7 +641,10 @@ mod tests { writer.sync().await.unwrap(); // Verify final result - let (blob, size) = context.open("partition", b"middle_buf").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"middle_buf", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 20); let mut reader = Read::new(blob, size, NZUsize!(20)); let mut buf = vec![0u8; 20]; @@ -583,7 +658,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writing at offsets before the current buffer position - let (blob, size) = context.open("partition", b"before_buf").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"before_buf", TEST_VERSIONS) + .await + .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Write data at a later offset first @@ -596,7 +674,10 @@ mod tests { writer.sync().await.unwrap(); // Verify data placement with gap - let (blob, size) = context.open("partition", b"before_buf").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"before_buf", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 20); let mut reader = Read::new(blob, size, NZUsize!(20)); let mut buf = vec![0u8; 20]; @@ -613,7 +694,10 @@ mod tests { 
assert_eq!(writer.size().await, 20); // Verify gap is filled - let (blob, size) = context.open("partition", b"before_buf").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"before_buf", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 20); let mut reader = Read::new(blob, size, NZUsize!(20)); let mut buf = vec![0u8; 20]; @@ -628,7 +712,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test blob resize functionality and subsequent writes - let (blob, size) = context.open("partition", b"resize_write").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"resize_write", TEST_VERSIONS) + .await + .unwrap(); let writer = Write::new(blob, size, NZUsize!(10)); // Write initial data @@ -637,8 +724,10 @@ mod tests { writer.sync().await.unwrap(); assert_eq!(writer.size().await, 11); - let (blob_check, size_check) = - context.open("partition", b"resize_write").await.unwrap(); + let (blob_check, size_check, _) = context + .open("partition", b"resize_write", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size_check, 11); drop(blob_check); @@ -648,7 +737,10 @@ mod tests { writer.sync().await.unwrap(); // Verify resize - let (blob, size) = context.open("partition", b"resize_write").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"resize_write", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 5); let mut reader = Read::new(blob, size, NZUsize!(5)); let mut buf = vec![0u8; 5]; @@ -661,7 +753,10 @@ mod tests { writer.sync().await.unwrap(); // Verify overwrite - let (blob, size) = context.open("partition", b"resize_write").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"resize_write", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 5); let mut reader = Read::new(blob, size, NZUsize!(5)); let mut buf = vec![0u8; 5]; @@ -674,7 +769,10 @@ mod tests { writer.sync().await.unwrap(); // Verify resize - let (blob, size) = context.open("partition", b"resize_write").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"resize_write", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size, 10); let mut reader = Read::new(blob, size, NZUsize!(10)); let mut buf = vec![0u8; 10]; @@ -683,7 +781,10 @@ mod tests { assert_eq!(&buf[5..10], [0u8; 5]); // Test resize to zero - let (blob_zero, size) = context.open("partition", b"resize_zero").await.unwrap(); + let (blob_zero, size, _) = context + .open("partition", b"resize_zero", TEST_VERSIONS) + .await + .unwrap(); let writer_zero = Write::new(blob_zero.clone(), size, NZUsize!(10)); writer_zero .write_at(b"some data".to_vec(), 0) @@ -698,7 +799,10 @@ mod tests { assert_eq!(writer_zero.size().await, 0); // Ensure the blob is empty - let (_, size_z) = context.open("partition", b"resize_zero").await.unwrap(); + let (_, size_z, _) = context + .open("partition", b"resize_zero", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size_z, 0); }); } @@ -708,7 +812,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test reading through writer's read_at method (buffer + blob reads) - let (blob, size) = context.open("partition", b"read_at_writer").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"read_at_writer", TEST_VERSIONS) + .await + .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Write data that stays in buffer @@ -759,8 +866,10 @@ mod tests { // Verify complete content by reopening writer.sync().await.unwrap(); 
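            // sync() flushes the write buffer to the underlying blob, so the
            // reader reopened below observes all 30 bytes.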
assert_eq!(writer.size().await, 30); - let (final_blob, final_size) = - context.open("partition", b"read_at_writer").await.unwrap(); + let (final_blob, final_size, _) = context + .open("partition", b"read_at_writer", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(final_size, 30); let mut final_reader = Read::new(final_blob, final_size, NZUsize!(30)); let mut full_content = vec![0u8; 30]; @@ -777,7 +886,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writes that cannot be merged into buffer (non-contiguous/too large) - let (blob, size) = context.open("partition", b"write_straddle").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"write_straddle", TEST_VERSIONS) + .await + .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Fill buffer completely @@ -791,8 +903,10 @@ mod tests { assert_eq!(writer.size().await, 18); // Verify data with gap - let (blob_check, size_check) = - context.open("partition", b"write_straddle").await.unwrap(); + let (blob_check, size_check, _) = context + .open("partition", b"write_straddle", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size_check, 18); let mut reader = Read::new(blob_check, size_check, NZUsize!(20)); let mut buf = vec![0u8; 18]; @@ -804,7 +918,10 @@ mod tests { assert_eq!(buf, expected); // Test write that exceeds buffer capacity - let (blob2, size) = context.open("partition", b"write_straddle2").await.unwrap(); + let (blob2, size, _) = context + .open("partition", b"write_straddle2", TEST_VERSIONS) + .await + .unwrap(); let writer2 = Write::new(blob2.clone(), size, NZUsize!(10)); writer2.write_at(b"0123456789".to_vec(), 0).await.unwrap(); assert_eq!(writer2.size().await, 10); @@ -816,8 +933,10 @@ mod tests { assert_eq!(writer2.size().await, 17); // Verify overwrite result - let (blob_check2, size_check2) = - context.open("partition", b"write_straddle2").await.unwrap(); + let (blob_check2, size_check2, _) = context + .open("partition", b"write_straddle2", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size_check2, 17); let mut reader2 = Read::new(blob_check2, size_check2, NZUsize!(20)); let mut buf2 = vec![0u8; 17]; @@ -831,7 +950,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test that closing writer flushes and persists buffered data - let (blob_orig, size) = context.open("partition", b"write_close").await.unwrap(); + let (blob_orig, size, _) = context + .open("partition", b"write_close", TEST_VERSIONS) + .await + .unwrap(); let writer = Write::new(blob_orig.clone(), size, NZUsize!(8)); writer.write_at(b"pending".to_vec(), 0).await.unwrap(); assert_eq!(writer.size().await, 7); @@ -840,7 +962,10 @@ mod tests { writer.sync().await.unwrap(); // Verify data persistence - let (blob_check, size_check) = context.open("partition", b"write_close").await.unwrap(); + let (blob_check, size_check, _) = context + .open("partition", b"write_close", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size_check, 7); let mut reader = Read::new(blob_check, size_check, NZUsize!(8)); let mut buf = [0u8; 7]; @@ -854,8 +979,8 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test direct writes when data exceeds buffer capacity - let (blob, size) = context - .open("partition", b"write_direct_size") + let (blob, size, _) = context + .open("partition", b"write_direct_size", TEST_VERSIONS) .await .unwrap(); let writer = Write::new(blob.clone(), size, 
NZUsize!(5)); @@ -869,8 +994,8 @@ mod tests { writer.sync().await.unwrap(); // Verify direct write worked - let (blob_check, size_check) = context - .open("partition", b"write_direct_size") + let (blob_check, size_check, _) = context + .open("partition", b"write_direct_size", TEST_VERSIONS) .await .unwrap(); assert_eq!(size_check, 10); @@ -891,8 +1016,8 @@ mod tests { writer.sync().await.unwrap(); // Verify final state - let (blob_check2, size_check2) = context - .open("partition", b"write_direct_size") + let (blob_check2, size_check2, _) = context + .open("partition", b"write_direct_size", TEST_VERSIONS) .await .unwrap(); assert_eq!(size_check2, 13); @@ -908,8 +1033,8 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test complex buffer operations: overwrite and extend within capacity - let (blob, size) = context - .open("partition", b"overwrite_extend_buf") + let (blob, size, _) = context + .open("partition", b"overwrite_extend_buf", TEST_VERSIONS) .await .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(15)); @@ -930,8 +1055,8 @@ mod tests { writer.sync().await.unwrap(); // Verify persisted result - let (blob_check, size_check) = context - .open("partition", b"overwrite_extend_buf") + let (blob_check, size_check, _) = context + .open("partition", b"overwrite_extend_buf", TEST_VERSIONS) .await .unwrap(); assert_eq!(size_check, 15); @@ -947,7 +1072,10 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writing at the current logical end of the blob - let (blob, size) = context.open("partition", b"write_end").await.unwrap(); + let (blob, size, _) = context + .open("partition", b"write_end", TEST_VERSIONS) + .await + .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(20)); // Write initial data @@ -964,7 +1092,10 @@ mod tests { writer.sync().await.unwrap(); // Verify complete result - let (blob_check, size_check) = context.open("partition", b"write_end").await.unwrap(); + let (blob_check, size_check, _) = context + .open("partition", b"write_end", TEST_VERSIONS) + .await + .unwrap(); assert_eq!(size_check, 13); let mut reader = Read::new(blob_check, size_check, NZUsize!(13)); let mut buf = vec![0u8; 13]; @@ -978,8 +1109,12 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test multiple appends using writer.size() - let (blob, size) = context - .open("partition", b"write_multiple_appends_at_size") + let (blob, size, _) = context + .open( + "partition", + b"write_multiple_appends_at_size", + TEST_VERSIONS, + ) .await .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(5)); // Small buffer @@ -1009,8 +1144,12 @@ mod tests { assert_eq!(writer.size().await, 9); // Verify final content - let (blob_check, size_check) = context - .open("partition", b"write_multiple_appends_at_size") + let (blob_check, size_check, _) = context + .open( + "partition", + b"write_multiple_appends_at_size", + TEST_VERSIONS, + ) .await .unwrap(); assert_eq!(size_check, 9); @@ -1026,8 +1165,12 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writing non-contiguously, then appending at the new size - let (blob, size) = context - .open("partition", b"write_non_contiguous_then_append") + let (blob, size, _) = context + .open( + "partition", + b"write_non_contiguous_then_append", + TEST_VERSIONS, + ) .await .unwrap(); let writer = Write::new(blob.clone(), size, 
NZUsize!(10)); @@ -1053,8 +1196,12 @@ mod tests { assert_eq!(writer.size().await, 35); // Verify final content - let (blob_check, size_check) = context - .open("partition", b"write_non_contiguous_then_append") + let (blob_check, size_check, _) = context + .open( + "partition", + b"write_non_contiguous_then_append", + TEST_VERSIONS, + ) .await .unwrap(); assert_eq!(size_check, 35); @@ -1075,8 +1222,8 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test truncating, then appending at the new size - let (blob, size) = context - .open("partition", b"resize_then_append_at_size") + let (blob, size, _) = context + .open("partition", b"resize_then_append_at_size", TEST_VERSIONS) .await .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); @@ -1110,8 +1257,8 @@ mod tests { assert_eq!(writer.size().await, 10); // Verify final content - let (blob_check, size_check) = context - .open("partition", b"resize_then_append_at_size") + let (blob_check, size_check, _) = context + .open("partition", b"resize_then_append_at_size", TEST_VERSIONS) .await .unwrap(); assert_eq!(size_check, 10); diff --git a/runtime/src/utils/buffer/pool.rs b/runtime/src/utils/buffer/pool.rs index 7ab0381b24..7ae12eac28 100644 --- a/runtime/src/utils/buffer/pool.rs +++ b/runtime/src/utils/buffer/pool.rs @@ -407,6 +407,9 @@ mod tests { const PAGE_SIZE: usize = 1024; + /// Default version range for tests + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + #[test_traced] fn test_pool_basic() { let mut pool: Pool = Pool::new(10); @@ -455,8 +458,8 @@ mod tests { // Start the test within the executor executor.start(|context| async move { // Populate a blob with 11 consecutive pages of data. - let (blob, size) = context - .open("test", "blob".as_bytes()) + let (blob, size, _) = context + .open("test", "blob".as_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert_eq!(size, 0); diff --git a/runtime/src/utils/buffer/read.rs b/runtime/src/utils/buffer/read.rs index fc91de1299..55b51879cd 100644 --- a/runtime/src/utils/buffer/read.rs +++ b/runtime/src/utils/buffer/read.rs @@ -11,10 +11,13 @@ use std::num::NonZeroUsize; /// use commonware_utils::NZUsize; /// use commonware_runtime::{Runner, buffer::Read, Blob, Error, Storage, deterministic}; /// +/// const BLOB_VERSION: u16 = 0; +/// /// let executor = deterministic::Runner::default(); /// executor.start(|context| async move { /// // Open a blob and add some data (e.g., a journal file) -/// let (blob, size) = context.open("my_partition", b"my_data").await.expect("unable to open blob"); +/// let (blob, size, blob_version) = context.open("my_partition", b"my_data", BLOB_VERSION..=BLOB_VERSION).await.expect("unable to open blob"); +/// assert_eq!(blob_version, BLOB_VERSION); /// let data = b"Hello, world! 
This is a test.".to_vec(); /// let size = data.len() as u64; /// blob.write_at(data, 0).await.expect("unable to write data"); diff --git a/runtime/src/utils/buffer/write.rs b/runtime/src/utils/buffer/write.rs index e18189f3c2..a9e7fd9c70 100644 --- a/runtime/src/utils/buffer/write.rs +++ b/runtime/src/utils/buffer/write.rs @@ -14,7 +14,7 @@ use std::{num::NonZeroUsize, sync::Arc}; /// let executor = deterministic::Runner::default(); /// executor.start(|context| async move { /// // Open a blob for writing -/// let (blob, size) = context.open("my_partition", b"my_data").await.expect("unable to open blob"); +/// let (blob, size, _) = context.open("my_partition", b"my_data", 0..=0).await.expect("unable to open blob"); /// assert_eq!(size, 0); /// /// // Create a buffered writer with 16-byte buffer @@ -28,7 +28,7 @@ use std::{num::NonZeroUsize, sync::Arc}; /// blob.sync().await.expect("sync failed"); /// /// // Read back the data to verify -/// let (blob, size) = context.open("my_partition", b"my_data").await.expect("unable to reopen blob"); +/// let (blob, size, _) = context.open("my_partition", b"my_data", 0..=0).await.expect("unable to reopen blob"); /// let mut reader = Read::new(blob, size, NZUsize!(8)); /// let mut buf = vec![0u8; size as usize]; /// reader.read_exact(&mut buf, size as usize).await.expect("read failed"); diff --git a/runtime/src/utils/cell.rs b/runtime/src/utils/cell.rs index b55fcd7e8c..3810f81cfc 100644 --- a/runtime/src/utils/cell.rs +++ b/runtime/src/utils/cell.rs @@ -5,6 +5,7 @@ use rand::{CryptoRng, RngCore}; use std::{ future::Future, net::SocketAddr, + ops::RangeInclusive, time::{Duration, SystemTime}, }; @@ -222,8 +223,9 @@ where &self, partition: &str, name: &[u8], - ) -> impl Future> + Send { - self.as_present().open(partition, name) + versions: RangeInclusive, + ) -> impl Future> + Send { + self.as_present().open(partition, name, versions) } fn remove( diff --git a/storage/src/archive/prunable/mod.rs b/storage/src/archive/prunable/mod.rs index dcbb79fe6f..c111ebeb9d 100644 --- a/storage/src/archive/prunable/mod.rs +++ b/storage/src/archive/prunable/mod.rs @@ -198,6 +198,7 @@ mod tests { const DEFAULT_REPLAY_BUFFER: usize = 4096; const PAGE_SIZE: NonZeroUsize = NZUsize!(1024); const PAGE_CACHE_SIZE: NonZeroUsize = NZUsize!(10); + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; fn test_key(key: &str) -> FixedBytes<64> { let mut buf = [0u8; 64]; @@ -295,8 +296,8 @@ mod tests { // Corrupt the value let section = (index / DEFAULT_ITEMS_PER_SECTION) * DEFAULT_ITEMS_PER_SECTION; - let (blob, _) = context - .open("test_partition", §ion.to_be_bytes()) + let (blob, _, _) = context + .open("test_partition", §ion.to_be_bytes(), TEST_VERSIONS) .await .unwrap(); let value_location = 4 /* journal size */ + UInt(1u64).encode_size() as u64 /* index */ + 64 + 4 /* value length */; diff --git a/storage/src/cache/mod.rs b/storage/src/cache/mod.rs index 0c4f170707..ca2280ae18 100644 --- a/storage/src/cache/mod.rs +++ b/storage/src/cache/mod.rs @@ -138,6 +138,7 @@ mod tests { const DEFAULT_REPLAY_BUFFER: usize = 4096; const PAGE_SIZE: NonZeroUsize = NZUsize!(1024); const PAGE_CACHE_SIZE: NonZeroUsize = NZUsize!(10); + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; #[test_traced] fn test_cache_compression_then_none() { @@ -219,8 +220,8 @@ mod tests { // Corrupt the value let section = (index / DEFAULT_ITEMS_PER_BLOB) * DEFAULT_ITEMS_PER_BLOB; - let (blob, _) = context - .open("test_partition", §ion.to_be_bytes()) + let (blob, _, _) = context + .open("test_partition", 
§ion.to_be_bytes(), TEST_VERSIONS) .await .unwrap(); let value_location = 4 /* journal size */ + UInt(1u64).encode_size() as u64 /* index */ + 4 /* value length */; diff --git a/storage/src/freezer/mod.rs b/storage/src/freezer/mod.rs index 7dda5392ae..d27e8d9214 100644 --- a/storage/src/freezer/mod.rs +++ b/storage/src/freezer/mod.rs @@ -255,6 +255,7 @@ mod tests { const DEFAULT_TABLE_REPLAY_BUFFER: usize = 64 * 1024; // 64KB const PAGE_SIZE: NonZeroUsize = NZUsize!(1024); const PAGE_CACHE_SIZE: NonZeroUsize = NZUsize!(10); + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; fn test_key(key: &str) -> FixedBytes<64> { let mut buf = [0u8; 64]; @@ -703,7 +704,10 @@ mod tests { // Corrupt the table by writing partial entry { - let (blob, _) = context.open(&cfg.table_partition, b"table").await.unwrap(); + let (blob, _, _) = context + .open(&cfg.table_partition, b"table", TEST_VERSIONS) + .await + .unwrap(); // Write incomplete table entry (only 10 bytes instead of 24) blob.write_at(vec![0xFF; 10], 0).await.unwrap(); blob.sync().await.unwrap(); @@ -763,7 +767,10 @@ mod tests { // Corrupt the CRC in the index entry { - let (blob, _) = context.open(&cfg.table_partition, b"table").await.unwrap(); + let (blob, _, _) = context + .open(&cfg.table_partition, b"table", TEST_VERSIONS) + .await + .unwrap(); // Read the first entry let entry_data = blob.read_at(vec![0u8; 24], 0).await.unwrap(); let mut corrupted = entry_data.as_ref().to_vec(); @@ -827,7 +834,10 @@ mod tests { // Add extra bytes to the table blob { - let (blob, size) = context.open(&cfg.table_partition, b"table").await.unwrap(); + let (blob, size, _) = context + .open(&cfg.table_partition, b"table", TEST_VERSIONS) + .await + .unwrap(); // Append garbage data blob.write_at(hex!("0xdeadbeef").to_vec(), size) .await diff --git a/storage/src/freezer/storage.rs b/storage/src/freezer/storage.rs index 51a0fd74a8..e14f5dd588 100644 --- a/storage/src/freezer/storage.rs +++ b/storage/src/freezer/storage.rs @@ -165,6 +165,9 @@ impl FixedSize for Checkpoint { /// Name of the table blob. const TABLE_BLOB_NAME: &[u8] = b"table"; +/// Current version of the freezer blob format. +const BLOB_VERSION: std::ops::RangeInclusive = 0..=0; + /// Single table entry stored in the table blob. #[derive(Debug, Clone, PartialEq)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] @@ -567,8 +570,8 @@ impl Freezer { let mut journal = Journal::init(context.with_label("journal"), journal_config).await?; // Open table blob - let (table, table_len) = context - .open(&config.table_partition, TABLE_BLOB_NAME) + let (table, table_len, _) = context + .open(&config.table_partition, TABLE_BLOB_NAME, BLOB_VERSION) .await?; // Determine checkpoint based on initialization scenario diff --git a/storage/src/journal/contiguous/fixed.rs b/storage/src/journal/contiguous/fixed.rs index d2b70df875..55746c6fb4 100644 --- a/storage/src/journal/contiguous/fixed.rs +++ b/storage/src/journal/contiguous/fixed.rs @@ -80,6 +80,9 @@ use std::{ }; use tracing::{debug, trace, warn}; +/// Current version of the fixed journal blob format. +pub(crate) const BLOB_VERSION: std::ops::RangeInclusive = 0..=0; + /// Configuration for `Journal` storage. 
#[derive(Clone)] pub struct Config { @@ -160,8 +163,8 @@ impl> Journal { Err(err) => return Err(Error::Runtime(err)), }; for name in stored_blobs { - let (blob, size) = context - .open(&cfg.partition, &name) + let (blob, size, _) = context + .open(&cfg.partition, &name, BLOB_VERSION) .await .map_err(Error::Runtime)?; let index = match name.try_into() { @@ -190,7 +193,9 @@ impl> Journal { } } else { debug!("no blobs found"); - let (blob, size) = context.open(&cfg.partition, &0u64.to_be_bytes()).await?; + let (blob, size, _) = context + .open(&cfg.partition, &0u64.to_be_bytes(), BLOB_VERSION) + .await?; assert_eq!(size, 0); blobs.insert(0, (blob, size)); } @@ -222,8 +227,8 @@ impl> Journal { ); blobs.insert(tail_index, (tail, tail_size)); tail_index += 1; - (tail, tail_size) = context - .open(&cfg.partition, &tail_index.to_be_bytes()) + (tail, tail_size, _) = context + .open(&cfg.partition, &tail_index.to_be_bytes(), BLOB_VERSION) .await?; assert_eq!(tail_size, 0); tracked.inc(); @@ -355,9 +360,13 @@ impl> Journal { // Create a new empty blob. let next_blob_index = self.tail_index + 1; debug!(blob = next_blob_index, "creating next blob"); - let (next_blob, size) = self + let (next_blob, size, _) = self .context - .open(&self.cfg.partition, &next_blob_index.to_be_bytes()) + .open( + &self.cfg.partition, + &next_blob_index.to_be_bytes(), + BLOB_VERSION, + ) .await?; assert_eq!(size, 0); let next_blob = Append::new( @@ -721,6 +730,8 @@ mod tests { } } + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + #[test_traced] fn test_fixed_journal_append_and_prune() { // Initialize the deterministic context @@ -971,8 +982,8 @@ mod tests { // Corrupt one of the checksums and make sure it's detected. let checksum_offset = Digest::SIZE as u64 + (ITEMS_PER_BLOB.get() / 2) * (Digest::SIZE + u32::SIZE) as u64; - let (blob, _) = context - .open(&cfg.partition, &40u64.to_be_bytes()) + let (blob, _, _) = context + .open(&cfg.partition, &40u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); // Write incorrect checksum @@ -1051,8 +1062,8 @@ mod tests { assert!(buffer.contains("tracked 101")); // Manually truncate a non-tail blob to make sure it's detected during initialization. - let (blob, size) = context - .open(&cfg.partition, &40u64.to_be_bytes()) + let (blob, size, _) = context + .open(&cfg.partition, &40u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); blob.resize(size - 1).await.expect("Failed to corrupt blob"); @@ -1100,8 +1111,8 @@ mod tests { // Truncate the tail blob by one byte, which should result in the 3rd item being // trimmed. - let (blob, size) = context - .open(&cfg.partition, &1u64.to_be_bytes()) + let (blob, size, _) = context + .open(&cfg.partition, &1u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); blob.resize(size - 1).await.expect("Failed to corrupt blob"); @@ -1124,8 +1135,8 @@ mod tests { assert_eq!(journal.size(), item_count - 2); // Corrupt the last item, ensuring last blob is trimmed to empty state. - let (blob, size) = context - .open(&cfg.partition, &1u64.to_be_bytes()) + let (blob, size, _) = context + .open(&cfg.partition, &1u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); blob.resize(size - 1).await.expect("Failed to corrupt blob"); @@ -1236,8 +1247,8 @@ mod tests { drop(journal); // Manually truncate most recent blob to simulate a partial write. 
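        // Dropping the final byte leaves a partially written last item whose
        // checksum can no longer verify, so init should trim it rather than fail.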
- let (blob, size) = context - .open(&cfg.partition, &1u64.to_be_bytes()) + let (blob, size, _) = context + .open(&cfg.partition, &1u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); // truncate the most recent blob by 1 byte which corrupts the most recent item @@ -1294,8 +1305,8 @@ mod tests { drop(journal); // Manually truncate most recent blob to simulate a partial write. - let (blob, size) = context - .open(&cfg.partition, &0u64.to_be_bytes()) + let (blob, size, _) = context + .open(&cfg.partition, &0u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); // Truncate the most recent blob by 1 byte which corrupts the one appended item @@ -1344,8 +1355,8 @@ mod tests { // Manually extend the blob by an amount at least some multiple of the chunk size to // simulate a failure where the file was extended, but no bytes were written due to // failure. - let (blob, size) = context - .open(&cfg.partition, &0u64.to_be_bytes()) + let (blob, size, _) = context + .open(&cfg.partition, &0u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); blob.write_at(vec![0u8; Digest::SIZE * 3 - 1], size) @@ -1519,8 +1530,8 @@ mod tests { drop(journal); // Hash blob contents - let (blob, size) = context - .open(&cfg.partition, &0u64.to_be_bytes()) + let (blob, size, _) = context + .open(&cfg.partition, &0u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert!(size > 0); @@ -1534,8 +1545,8 @@ mod tests { "ed2ea67208cde2ee8c16cca5aa4f369f55b1402258c6b7760e5baf134e38944a", ); blob.sync().await.expect("Failed to sync blob"); - let (blob, size) = context - .open(&cfg.partition, &1u64.to_be_bytes()) + let (blob, size, _) = context + .open(&cfg.partition, &1u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert!(size > 0); diff --git a/storage/src/journal/segmented/variable.rs b/storage/src/journal/segmented/variable.rs index 9e68b8fef9..8ff40083f9 100644 --- a/storage/src/journal/segmented/variable.rs +++ b/storage/src/journal/segmented/variable.rs @@ -114,6 +114,9 @@ use std::{ use tracing::{debug, trace, warn}; use zstd::{bulk::compress, decode_all}; +/// Current version of the variable journal blob format. +pub(crate) const BLOB_VERSION: std::ops::RangeInclusive = 0..=0; + /// Configuration for `Journal` storage. 
#[derive(Clone)] pub struct Config { @@ -188,7 +191,7 @@ impl Journal { Err(err) => return Err(Error::Runtime(err)), }; for name in stored_blobs { - let (blob, size) = context.open(&cfg.partition, &name).await?; + let (blob, size, _) = context.open(&cfg.partition, &name, BLOB_VERSION).await?; let hex_name = hex(&name); let section = match name.try_into() { Ok(section) => u64::from_be_bytes(section), @@ -533,7 +536,10 @@ impl Journal { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => { let name = section.to_be_bytes(); - let (blob, size) = self.context.open(&self.cfg.partition, &name).await?; + let (blob, size, _) = self + .context + .open(&self.cfg.partition, &name, BLOB_VERSION) + .await?; let blob = Append::new( blob, size, @@ -1223,8 +1229,8 @@ mod tests { // Manually create a blob with an invalid name (not 8 bytes) let invalid_blob_name = b"invalid"; // Less than 8 bytes - let (blob, _) = context - .open(&cfg.partition, invalid_blob_name) + let (blob, _, _) = context + .open(&cfg.partition, invalid_blob_name, TEST_VERSIONS) .await .expect("Failed to create blob with invalid name"); blob.sync().await.expect("Failed to sync blob"); @@ -1256,8 +1262,8 @@ mod tests { // Manually create a blob with incomplete size data let section = 1u64; let blob_name = section.to_be_bytes(); - let (blob, _) = context - .open(&cfg.partition, &blob_name) + let (blob, _, _) = context + .open(&cfg.partition, &blob_name, TEST_VERSIONS) .await .expect("Failed to create blob"); @@ -1311,8 +1317,8 @@ mod tests { // Manually create a blob with missing item data let section = 1u64; let blob_name = section.to_be_bytes(); - let (blob, _) = context - .open(&cfg.partition, &blob_name) + let (blob, _, _) = context + .open(&cfg.partition, &blob_name, TEST_VERSIONS) .await .expect("Failed to create blob"); @@ -1368,8 +1374,8 @@ mod tests { // Manually create a blob with missing checksum let section = 1u64; let blob_name = section.to_be_bytes(); - let (blob, _) = context - .open(&cfg.partition, &blob_name) + let (blob, _, _) = context + .open(&cfg.partition, &blob_name, TEST_VERSIONS) .await .expect("Failed to create blob"); @@ -1430,8 +1436,8 @@ mod tests { // Manually create a blob with incorrect checksum let section = 1u64; let blob_name = section.to_be_bytes(); - let (blob, _) = context - .open(&cfg.partition, &blob_name) + let (blob, _, _) = context + .open(&cfg.partition, &blob_name, TEST_VERSIONS) .await .expect("Failed to create blob"); @@ -1475,8 +1481,8 @@ mod tests { drop(journal); // Confirm blob is expected length - let (_, blob_size) = context - .open(&cfg.partition, §ion.to_be_bytes()) + let (_, blob_size, _) = context + .open(&cfg.partition, §ion.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert_eq!(blob_size, 0); @@ -1522,8 +1528,8 @@ mod tests { drop(journal); // Manually corrupt the end of the second blob - let (blob, blob_size) = context - .open(&cfg.partition, &2u64.to_be_bytes()) + let (blob, blob_size, _) = context + .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); blob.resize(blob_size - 4) @@ -1565,8 +1571,8 @@ mod tests { // Confirm blob is expected length // entry = 1 (varint for 4) + 4 (data) + 4 (checksum) = 9 bytes // Item 2 ends at position 16 + 9 = 25 - let (_, blob_size) = context - .open(&cfg.partition, &2u64.to_be_bytes()) + let (_, blob_size, _) = context + .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert_eq!(blob_size, 25); @@ -1615,8 
+1621,8 @@ mod tests { // Confirm blob is expected length // Items 1 and 2 at positions 0 and 16, item 3 (value 5) at position 32 // Item 3 = 1 (varint) + 4 (data) + 4 (checksum) = 9 bytes, ends at 41 - let (_, blob_size) = context - .open(&cfg.partition, &2u64.to_be_bytes()) + let (_, blob_size, _) = context + .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert_eq!(blob_size, 41); @@ -1694,8 +1700,8 @@ mod tests { drop(journal); // Manually corrupt the end of the second blob - let (blob, blob_size) = context - .open(&cfg.partition, &2u64.to_be_bytes()) + let (blob, blob_size, _) = context + .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); blob.resize(blob_size - 4) @@ -1747,8 +1753,8 @@ mod tests { // Confirm blob is expected length // entry = 1 (varint for 8) + 8 (u64 data) + 4 (checksum) = 13 bytes // Items at positions 0, 16, 32; item 3 ends at 32 + 13 = 45 - let (_, blob_size) = context - .open(&cfg.partition, &2u64.to_be_bytes()) + let (_, blob_size, _) = context + .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert_eq!(blob_size, 45); @@ -1826,8 +1832,8 @@ mod tests { drop(journal); // Manually add extra data to the end of the second blob - let (blob, blob_size) = context - .open(&cfg.partition, &2u64.to_be_bytes()) + let (blob, blob_size, _) = context + .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); blob.write_at(vec![0u8; 16], blob_size) @@ -1856,6 +1862,8 @@ mod tests { }); } + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + // Define `MockBlob` that returns an offset length that should overflow #[derive(Clone)] struct MockBlob {} @@ -1895,8 +1903,13 @@ mod tests { impl Storage for MockStorage { type Blob = MockBlob; - async fn open(&self, _partition: &str, _name: &[u8]) -> Result<(MockBlob, u64), RError> { - Ok((MockBlob {}, self.len)) + async fn open( + &self, + _partition: &str, + _name: &[u8], + _versions: std::ops::RangeInclusive, + ) -> Result<(MockBlob, u64, u16), RError> { + Ok((MockBlob {}, self.len, *TEST_VERSIONS.end())) } async fn remove(&self, _partition: &str, _name: Option<&[u8]>) -> Result<(), RError> { @@ -2123,8 +2136,8 @@ mod tests { drop(journal); // Hash blob contents - let (blob, size) = context - .open(&cfg.partition, &1u64.to_be_bytes()) + let (blob, size, _) = context + .open(&cfg.partition, &1u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert!(size > 0); diff --git a/storage/src/metadata/mod.rs b/storage/src/metadata/mod.rs index 5b085265ff..3746a39e16 100644 --- a/storage/src/metadata/mod.rs +++ b/storage/src/metadata/mod.rs @@ -94,6 +94,8 @@ mod tests { use commonware_utils::{hex, sequence::U64}; use rand::{Rng, RngCore}; + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; + #[test_traced] fn test_put_get_clear() { // Initialize the deterministic context @@ -315,7 +317,7 @@ mod tests { drop(metadata); // Corrupt the metadata store - let (blob, _) = context.open("test", b"left").await.unwrap(); + let (blob, _, _) = context.open("test", b"left", TEST_VERSIONS).await.unwrap(); blob.write_at(b"corrupted".to_vec(), 0).await.unwrap(); blob.sync().await.unwrap(); @@ -370,10 +372,10 @@ mod tests { drop(metadata); // Corrupt the metadata store - let (blob, _) = context.open("test", b"left").await.unwrap(); + let (blob, _, _) = context.open("test", b"left", TEST_VERSIONS).await.unwrap(); 
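        // "left" is one of the two alternating metadata blobs (see BLOB_NAMES);
        // corrupting its head forces the next init to fall back to "right".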
blob.write_at(b"corrupted".to_vec(), 0).await.unwrap(); blob.sync().await.unwrap(); - let (blob, _) = context.open("test", b"right").await.unwrap(); + let (blob, _, _) = context.open("test", b"right", TEST_VERSIONS).await.unwrap(); blob.write_at(b"corrupted".to_vec(), 0).await.unwrap(); blob.sync().await.unwrap(); @@ -432,7 +434,7 @@ mod tests { drop(metadata); // Corrupt the metadata store - let (blob, len) = context.open("test", b"left").await.unwrap(); + let (blob, len, _) = context.open("test", b"left", TEST_VERSIONS).await.unwrap(); blob.resize(len - 8).await.unwrap(); blob.sync().await.unwrap(); @@ -485,7 +487,7 @@ mod tests { drop(metadata); // Corrupt the metadata store - let (blob, _) = context.open("test", b"left").await.unwrap(); + let (blob, _, _) = context.open("test", b"left", TEST_VERSIONS).await.unwrap(); blob.resize(5).await.unwrap(); blob.sync().await.unwrap(); diff --git a/storage/src/metadata/storage.rs b/storage/src/metadata/storage.rs index d4cee20853..7e4d3a4eb9 100644 --- a/storage/src/metadata/storage.rs +++ b/storage/src/metadata/storage.rs @@ -13,6 +13,9 @@ use tracing::{debug, warn}; /// The names of the two blobs that store metadata. const BLOB_NAMES: [&[u8]; 2] = [b"left", b"right"]; +/// Current version of the metadata blob format. +const BLOB_VERSION: std::ops::RangeInclusive = 0..=0; + /// Information about a value in a [Wrapper]. struct Info { start: usize, @@ -79,8 +82,12 @@ impl Metadata { /// Initialize a new [Metadata] instance. pub async fn init(context: E, cfg: Config) -> Result { // Open dedicated blobs - let (left_blob, left_len) = context.open(&cfg.partition, BLOB_NAMES[0]).await?; - let (right_blob, right_len) = context.open(&cfg.partition, BLOB_NAMES[1]).await?; + let (left_blob, left_len, _) = context + .open(&cfg.partition, BLOB_NAMES[0], BLOB_VERSION) + .await?; + let (right_blob, right_len, _) = context + .open(&cfg.partition, BLOB_NAMES[1], BLOB_VERSION) + .await?; // Find latest blob (check which includes a hash of the other) let (left_map, left_wrapper) = diff --git a/storage/src/mmr/journaled.rs b/storage/src/mmr/journaled.rs index 91ab56c051..930c73d3aa 100644 --- a/storage/src/mmr/journaled.rs +++ b/storage/src/mmr/journaled.rs @@ -842,6 +842,7 @@ mod tests { const PAGE_SIZE: usize = 111; const PAGE_CACHE_SIZE: usize = 5; + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; fn test_config() -> Config { Config { @@ -1136,8 +1137,8 @@ mod tests { // 497. Simulate a partial write by corrupting the last parent's checksum by truncating // the last blob by a single byte. let partition: String = "journal_partition".into(); - let (blob, len) = context - .open(&partition, &71u64.to_be_bytes()) + let (blob, len, _) = context + .open(&partition, &71u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert_eq!(len, 36); // N+4 = 36 bytes per node, 1 node in the last blob @@ -1169,8 +1170,8 @@ mod tests { .remove(&partition, Some(&71u64.to_be_bytes())) .await .expect("Failed to remove blob"); - let (blob, len) = context - .open(&partition, &70u64.to_be_bytes()) + let (blob, len, _) = context + .open(&partition, &70u64.to_be_bytes(), TEST_VERSIONS) .await .expect("Failed to open blob"); assert_eq!(len, 36 * 7); // this blob should be full. 
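
A minimal sketch of the convention the storage modules above adopt for the versioned
`open` introduced in this patch (the partition, blob name, and `MY_VERSION` constant
are illustrative, not part of the patch):

    /// Version range this module accepts for its on-disk blob format.
    const MY_VERSION: std::ops::RangeInclusive<u16> = 0..=0;

    let (blob, len, version) = context
        .open("my_partition", b"my_blob", MY_VERSION)
        .await?;
    // On success the returned version always falls within the requested range;
    // otherwise open fails with Error::BlobApplicationVersionMismatch.
    assert!(MY_VERSION.contains(&version));
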
diff --git a/storage/src/ordinal/mod.rs b/storage/src/ordinal/mod.rs index 193dc609c5..3030b9a422 100644 --- a/storage/src/ordinal/mod.rs +++ b/storage/src/ordinal/mod.rs @@ -144,6 +144,7 @@ mod tests { const DEFAULT_ITEMS_PER_BLOB: u64 = 1000; const DEFAULT_WRITE_BUFFER: usize = 4096; const DEFAULT_REPLAY_BUFFER: usize = 1024 * 1024; + const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; #[test_traced] fn test_put_get() { @@ -518,8 +519,8 @@ mod tests { // Corrupt the data { - let (blob, _) = context - .open("test_ordinal", &0u64.to_be_bytes()) + let (blob, _, _) = context + .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) .await .unwrap(); // Corrupt the CRC by changing a byte @@ -645,8 +646,8 @@ mod tests { // Corrupt by writing partial record (only value, no CRC) { - let (blob, _) = context - .open("test_ordinal", &0u64.to_be_bytes()) + let (blob, _, _) = context + .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) .await .unwrap(); // Overwrite second record with partial data (32 bytes instead of 36) @@ -712,8 +713,8 @@ mod tests { // Corrupt the value portion of a record { - let (blob, _) = context - .open("test_ordinal", &0u64.to_be_bytes()) + let (blob, _, _) = context + .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) .await .unwrap(); // Corrupt some bytes in the value of the first record @@ -771,16 +772,16 @@ mod tests { // Corrupt CRCs in different blobs { // Corrupt CRC in first blob - let (blob, _) = context - .open("test_ordinal", &0u64.to_be_bytes()) + let (blob, _, _) = context + .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) .await .unwrap(); blob.write_at(vec![0xFF], 32).await.unwrap(); // Corrupt CRC of index 0 blob.sync().await.unwrap(); // Corrupt value in second blob (which will invalidate CRC) - let (blob, _) = context - .open("test_ordinal", &1u64.to_be_bytes()) + let (blob, _, _) = context + .open("test_ordinal", &1u64.to_be_bytes(), TEST_VERSIONS) .await .unwrap(); blob.write_at(vec![0xFF; 4], 5).await.unwrap(); // Corrupt value of index 10 @@ -843,8 +844,8 @@ mod tests { // Add extra bytes at the end of blob { - let (blob, size) = context - .open("test_ordinal", &0u64.to_be_bytes()) + let (blob, size, _) = context + .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) .await .unwrap(); // Add garbage data that forms a complete but invalid record @@ -900,8 +901,8 @@ mod tests { // Create blob with zero-filled space { - let (blob, _) = context - .open("test_ordinal", &0u64.to_be_bytes()) + let (blob, _, _) = context + .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) .await .unwrap(); @@ -1951,8 +1952,8 @@ mod tests { // Corrupt record at index 2 { - let (blob, _) = context - .open("test_ordinal", &0u64.to_be_bytes()) + let (blob, _, _) = context + .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) .await .unwrap(); // Corrupt the CRC of record at index 2 diff --git a/storage/src/ordinal/storage.rs b/storage/src/ordinal/storage.rs index 68d62d0539..847046366c 100644 --- a/storage/src/ordinal/storage.rs +++ b/storage/src/ordinal/storage.rs @@ -15,6 +15,9 @@ use std::{ }; use tracing::{debug, warn}; +/// Current version of the ordinal blob format. +const BLOB_VERSION: std::ops::RangeInclusive = 0..=0; + /// Value stored in the index file. 
 #[derive(Debug, Clone)]
 struct Record> {
@@ -120,7 +123,7 @@ impl> Ordinal {
 
         // Open all blobs and check for partial records
         for name in stored_blobs {
-            let (blob, mut len) = context.open(&config.partition, &name).await?;
+            let (blob, mut len, _) = context.open(&config.partition, &name, BLOB_VERSION).await?;
             let index = match name.try_into() {
                 Ok(index) => u64::from_be_bytes(index),
                 Err(nm) => Err(Error::InvalidBlobName(hex(&nm)))?,
@@ -255,9 +258,9 @@ impl> Ordinal {
         let items_per_blob = self.config.items_per_blob.get();
         let section = index / items_per_blob;
         if let Entry::Vacant(entry) = self.blobs.entry(section) {
-            let (blob, len) = self
+            let (blob, len, _) = self
                 .context
-                .open(&self.config.partition, &section.to_be_bytes())
+                .open(&self.config.partition, &section.to_be_bytes(), BLOB_VERSION)
                 .await?;
             entry.insert(Write::new(blob, len, self.config.write_buffer));
             debug!(section, "created blob");
diff --git a/storage/src/qmdb/any/unordered/fixed/sync.rs b/storage/src/qmdb/any/unordered/fixed/sync.rs
index d3462c390c..b81e34267e 100644
--- a/storage/src/qmdb/any/unordered/fixed/sync.rs
+++ b/storage/src/qmdb/any/unordered/fixed/sync.rs
@@ -217,8 +217,12 @@ pub(crate) async fn init_journal_at_size

From: Dan Laine
Date: Tue, 6 Jan 2026 20:10:56 -0500
Subject: [PATCH 02/17] split open into open and open_versioned

---
 runtime/fuzz/fuzz_targets/buffer.rs          |  11 +-
 runtime/src/deterministic.rs                 |  15 +-
 runtime/src/lib.rs                           |  78 ++++--
 runtime/src/storage/audited.rs               |  17 +-
 runtime/src/storage/iouring.rs               |  32 +--
 runtime/src/storage/memory.rs                |  44 ++-
 runtime/src/storage/metered.rs               |  28 +-
 runtime/src/storage/mod.rs                   |  60 ++---
 runtime/src/storage/tokio/mod.rs             |  39 +--
 runtime/src/tokio/runtime.rs                 |   4 +-
 runtime/src/utils/buffer/append.rs           |  15 +-
 runtime/src/utils/buffer/mod.rs              | 267 +++++--------------
 runtime/src/utils/buffer/pool.rs             |   5 +-
 runtime/src/utils/buffer/read.rs             |   2 +-
 runtime/src/utils/buffer/write.rs            |   4 +-
 runtime/src/utils/cell.rs                    |   4 +-
 storage/src/archive/prunable/mod.rs          |   3 +-
 storage/src/cache/mod.rs                     |   3 +-
 storage/src/freezer/mod.rs                   |  16 +-
 storage/src/freezer/storage.rs               |   5 +-
 storage/src/journal/contiguous/fixed.rs      |  37 +--
 storage/src/journal/segmented/variable.rs    |  42 ++-
 storage/src/metadata/mod.rs                  |  12 +-
 storage/src/metadata/storage.rs              |  11 +-
 storage/src/mmr/journaled.rs                 |   5 +-
 storage/src/ordinal/mod.rs                   |  17 +-
 storage/src/ordinal/storage.rs               |   7 +-
 storage/src/qmdb/any/unordered/fixed/sync.rs |   6 +-
 28 files changed, 257 insertions(+), 532 deletions(-)

diff --git a/runtime/fuzz/fuzz_targets/buffer.rs b/runtime/fuzz/fuzz_targets/buffer.rs
index d028a891fc..45375be064 100644
--- a/runtime/fuzz/fuzz_targets/buffer.rs
+++ b/runtime/fuzz/fuzz_targets/buffer.rs
@@ -12,9 +12,6 @@ const MAX_SIZE: usize = 1024 * 1024;
 const SHARED_BLOB: &[u8] = b"buffer_blob";
 const MAX_OPERATIONS: usize = 50;
 
-/// Default version range for fuzz tests (application version 0).
-const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; - #[derive(Arbitrary, Debug)] struct FuzzInput { seed: u64, @@ -91,7 +88,7 @@ fn fuzz(input: FuzzInput) { let executor = deterministic::Runner::default(); executor.start(|context| async move { let (blob, initial_size, _) = context - .open("test_partition", SHARED_BLOB, TEST_VERSIONS) + .open("test_partition", SHARED_BLOB) .await .expect("cannot open context"); @@ -117,7 +114,7 @@ fn fuzz(input: FuzzInput) { let buffer_size = (buffer_size as usize).clamp(1, MAX_SIZE); let (blob, size, _) = context - .open("test_partition", b"read_blob", TEST_VERSIONS) + .open("test_partition", b"read_blob") .await .expect("cannot open context"); @@ -138,7 +135,7 @@ fn fuzz(input: FuzzInput) { let capacity = (capacity as usize).clamp(1, MAX_SIZE); let (blob, _, _) = context - .open("test_partition", b"write_blob", TEST_VERSIONS) + .open("test_partition", b"write_blob") .await .expect("cannot open context"); @@ -156,7 +153,7 @@ fn fuzz(input: FuzzInput) { let pool_capacity = NZUsize!((pool_capacity as usize).clamp(1, MAX_SIZE)); let (blob, _, _) = context - .open("test_partition", b"append_blob", TEST_VERSIONS) + .open("test_partition", b"append_blob") .await .expect("cannot open write blob"); diff --git a/runtime/src/deterministic.rs b/runtime/src/deterministic.rs index 965b2c549b..d9ccfd4441 100644 --- a/runtime/src/deterministic.rs +++ b/runtime/src/deterministic.rs @@ -1414,13 +1414,13 @@ impl CryptoRng for Context {} impl crate::Storage for Context { type Blob = ::Blob; - async fn open( + async fn open_versioned( &self, partition: &str, name: &[u8], versions: std::ops::RangeInclusive, ) -> Result<(Self::Blob, u64, u16), Error> { - self.storage.open(partition, name, versions).await + self.storage.open_versioned(partition, name, versions).await } async fn remove(&self, partition: &str, name: Option<&[u8]>) -> Result<(), Error> { @@ -1449,9 +1449,6 @@ mod tests { use futures::{channel::mpsc, SinkExt, StreamExt}; use futures::{channel::oneshot, task::noop_waker}; - /// Default version range for tests - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; - fn run_with_seed(seed: u64) -> (String, Vec) { let executor = deterministic::Runner::seeded(seed); run_tasks(5, executor) @@ -1567,7 +1564,7 @@ mod tests { // Run some tasks, sync storage, and recover the runtime let (state, checkpoint) = executor1.start_and_recover(|context| async move { - let (blob, _, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap(); + let (blob, _, _) = context.open(partition, name).await.unwrap(); blob.write_at(Vec::from(data), 0).await.unwrap(); blob.sync().await.unwrap(); context.auditor().state() @@ -1579,7 +1576,7 @@ mod tests { // Check that synced storage persists after recovery let executor = Runner::from(checkpoint); executor.start(|context| async move { - let (blob, len, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap(); + let (blob, len, _) = context.open(partition, name).await.unwrap(); assert_eq!(len, data.len() as u64); let read = blob.read_at(vec![0; data.len()], 0).await.unwrap(); assert_eq!(read.as_ref(), data); @@ -1613,7 +1610,7 @@ mod tests { // Run some tasks without syncing storage let (_, checkpoint) = executor.start_and_recover(|context| async move { let context = context.clone(); - let (blob, _, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap(); + let (blob, _, _) = context.open(partition, name).await.unwrap(); blob.write_at(data, 0).await.unwrap(); }); @@ -1622,7 +1619,7 @@ mod tests { // Check that unsynced 
storage does not persist after recovery executor.start(|context| async move { - let (_, len, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap(); + let (_, len, _) = context.open(partition, name).await.unwrap(); assert_eq!(len, 0); }); } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 8c0fa41f50..adaf52fd34 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -545,6 +545,20 @@ pub trait Storage: Clone + Send + Sync + 'static { /// The readable/writeable storage buffer that can be opened by this Storage. type Blob: Blob; + /// Calls [Storage::open_versioned] with [Header::DEFAULT_APPLICATION_VERSION] as the only value + /// in the version range. + fn open( + &self, + partition: &str, + name: &[u8], + ) -> impl Future<Output = Result<(Self::Blob, u64, u16), Error>> + Send { + self.open_versioned( + partition, + name, + Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION, + ) + } + /// Open an existing blob in a given partition or create a new one, returning /// the blob and its length. /// @@ -565,7 +579,7 @@ /// # Returns /// /// A tuple of (blob, logical_size, application_version). - fn open( + fn open_versioned( &self, partition: &str, name: &[u8], @@ -619,6 +633,9 @@ impl Header { /// The current version of the header format. pub const HEADER_VERSION: u16 = 0; + /// Default application version used in [Storage::open]. + pub const DEFAULT_APPLICATION_VERSION: u16 = 0; + /// Creates a new header with the given application version. pub const fn new(app_version: u16) -> Self { Self { @@ -745,20 +762,23 @@ mod tests { use tracing::{error, Level}; use utils::reschedule; - /// Default version range for tests - const TEST_VERSIONS: std::ops::RangeInclusive<u16> = 0..=0; - #[test] fn test_header_fields() { - let header = Header::new(*TEST_VERSIONS.end()); + let header = Header::new(Header::DEFAULT_APPLICATION_VERSION); assert_eq!(header.header_version, Header::HEADER_VERSION); - assert_eq!(header.application_version, *TEST_VERSIONS.end()); + assert_eq!( + header.application_version, + Header::DEFAULT_APPLICATION_VERSION + ); // Verify byte serialization let bytes = header.to_bytes(); assert_eq!(&bytes[..4], &Header::MAGIC); assert_eq!(&bytes[4..6], &Header::HEADER_VERSION.to_be_bytes()); - assert_eq!(&bytes[6..8], &TEST_VERSIONS.end().to_be_bytes()); + assert_eq!( + &bytes[6..8], + &Header::DEFAULT_APPLICATION_VERSION.to_be_bytes() + ); // Verify round-trip let parsed = Header::from_bytes(bytes); @@ -767,14 +787,17 @@ mod tests { #[test] fn test_header_validate_success() { - let header = Header::new(*TEST_VERSIONS.end()); - assert!(header.validate(&TEST_VERSIONS).is_ok()); + let header = Header::new(Header::DEFAULT_APPLICATION_VERSION); + assert!(header + .validate(&(Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION)) + .is_ok()); } #[test] fn test_header_validate_magic_wrong_bytes() { let header = Header::from_bytes([0u8; Header::SIZE]); - let result = header.validate(&TEST_VERSIONS); + let result = header + .validate(&(Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION)); match result { Err(Error::BlobMagicMismatch { found }) => { assert_eq!(found, [0u8; 4]); } _ => panic!("expected BlobMagicMismatch error"), } - let mut bytes = Header::new(*TEST_VERSIONS.end()).to_bytes(); + let mut bytes = Header::new(Header::DEFAULT_APPLICATION_VERSION).to_bytes(); bytes[0] = b'X'; // Corrupt first byte let header = Header::from_bytes(bytes); - let result = 
header.validate(&TEST_VERSIONS); + let result = header + .validate(&(Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION)); match result { Err(Error::BlobMagicMismatch { found }) => { assert_eq!(found[0], b'X'); @@ -1126,15 +1150,11 @@ mod tests { // Open a new blob and verify returned version let (blob, size, app_version) = context - .open(partition, name, TEST_VERSIONS) + .open(partition, name) .await .expect("Failed to open blob"); assert_eq!(size, 0, "new blob should have size 0"); - assert_eq!( - app_version, - *TEST_VERSIONS.end(), - "new blob should have app version from end of range" - ); + assert_eq!(app_version, Header::DEFAULT_APPLICATION_VERSION); // Write data to the blob let data = b"Hello, Storage!"; @@ -1164,13 +1184,13 @@ mod tests { // Reopen the blob and verify version persists let (blob, len, app_version) = context - .open(partition, name, TEST_VERSIONS) + .open(partition, name) .await .expect("Failed to reopen blob"); assert_eq!(len, data.len() as u64); assert_eq!( app_version, - *TEST_VERSIONS.end(), + Header::DEFAULT_APPLICATION_VERSION, "reopened blob should have same app version" ); @@ -1219,7 +1239,7 @@ mod tests { // Open a new blob let (blob, _, _) = context - .open(partition, name, TEST_VERSIONS) + .open(partition, name) .await .expect("Failed to open blob"); @@ -1275,7 +1295,7 @@ mod tests { // Open and write to a new blob let (blob, _, _) = context - .open(partition, name, TEST_VERSIONS) + .open(partition, name) .await .expect("Failed to open blob"); @@ -1286,7 +1306,7 @@ mod tests { blob.sync().await.expect("Failed to sync after write"); // Re-open and check length - let (blob, len, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap(); + let (blob, len, _) = context.open(partition, name).await.unwrap(); assert_eq!(len, data.len() as u64); // Resize to extend the file @@ -1297,7 +1317,7 @@ mod tests { blob.sync().await.expect("Failed to sync after resize"); // Re-open and check length again - let (blob, len, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap(); + let (blob, len, _) = context.open(partition, name).await.unwrap(); assert_eq!(len, new_len); // Read original data @@ -1316,7 +1336,7 @@ mod tests { blob.sync().await.unwrap(); // Reopen to check truncation - let (blob, size, _) = context.open(partition, name, TEST_VERSIONS).await.unwrap(); + let (blob, size, _) = context.open(partition, name).await.unwrap(); assert_eq!(size, data.len() as u64); // Read truncated data @@ -1339,7 +1359,7 @@ mod tests { for (additional, partition) in partitions.iter().enumerate() { // Open a new blob let (blob, _, _) = context - .open(partition, name, TEST_VERSIONS) + .open(partition, name) .await .expect("Failed to open blob"); @@ -1358,7 +1378,7 @@ mod tests { for (additional, partition) in partitions.iter().enumerate() { // Open a new blob let (blob, len, _) = context - .open(partition, name, TEST_VERSIONS) + .open(partition, name) .await .expect("Failed to open blob"); assert_eq!(len, (data1.len() + data2.len() + additional) as u64); @@ -1384,7 +1404,7 @@ mod tests { // Open a new blob let (blob, _, _) = context - .open(partition, name, TEST_VERSIONS) + .open(partition, name) .await .expect("Failed to open blob"); @@ -1414,7 +1434,7 @@ mod tests { // Open a new blob let (blob, _, _) = context - .open(partition, name, TEST_VERSIONS) + .open(partition, name) .await .expect("Failed to open blob"); diff --git a/runtime/src/storage/audited.rs b/runtime/src/storage/audited.rs index 3adf531142..4c46c731a0 100644 --- 
a/runtime/src/storage/audited.rs +++ b/runtime/src/storage/audited.rs @@ -18,7 +18,7 @@ impl Storage { impl crate::Storage for Storage { type Blob = Blob; - async fn open( + async fn open_versioned( &self, partition: &str, name: &[u8], @@ -29,7 +29,7 @@ impl crate::Storage for Storage { hasher.update(name); }); self.inner - .open(partition, name, versions) + .open_versioned(partition, name, versions) .await .map(|(blob, len, app_version)| { ( @@ -123,9 +123,6 @@ mod tests { }; use std::sync::Arc; - /// Default version range for tests - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; - #[tokio::test] async fn test_audited_storage() { let inner = MemStorage::default(); @@ -150,14 +147,8 @@ mod tests { let storage2 = AuditedStorage::new(inner2, auditor2.clone()); // Perform a sequence of operations on both storages simultaneously - let (blob1, _, _) = storage1 - .open("partition", b"test_blob", TEST_VERSIONS) - .await - .unwrap(); - let (blob2, _, _) = storage2 - .open("partition", b"test_blob", TEST_VERSIONS) - .await - .unwrap(); + let (blob1, _, _) = storage1.open("partition", b"test_blob").await.unwrap(); + let (blob2, _, _) = storage2.open("partition", b"test_blob").await.unwrap(); // Write data to the blobs blob1.write_at(b"hello world".to_vec(), 0).await.unwrap(); diff --git a/runtime/src/storage/iouring.rs b/runtime/src/storage/iouring.rs index 0bac52b72f..820c530f88 100644 --- a/runtime/src/storage/iouring.rs +++ b/runtime/src/storage/iouring.rs @@ -100,7 +100,7 @@ impl Storage { impl crate::Storage for Storage { type Blob = Blob; - async fn open( + async fn open_versioned( &self, partition: &str, name: &[u8], @@ -444,9 +444,6 @@ mod tests { use rand::{Rng as _, SeedableRng as _}; use std::env; - /// Default version range for tests - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; - // Helper for creating test storage fn create_test_storage() -> (Storage, PathBuf) { let mut rng = rand::rngs::StdRng::from_entropy(); @@ -475,16 +472,9 @@ mod tests { let (storage, storage_directory) = create_test_storage(); // Test 1: New blob returns logical size 0 and correct application version - let (blob, size, app_version) = storage - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, app_version) = storage.open("partition", b"test").await.unwrap(); assert_eq!(size, 0, "new blob should have logical size 0"); - assert_eq!( - app_version, - *TEST_VERSIONS.end(), - "new blob should have app version 0" - ); + assert_eq!(app_version, Header::DEFAULT_APPLICATION_VERSION); // Verify raw file has 8 bytes (header only) let file_path = storage_directory.join("partition").join(hex(b"test")); @@ -544,12 +534,9 @@ mod tests { blob.sync().await.unwrap(); drop(blob); - let (blob2, size2, app_version2) = storage - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob2, size2, app_version2) = storage.open("partition", b"test").await.unwrap(); assert_eq!(size2, 9, "reopened blob should have logical size 9"); - assert_eq!(app_version2, *TEST_VERSIONS.end()); + assert_eq!(app_version2, Header::DEFAULT_APPLICATION_VERSION); let read_buf = blob2.read_at(vec![0u8; 9], 0).await.unwrap(); assert_eq!(read_buf.as_ref(), b"test data"); drop(blob2); @@ -560,12 +547,9 @@ mod tests { std::fs::write(&corrupted_path, vec![0u8; 4]).unwrap(); // Opening should truncate and write fresh header - let (blob3, size3, app_version3) = storage - .open("partition", b"corrupted", TEST_VERSIONS) - .await - .unwrap(); + let (blob3, size3, app_version3) = 
storage.open("partition", b"corrupted").await.unwrap(); assert_eq!(size3, 0, "corrupted blob should return logical size 0"); - assert_eq!(app_version3, *TEST_VERSIONS.end()); + assert_eq!(app_version3, Header::DEFAULT_APPLICATION_VERSION); // Verify raw file now has proper 8-byte header let metadata = std::fs::metadata(&corrupted_path).unwrap(); @@ -593,7 +577,7 @@ mod tests { std::fs::write(&bad_magic_path, vec![0u8; Header::SIZE]).unwrap(); // Opening should fail with magic mismatch error - let result = storage.open("partition", b"bad_magic", TEST_VERSIONS).await; + let result = storage.open("partition", b"bad_magic").await; match result { Err(crate::Error::BlobMagicMismatch { found }) => { assert_eq!(found, [0u8; Header::MAGIC_LENGTH]); diff --git a/runtime/src/storage/memory.rs b/runtime/src/storage/memory.rs index 8d6332dc8d..d192a2e92f 100644 --- a/runtime/src/storage/memory.rs +++ b/runtime/src/storage/memory.rs @@ -23,7 +23,7 @@ impl Default for Storage { impl crate::Storage for Storage { type Blob = Blob; - async fn open( + async fn open_versioned( &self, partition: &str, name: &[u8], @@ -206,9 +206,6 @@ mod tests { use super::*; use crate::{storage::tests::run_storage_tests, Blob, Header, Storage as _}; - /// Default version range for tests (application version 0). - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; - #[tokio::test] async fn test_memory_storage() { let storage = Storage::default(); @@ -220,16 +217,9 @@ mod tests { let storage = Storage::default(); // Test 1: New blob returns logical size 0 and correct app version - let (blob, size, app_version) = storage - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, app_version) = storage.open("partition", b"test").await.unwrap(); assert_eq!(size, 0, "new blob should have logical size 0"); - assert_eq!( - app_version, - *TEST_VERSIONS.end(), - "new blob should have default app version" - ); + assert_eq!(app_version, Header::DEFAULT_APPLICATION_VERSION); // Verify raw storage has 8 bytes (header only) { @@ -302,12 +292,9 @@ mod tests { blob.sync().await.unwrap(); drop(blob); - let (blob2, size2, app_version2) = storage - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob2, size2, app_version2) = storage.open("partition", b"test").await.unwrap(); assert_eq!(size2, 9, "reopened blob should have logical size 9"); - assert_eq!(app_version2, *TEST_VERSIONS.end()); + assert_eq!(app_version2, Header::DEFAULT_APPLICATION_VERSION); let read_buf = blob2.read_at(vec![0u8; 9], 0).await.unwrap(); assert_eq!(read_buf.as_ref(), b"test data"); @@ -320,12 +307,9 @@ mod tests { } // Opening should truncate and write fresh header - let (_blob3, size3, app_version3) = storage - .open("partition", b"corrupted", TEST_VERSIONS) - .await - .unwrap(); + let (_blob3, size3, app_version3) = storage.open("partition", b"corrupted").await.unwrap(); assert_eq!(size3, 0, "corrupted blob should return logical size 0"); - assert_eq!(app_version3, *TEST_VERSIONS.end()); + assert_eq!(app_version3, Header::DEFAULT_APPLICATION_VERSION); // Verify raw storage now has proper 8-byte header { @@ -353,7 +337,7 @@ mod tests { } // Opening should fail with magic mismatch error - let result = storage.open("partition", b"bad_magic", TEST_VERSIONS).await; + let result = storage.open("partition", b"bad_magic").await; match result { Err(crate::Error::BlobMagicMismatch { found }) => { assert_eq!(found, [0u8; Header::MAGIC_LENGTH]); @@ -368,15 +352,21 @@ mod tests { let storage = Storage::default(); // Create 
blob with version 1 - let (_, _, app_version) = storage.open("partition", b"v1", 1..=1).await.unwrap(); + let (_, _, app_version) = storage + .open_versioned("partition", b"v1", 1..=1) + .await + .unwrap(); assert_eq!(app_version, 1, "new blob should have version 1"); // Reopen with a range that includes version 1 - let (_, _, app_version) = storage.open("partition", b"v1", 0..=2).await.unwrap(); + let (_, _, app_version) = storage + .open_versioned("partition", b"v1", 0..=2) + .await + .unwrap(); assert_eq!(app_version, 1, "existing blob should retain version 1"); // Try to open with version range 2..=2 (should fail) - let result = storage.open("partition", b"v1", 2..=2).await; + let result = storage.open_versioned("partition", b"v1", 2..=2).await; match result { Err(crate::Error::BlobApplicationVersionMismatch { expected, found }) => { assert_eq!(expected, 2..=2); diff --git a/runtime/src/storage/metered.rs b/runtime/src/storage/metered.rs index 540b860528..3d9015b0d5 100644 --- a/runtime/src/storage/metered.rs +++ b/runtime/src/storage/metered.rs @@ -77,14 +77,15 @@ impl Storage { impl crate::Storage for Storage { type Blob = Blob; - async fn open( + async fn open_versioned( &self, partition: &str, name: &[u8], versions: RangeInclusive<u16>, ) -> Result<(Self::Blob, u64, u16), Error> { self.metrics.open_blobs.inc(); - let (inner, len, app_version) = self.inner.open(partition, name, versions).await?; + let (inner, len, app_version) = + self.inner.open_versioned(partition, name, versions).await?; Ok(( Blob { inner, @@ -170,9 +171,6 @@ mod tests { }; use prometheus_client::registry::Registry; - /// Default version range for tests - const TEST_VERSIONS: std::ops::RangeInclusive<u16> = 0..=0; - #[tokio::test] async fn test_metered_storage() { let mut registry = Registry::default(); @@ -190,10 +188,7 @@ let storage = Storage::new(inner, &mut registry); // Open a blob - let (blob, _, _) = storage - .open("partition", b"test_blob", TEST_VERSIONS) - .await - .unwrap(); + let (blob, _, _) = storage.open("partition", b"test_blob").await.unwrap(); // Verify that the open_blobs metric is incremented let open_blobs = storage.metrics.open_blobs.get(); @@ -249,14 +244,8 @@ let storage = Storage::new(inner, &mut registry); // Open multiple blobs - let (blob1, _, _) = storage - .open("partition", b"blob1", TEST_VERSIONS) - .await - .unwrap(); - let (blob2, _, _) = storage - .open("partition", b"blob2", TEST_VERSIONS) - .await - .unwrap(); + let (blob1, _, _) = storage.open("partition", b"blob1").await.unwrap(); + let (blob2, _, _) = storage.open("partition", b"blob2").await.unwrap(); // Verify that the open_blobs metric is incremented correctly let open_blobs = storage.metrics.open_blobs.get(); @@ -296,10 +285,7 @@ let storage = Storage::new(inner, &mut registry); // Open a blob - let (blob, _, _) = storage - .open("partition", b"test_blob", TEST_VERSIONS) - .await - .unwrap(); + let (blob, _, _) = storage.open("partition", b"test_blob").await.unwrap(); // Verify that the open_blobs metric is incremented assert_eq!( diff --git a/runtime/src/storage/mod.rs b/runtime/src/storage/mod.rs index 83f70c7cdd..36db827af8 100644 --- a/runtime/src/storage/mod.rs +++ b/runtime/src/storage/mod.rs @@ -26,9 +26,6 @@ pub fn validate_partition_name(partition: &str) -> Result<(), crate::Error> { pub(crate) mod tests { use crate::{Blob, Storage}; - /// Default version range for tests - const TEST_VERSIONS: std::ops::RangeInclusive<u16> = 0..=0; - /// Runs the full suite of tests on the provided storage 
implementation. pub(crate) async fn run_storage_tests(storage: S) where @@ -58,10 +55,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, len, _) = storage - .open("partition", b"test_blob", TEST_VERSIONS) - .await - .unwrap(); + let (blob, len, _) = storage.open("partition", b"test_blob").await.unwrap(); assert_eq!(len, 0); blob.write_at(Vec::from("hello world"), 0).await.unwrap(); @@ -80,10 +74,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - storage - .open("partition", b"test_blob", TEST_VERSIONS) - .await - .unwrap(); + storage.open("partition", b"test_blob").await.unwrap(); storage .remove("partition", Some(b"test_blob")) .await @@ -99,14 +90,8 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - storage - .open("partition", b"blob1", TEST_VERSIONS) - .await - .unwrap(); - storage - .open("partition", b"blob2", TEST_VERSIONS) - .await - .unwrap(); + storage.open("partition", b"blob1").await.unwrap(); + storage.open("partition", b"blob2").await.unwrap(); let blobs = storage.scan("partition").await.unwrap(); assert_eq!( @@ -130,10 +115,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage - .open("partition", b"test_blob", TEST_VERSIONS) - .await - .unwrap(); + let (blob, _, _) = storage.open("partition", b"test_blob").await.unwrap(); // Initialize blob with data of sufficient length first blob.write_at(b"concurrent write".to_vec(), 0) @@ -171,10 +153,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage - .open("partition", b"large_blob", TEST_VERSIONS) - .await - .unwrap(); + let (blob, _, _) = storage.open("partition", b"large_blob").await.unwrap(); let large_data = vec![42u8; 10 * 1024 * 1024]; // 10 MB blob.write_at(large_data.clone(), 0).await.unwrap(); @@ -191,7 +170,7 @@ pub(crate) mod tests { S::Blob: Send + Sync, { let (blob, _, _) = storage - .open("test_overwrite_data", b"test_blob", TEST_VERSIONS) + .open("test_overwrite_data", b"test_blob") .await .unwrap(); @@ -218,7 +197,7 @@ pub(crate) mod tests { S::Blob: Send + Sync, { let (blob, _, _) = storage - .open("test_read_beyond_written_data", b"test_blob", TEST_VERSIONS) + .open("test_read_beyond_written_data", b"test_blob") .await .unwrap(); @@ -241,7 +220,7 @@ pub(crate) mod tests { S::Blob: Send + Sync, { let (blob, _, _) = storage - .open("test_write_at_large_offset", b"test_blob", TEST_VERSIONS) + .open("test_write_at_large_offset", b"test_blob") .await .unwrap(); @@ -266,7 +245,7 @@ pub(crate) mod tests { S::Blob: Send + Sync, { let (blob, _, _) = storage - .open("test_append_data", b"test_blob", TEST_VERSIONS) + .open("test_append_data", b"test_blob") .await .unwrap(); @@ -287,10 +266,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage - .open("partition", b"test_blob", TEST_VERSIONS) - .await - .unwrap(); + let (blob, _, _) = storage.open("partition", b"test_blob").await.unwrap(); // Write data at different offsets blob.write_at(b"first".to_vec(), 0).await.unwrap(); @@ -311,7 +287,7 @@ pub(crate) mod tests { S::Blob: Send + Sync, { let (blob, _, _) = storage - .open("test_large_data_in_chunks", b"large_blob", TEST_VERSIONS) + .open("test_large_data_in_chunks", b"large_blob") .await .unwrap(); @@ -341,7 +317,7 @@ pub(crate) mod tests { S::Blob: Send + Sync, { let (blob, _, _) = storage - .open("test_read_empty_blob", b"empty_blob", TEST_VERSIONS) + 
.open("test_read_empty_blob", b"empty_blob") .await .unwrap(); @@ -359,7 +335,7 @@ pub(crate) mod tests { S::Blob: Send + Sync, { let (blob, _, _) = storage - .open("test_overlapping_writes", b"test_blob", TEST_VERSIONS) + .open("test_overlapping_writes", b"test_blob") .await .unwrap(); @@ -383,7 +359,7 @@ pub(crate) mod tests { { { let (blob, _, _) = storage - .open("test_resize_then_open", b"test_blob", TEST_VERSIONS) + .open("test_resize_then_open", b"test_blob") .await .unwrap(); @@ -399,7 +375,7 @@ pub(crate) mod tests { // Reopen the blob let (blob, len, _) = storage - .open("test_resize_then_open", b"test_blob", TEST_VERSIONS) + .open("test_resize_then_open", b"test_blob") .await .unwrap(); assert_eq!(len, 5, "Blob length after resize is incorrect"); @@ -425,7 +401,7 @@ pub(crate) mod tests { ] { assert!( !matches!( - storage.open(valid, b"blob", TEST_VERSIONS).await, + storage.open(valid, b"blob").await, Err(crate::Error::PartitionNameInvalid(_)) ), "Valid partition name '{valid}' should be accepted by open" @@ -456,7 +432,7 @@ pub(crate) mod tests { ] { assert!( matches!( - storage.open(invalid, b"blob", TEST_VERSIONS).await, + storage.open(invalid, b"blob").await, Err(crate::Error::PartitionNameInvalid(_)) ), "Invalid partition name '{invalid}' should be rejected by open" diff --git a/runtime/src/storage/tokio/mod.rs b/runtime/src/storage/tokio/mod.rs index 521a92a6a4..6a1a5d4fdd 100644 --- a/runtime/src/storage/tokio/mod.rs +++ b/runtime/src/storage/tokio/mod.rs @@ -71,7 +71,7 @@ impl crate::Storage for Storage { #[cfg(not(unix))] type Blob = fallback::Blob; - async fn open( + async fn open_versioned( &self, partition: &str, name: &[u8], @@ -253,9 +253,6 @@ mod tests { use rand::{Rng as _, SeedableRng}; use std::env; - /// Default version range for tests - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; - #[tokio::test] async fn test_storage() { let mut rng = rand::rngs::StdRng::from_entropy(); @@ -274,16 +271,9 @@ mod tests { let storage = Storage::new(config); // Test 1: New blob returns logical size 0 and correct app version - let (blob, size, app_version) = storage - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, app_version) = storage.open("partition", b"test").await.unwrap(); assert_eq!(size, 0, "new blob should have logical size 0"); - assert_eq!( - app_version, - *TEST_VERSIONS.end(), - "new blob should have app version 0" - ); + assert_eq!(app_version, Header::DEFAULT_APPLICATION_VERSION); // Verify raw file has 8 bytes (header only) let file_path = storage_directory.join("partition").join(hex(b"test")); @@ -343,12 +333,9 @@ mod tests { blob.sync().await.unwrap(); drop(blob); - let (blob2, size2, app_version2) = storage - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob2, size2, app_version2) = storage.open("partition", b"test").await.unwrap(); assert_eq!(size2, 9, "reopened blob should have logical size 9"); - assert_eq!(app_version2, *TEST_VERSIONS.end()); + assert_eq!(app_version2, Header::DEFAULT_APPLICATION_VERSION); let read_buf = blob2.read_at(vec![0u8; 9], 0).await.unwrap(); assert_eq!(read_buf.as_ref(), b"test data"); drop(blob2); @@ -359,12 +346,9 @@ mod tests { std::fs::write(&corrupted_path, vec![0u8; 4]).unwrap(); // Opening should truncate and write fresh header - let (blob3, size3, app_version3) = storage - .open("partition", b"corrupted", TEST_VERSIONS) - .await - .unwrap(); + let (blob3, size3, app_version3) = storage.open("partition", b"corrupted").await.unwrap(); assert_eq!(size3, 
0, "corrupted blob should return logical size 0"); - assert_eq!(app_version3, *TEST_VERSIONS.end()); + assert_eq!(app_version3, Header::DEFAULT_APPLICATION_VERSION); // Verify raw file now has proper 8-byte header let metadata = std::fs::metadata(&corrupted_path).unwrap(); @@ -397,7 +381,7 @@ mod tests { std::fs::write(&bad_magic_path, vec![0u8; Header::SIZE]).unwrap(); // Opening should fail with magic mismatch error - let result = storage.open("partition", b"bad_magic", TEST_VERSIONS).await; + let result = storage.open("partition", b"bad_magic").await; match result { Err(crate::Error::BlobMagicMismatch { found }) => { assert_eq!(found, [0u8; Header::MAGIC_LENGTH]); @@ -418,10 +402,13 @@ mod tests { let storage = Storage::new(config); // Create blob with version 1 - storage.open("partition", b"v1", 1..=1).await.unwrap(); + storage + .open_versioned("partition", b"v1", 1..=1) + .await + .unwrap(); // Try to open with version range 2..=2 - let result = storage.open("partition", b"v1", 2..=2).await; + let result = storage.open_versioned("partition", b"v1", 2..=2).await; match result { Err(crate::Error::BlobApplicationVersionMismatch { expected, found }) => { assert_eq!(expected, 2..=2); diff --git a/runtime/src/tokio/runtime.rs b/runtime/src/tokio/runtime.rs index 778e60589c..89ef9e96f7 100644 --- a/runtime/src/tokio/runtime.rs +++ b/runtime/src/tokio/runtime.rs @@ -684,13 +684,13 @@ impl CryptoRng for Context {} impl crate::Storage for Context { type Blob = ::Blob; - async fn open( + async fn open_versioned( &self, partition: &str, name: &[u8], versions: std::ops::RangeInclusive, ) -> Result<(Self::Blob, u64, u16), Error> { - self.storage.open(partition, name, versions).await + self.storage.open_versioned(partition, name, versions).await } async fn remove(&self, partition: &str, name: Option<&[u8]>) -> Result<(), Error> { diff --git a/runtime/src/utils/buffer/append.rs b/runtime/src/utils/buffer/append.rs index c048d1f44d..726b4f217e 100644 --- a/runtime/src/utils/buffer/append.rs +++ b/runtime/src/utils/buffer/append.rs @@ -329,9 +329,6 @@ mod tests { const PAGE_SIZE: usize = 1024; const BUFFER_SIZE: usize = PAGE_SIZE * 2; - /// Default version range for tests - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; - #[test_traced] #[should_panic(expected = "not implemented")] fn test_append_blob_write_panics() { @@ -340,7 +337,7 @@ mod tests { // Start the test within the executor executor.start(|context| async move { let (blob, size, _) = context - .open("test", "blob".as_bytes(), TEST_VERSIONS) + .open("test", "blob".as_bytes()) .await .expect("Failed to open blob"); let pool_ref = PoolRef::new(NZUsize!(PAGE_SIZE), NZUsize!(10)); @@ -359,7 +356,7 @@ mod tests { // Start the test within the executor executor.start(|context| async move { let (blob, size, _) = context - .open("test", "blob".as_bytes(), TEST_VERSIONS) + .open("test", "blob".as_bytes()) .await .expect("Failed to open blob"); assert_eq!(size, 0); @@ -379,7 +376,7 @@ mod tests { // Make sure blob has expected size when reopened. 
let (blob, size, _) = context - .open("test", "blob".as_bytes(), TEST_VERSIONS) + .open("test", "blob".as_bytes()) .await .expect("Failed to open blob"); assert_eq!(size, 11 * PAGE_SIZE as u64); @@ -394,7 +391,7 @@ mod tests { // Start the test within the executor executor.start(|context| async move { let (blob, size, _) = context - .open("test", "blob".as_bytes(), TEST_VERSIONS) + .open("test", "blob".as_bytes()) .await .expect("Failed to open blob"); assert_eq!(size, 0); @@ -489,7 +486,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { let (blob, size, _) = context - .open("test", "blob".as_bytes(), TEST_VERSIONS) + .open("test", "blob".as_bytes()) .await .expect("Failed to open blob"); @@ -528,7 +525,7 @@ mod tests { // Close and reopen. let (blob, size, _) = context - .open("test", "blob".as_bytes(), TEST_VERSIONS) + .open("test", "blob".as_bytes()) .await .expect("Failed to reopen blob"); diff --git a/runtime/src/utils/buffer/mod.rs b/runtime/src/utils/buffer/mod.rs index 4700190f0a..d5d5458cb3 100644 --- a/runtime/src/utils/buffer/mod.rs +++ b/runtime/src/utils/buffer/mod.rs @@ -18,19 +18,13 @@ mod tests { use commonware_macros::test_traced; use commonware_utils::NZUsize; - /// Default version range for tests - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; - #[test_traced] fn test_read_basic() { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test basic buffered reading functionality with sequential reads let data = b"Hello, world! This is a test."; - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -69,10 +63,7 @@ mod tests { executor.start(|context| async move { // Test reading data that spans multiple buffer refills let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -105,10 +96,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -142,10 +130,7 @@ mod tests { executor.start(|context| async move { // Test reader behavior with known blob size limits let data = b"This is a test with known size limitations."; - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -189,10 +174,7 @@ mod tests { // Test reading large amounts of data in chunks let data_size = 1024 * 256; // 256KB of data let data = vec![0x42; data_size]; - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); 
blob.write_at(data.clone(), 0).await.unwrap(); let size = data.len() as u64; @@ -240,10 +222,7 @@ mod tests { let data_size = buffer_size * 5 / 2; // 2.5 buffers let data = vec![0x37; data_size]; - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.clone(), 0).await.unwrap(); let size = data.len() as u64; @@ -278,10 +257,7 @@ mod tests { executor.start(|context| async move { // Create a memory blob with some test data let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -333,10 +309,7 @@ mod tests { executor.start(|context| async move { // Create a memory blob with longer data let data = vec![0x41; 1000]; // 1000 'A' characters - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.clone(), 0).await.unwrap(); let size = data.len() as u64; @@ -373,10 +346,7 @@ mod tests { executor.start(|context| async move { // Create a memory blob with some test data let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let data_len = data.len() as u64; @@ -389,10 +359,7 @@ mod tests { reader.resize(resize_len).await.unwrap(); // Reopen to check truncation - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, resize_len, "Blob should be resized to half size"); // Create a new buffer and read to verify truncation @@ -415,10 +382,7 @@ mod tests { new_reader.resize(data_len * 2).await.unwrap(); // Reopen to check resize - let (blob, new_size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, new_size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(new_size, data_len * 2); // Create a new buffer and read to verify resize @@ -443,10 +407,7 @@ mod tests { // Create a memory blob with some test data let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; let data_len = data.len() as u64; - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); @@ -457,10 +418,7 @@ mod tests { reader.resize(0).await.unwrap(); // Reopen to check truncation - let (blob, size, _) = context - .open("partition", b"test", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0, "Blob should be resized to zero"); // Create a new buffer and try to read (should fail) @@ -478,10 +436,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test basic buffered write and sync functionality - let (blob, size, _) = context - .open("partition", 
b"write_basic", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"write_basic").await.unwrap(); assert_eq!(size, 0); let writer = Write::new(blob.clone(), size, NZUsize!(8)); @@ -491,10 +446,7 @@ mod tests { assert_eq!(writer.size().await, 5); // Verify data was written correctly - let (blob, size, _) = context - .open("partition", b"write_basic", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"write_basic").await.unwrap(); assert_eq!(size, 5); let mut reader = Read::new(blob, size, NZUsize!(8)); let mut buf = [0u8; 5]; @@ -508,10 +460,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writes that cause buffer flushes due to capacity limits - let (blob, size, _) = context - .open("partition", b"write_multi", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"write_multi").await.unwrap(); assert_eq!(size, 0); let writer = Write::new(blob.clone(), size, NZUsize!(4)); @@ -522,10 +471,7 @@ mod tests { writer.sync().await.unwrap(); // Verify the final result - let (blob, size, _) = context - .open("partition", b"write_multi", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"write_multi").await.unwrap(); assert_eq!(size, 7); let mut reader = Read::new(blob, size, NZUsize!(4)); let mut buf = [0u8; 7]; @@ -539,10 +485,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writing data larger than buffer capacity (direct write) - let (blob, size, _) = context - .open("partition", b"write_large", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"write_large").await.unwrap(); assert_eq!(size, 0); let writer = Write::new(blob.clone(), size, NZUsize!(4)); @@ -557,10 +500,7 @@ mod tests { assert_eq!(writer.size().await, 26); // Verify the complete data - let (blob, size, _) = context - .open("partition", b"write_large", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"write_large").await.unwrap(); assert_eq!(size, 26); let mut reader = Read::new(blob, size, NZUsize!(4)); let mut buf = [0u8; 26]; @@ -574,10 +514,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test sequential appends that exceed buffer capacity - let (blob, size, _) = context - .open("partition", b"append_buf", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"append_buf").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Write data that fits in buffer @@ -590,10 +527,7 @@ mod tests { assert_eq!(writer.size().await, 11); // Verify the complete result - let (blob, size, _) = context - .open("partition", b"append_buf", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"append_buf").await.unwrap(); assert_eq!(size, 11); let mut reader = Read::new(blob, size, NZUsize!(10)); let mut buf = vec![0u8; 11]; @@ -607,10 +541,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test overwriting data within the buffer and extending it - let (blob, size, _) = context - .open("partition", b"middle_buf", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"middle_buf").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(20)); // Initial 
write @@ -623,10 +554,7 @@ mod tests { writer.sync().await.unwrap(); // Verify overwrite result - let (blob, size, _) = context - .open("partition", b"middle_buf", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"middle_buf").await.unwrap(); assert_eq!(size, 10); let mut reader = Read::new(blob, size, NZUsize!(10)); let mut buf = vec![0u8; 10]; @@ -641,10 +569,7 @@ mod tests { writer.sync().await.unwrap(); // Verify final result - let (blob, size, _) = context - .open("partition", b"middle_buf", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"middle_buf").await.unwrap(); assert_eq!(size, 20); let mut reader = Read::new(blob, size, NZUsize!(20)); let mut buf = vec![0u8; 20]; @@ -658,10 +583,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writing at offsets before the current buffer position - let (blob, size, _) = context - .open("partition", b"before_buf", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"before_buf").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Write data at a later offset first @@ -674,10 +596,7 @@ mod tests { writer.sync().await.unwrap(); // Verify data placement with gap - let (blob, size, _) = context - .open("partition", b"before_buf", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"before_buf").await.unwrap(); assert_eq!(size, 20); let mut reader = Read::new(blob, size, NZUsize!(20)); let mut buf = vec![0u8; 20]; @@ -694,10 +613,7 @@ mod tests { assert_eq!(writer.size().await, 20); // Verify gap is filled - let (blob, size, _) = context - .open("partition", b"before_buf", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"before_buf").await.unwrap(); assert_eq!(size, 20); let mut reader = Read::new(blob, size, NZUsize!(20)); let mut buf = vec![0u8; 20]; @@ -712,10 +628,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test blob resize functionality and subsequent writes - let (blob, size, _) = context - .open("partition", b"resize_write", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"resize_write").await.unwrap(); let writer = Write::new(blob, size, NZUsize!(10)); // Write initial data @@ -724,10 +637,8 @@ mod tests { writer.sync().await.unwrap(); assert_eq!(writer.size().await, 11); - let (blob_check, size_check, _) = context - .open("partition", b"resize_write", TEST_VERSIONS) - .await - .unwrap(); + let (blob_check, size_check, _) = + context.open("partition", b"resize_write").await.unwrap(); assert_eq!(size_check, 11); drop(blob_check); @@ -737,10 +648,7 @@ mod tests { writer.sync().await.unwrap(); // Verify resize - let (blob, size, _) = context - .open("partition", b"resize_write", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"resize_write").await.unwrap(); assert_eq!(size, 5); let mut reader = Read::new(blob, size, NZUsize!(5)); let mut buf = vec![0u8; 5]; @@ -753,10 +661,7 @@ mod tests { writer.sync().await.unwrap(); // Verify overwrite - let (blob, size, _) = context - .open("partition", b"resize_write", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"resize_write").await.unwrap(); assert_eq!(size, 5); let mut reader = Read::new(blob, size, NZUsize!(5)); let mut buf = vec![0u8; 5]; @@ 
-769,10 +674,7 @@ mod tests { writer.sync().await.unwrap(); // Verify resize - let (blob, size, _) = context - .open("partition", b"resize_write", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"resize_write").await.unwrap(); assert_eq!(size, 10); let mut reader = Read::new(blob, size, NZUsize!(10)); let mut buf = vec![0u8; 10]; @@ -781,10 +683,7 @@ mod tests { assert_eq!(&buf[5..10], [0u8; 5]); // Test resize to zero - let (blob_zero, size, _) = context - .open("partition", b"resize_zero", TEST_VERSIONS) - .await - .unwrap(); + let (blob_zero, size, _) = context.open("partition", b"resize_zero").await.unwrap(); let writer_zero = Write::new(blob_zero.clone(), size, NZUsize!(10)); writer_zero .write_at(b"some data".to_vec(), 0) @@ -799,10 +698,7 @@ mod tests { assert_eq!(writer_zero.size().await, 0); // Ensure the blob is empty - let (_, size_z, _) = context - .open("partition", b"resize_zero", TEST_VERSIONS) - .await - .unwrap(); + let (_, size_z, _) = context.open("partition", b"resize_zero").await.unwrap(); assert_eq!(size_z, 0); }); } @@ -812,10 +708,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test reading through writer's read_at method (buffer + blob reads) - let (blob, size, _) = context - .open("partition", b"read_at_writer", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"read_at_writer").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Write data that stays in buffer @@ -866,10 +759,8 @@ mod tests { // Verify complete content by reopening writer.sync().await.unwrap(); assert_eq!(writer.size().await, 30); - let (final_blob, final_size, _) = context - .open("partition", b"read_at_writer", TEST_VERSIONS) - .await - .unwrap(); + let (final_blob, final_size, _) = + context.open("partition", b"read_at_writer").await.unwrap(); assert_eq!(final_size, 30); let mut final_reader = Read::new(final_blob, final_size, NZUsize!(30)); let mut full_content = vec![0u8; 30]; @@ -886,10 +777,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writes that cannot be merged into buffer (non-contiguous/too large) - let (blob, size, _) = context - .open("partition", b"write_straddle", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"write_straddle").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Fill buffer completely @@ -903,10 +791,8 @@ mod tests { assert_eq!(writer.size().await, 18); // Verify data with gap - let (blob_check, size_check, _) = context - .open("partition", b"write_straddle", TEST_VERSIONS) - .await - .unwrap(); + let (blob_check, size_check, _) = + context.open("partition", b"write_straddle").await.unwrap(); assert_eq!(size_check, 18); let mut reader = Read::new(blob_check, size_check, NZUsize!(20)); let mut buf = vec![0u8; 18]; @@ -918,10 +804,7 @@ mod tests { assert_eq!(buf, expected); // Test write that exceeds buffer capacity - let (blob2, size, _) = context - .open("partition", b"write_straddle2", TEST_VERSIONS) - .await - .unwrap(); + let (blob2, size, _) = context.open("partition", b"write_straddle2").await.unwrap(); let writer2 = Write::new(blob2.clone(), size, NZUsize!(10)); writer2.write_at(b"0123456789".to_vec(), 0).await.unwrap(); assert_eq!(writer2.size().await, 10); @@ -933,10 +816,8 @@ mod tests { assert_eq!(writer2.size().await, 17); // Verify overwrite result - let 
(blob_check2, size_check2, _) = context - .open("partition", b"write_straddle2", TEST_VERSIONS) - .await - .unwrap(); + let (blob_check2, size_check2, _) = + context.open("partition", b"write_straddle2").await.unwrap(); assert_eq!(size_check2, 17); let mut reader2 = Read::new(blob_check2, size_check2, NZUsize!(20)); let mut buf2 = vec![0u8; 17]; @@ -950,10 +831,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test that closing writer flushes and persists buffered data - let (blob_orig, size, _) = context - .open("partition", b"write_close", TEST_VERSIONS) - .await - .unwrap(); + let (blob_orig, size, _) = context.open("partition", b"write_close").await.unwrap(); let writer = Write::new(blob_orig.clone(), size, NZUsize!(8)); writer.write_at(b"pending".to_vec(), 0).await.unwrap(); assert_eq!(writer.size().await, 7); @@ -962,10 +840,8 @@ mod tests { writer.sync().await.unwrap(); // Verify data persistence - let (blob_check, size_check, _) = context - .open("partition", b"write_close", TEST_VERSIONS) - .await - .unwrap(); + let (blob_check, size_check, _) = + context.open("partition", b"write_close").await.unwrap(); assert_eq!(size_check, 7); let mut reader = Read::new(blob_check, size_check, NZUsize!(8)); let mut buf = [0u8; 7]; @@ -980,7 +856,7 @@ mod tests { executor.start(|context| async move { // Test direct writes when data exceeds buffer capacity let (blob, size, _) = context - .open("partition", b"write_direct_size", TEST_VERSIONS) + .open("partition", b"write_direct_size") .await .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(5)); @@ -995,7 +871,7 @@ mod tests { // Verify direct write worked let (blob_check, size_check, _) = context - .open("partition", b"write_direct_size", TEST_VERSIONS) + .open("partition", b"write_direct_size") .await .unwrap(); assert_eq!(size_check, 10); @@ -1017,7 +893,7 @@ mod tests { // Verify final state let (blob_check2, size_check2, _) = context - .open("partition", b"write_direct_size", TEST_VERSIONS) + .open("partition", b"write_direct_size") .await .unwrap(); assert_eq!(size_check2, 13); @@ -1034,7 +910,7 @@ mod tests { executor.start(|context| async move { // Test complex buffer operations: overwrite and extend within capacity let (blob, size, _) = context - .open("partition", b"overwrite_extend_buf", TEST_VERSIONS) + .open("partition", b"overwrite_extend_buf") .await .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(15)); @@ -1056,7 +932,7 @@ mod tests { // Verify persisted result let (blob_check, size_check, _) = context - .open("partition", b"overwrite_extend_buf", TEST_VERSIONS) + .open("partition", b"overwrite_extend_buf") .await .unwrap(); assert_eq!(size_check, 15); @@ -1072,10 +948,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writing at the current logical end of the blob - let (blob, size, _) = context - .open("partition", b"write_end", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open("partition", b"write_end").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(20)); // Write initial data @@ -1092,10 +965,8 @@ mod tests { writer.sync().await.unwrap(); // Verify complete result - let (blob_check, size_check, _) = context - .open("partition", b"write_end", TEST_VERSIONS) - .await - .unwrap(); + let (blob_check, size_check, _) = + context.open("partition", b"write_end").await.unwrap(); assert_eq!(size_check, 13); let mut reader = Read::new(blob_check, 
size_check, NZUsize!(13)); let mut buf = vec![0u8; 13]; @@ -1110,11 +981,7 @@ mod tests { executor.start(|context| async move { // Test multiple appends using writer.size() let (blob, size, _) = context - .open( - "partition", - b"write_multiple_appends_at_size", - TEST_VERSIONS, - ) + .open("partition", b"write_multiple_appends_at_size") .await .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(5)); // Small buffer @@ -1145,11 +1012,7 @@ mod tests { // Verify final content let (blob_check, size_check, _) = context - .open( - "partition", - b"write_multiple_appends_at_size", - TEST_VERSIONS, - ) + .open("partition", b"write_multiple_appends_at_size") .await .unwrap(); assert_eq!(size_check, 9); @@ -1166,11 +1029,7 @@ mod tests { executor.start(|context| async move { // Test writing non-contiguously, then appending at the new size let (blob, size, _) = context - .open( - "partition", - b"write_non_contiguous_then_append", - TEST_VERSIONS, - ) + .open("partition", b"write_non_contiguous_then_append") .await .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); @@ -1197,11 +1056,7 @@ mod tests { // Verify final content let (blob_check, size_check, _) = context - .open( - "partition", - b"write_non_contiguous_then_append", - TEST_VERSIONS, - ) + .open("partition", b"write_non_contiguous_then_append") .await .unwrap(); assert_eq!(size_check, 35); @@ -1223,7 +1078,7 @@ mod tests { executor.start(|context| async move { // Test truncating, then appending at the new size let (blob, size, _) = context - .open("partition", b"resize_then_append_at_size", TEST_VERSIONS) + .open("partition", b"resize_then_append_at_size") .await .unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); @@ -1258,7 +1113,7 @@ mod tests { // Verify final content let (blob_check, size_check, _) = context - .open("partition", b"resize_then_append_at_size", TEST_VERSIONS) + .open("partition", b"resize_then_append_at_size") .await .unwrap(); assert_eq!(size_check, 10); diff --git a/runtime/src/utils/buffer/pool.rs b/runtime/src/utils/buffer/pool.rs index 7ae12eac28..c7e82a6c04 100644 --- a/runtime/src/utils/buffer/pool.rs +++ b/runtime/src/utils/buffer/pool.rs @@ -407,9 +407,6 @@ mod tests { const PAGE_SIZE: usize = 1024; - /// Default version range for tests - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; - #[test_traced] fn test_pool_basic() { let mut pool: Pool = Pool::new(10); @@ -459,7 +456,7 @@ mod tests { executor.start(|context| async move { // Populate a blob with 11 consecutive pages of data. let (blob, size, _) = context - .open("test", "blob".as_bytes(), TEST_VERSIONS) + .open("test", "blob".as_bytes()) .await .expect("Failed to open blob"); assert_eq!(size, 0); diff --git a/runtime/src/utils/buffer/read.rs b/runtime/src/utils/buffer/read.rs index 55b51879cd..db1f53fdac 100644 --- a/runtime/src/utils/buffer/read.rs +++ b/runtime/src/utils/buffer/read.rs @@ -16,7 +16,7 @@ use std::num::NonZeroUsize; /// let executor = deterministic::Runner::default(); /// executor.start(|context| async move { /// // Open a blob and add some data (e.g., a journal file) -/// let (blob, size, blob_version) = context.open("my_partition", b"my_data", BLOB_VERSION..=BLOB_VERSION).await.expect("unable to open blob"); +/// let (blob, size, blob_version) = context.open("my_partition", b"my_data").await.expect("unable to open blob"); /// assert_eq!(blob_version, BLOB_VERSION); /// let data = b"Hello, world! 
This is a test.".to_vec(); /// let size = data.len() as u64; diff --git a/runtime/src/utils/buffer/write.rs b/runtime/src/utils/buffer/write.rs index a9e7fd9c70..8cfc99af6e 100644 --- a/runtime/src/utils/buffer/write.rs +++ b/runtime/src/utils/buffer/write.rs @@ -14,7 +14,7 @@ use std::{num::NonZeroUsize, sync::Arc}; /// let executor = deterministic::Runner::default(); /// executor.start(|context| async move { /// // Open a blob for writing -/// let (blob, size, _) = context.open("my_partition", b"my_data", 0..=0).await.expect("unable to open blob"); +/// let (blob, size, _) = context.open("my_partition", b"my_data").await.expect("unable to open blob"); /// assert_eq!(size, 0); /// /// // Create a buffered writer with 16-byte buffer @@ -28,7 +28,7 @@ use std::{num::NonZeroUsize, sync::Arc}; /// blob.sync().await.expect("sync failed"); /// /// // Read back the data to verify -/// let (blob, size, _) = context.open("my_partition", b"my_data", 0..=0).await.expect("unable to reopen blob"); +/// let (blob, size, _) = context.open("my_partition", b"my_data").await.expect("unable to reopen blob"); /// let mut reader = Read::new(blob, size, NZUsize!(8)); /// let mut buf = vec![0u8; size as usize]; /// reader.read_exact(&mut buf, size as usize).await.expect("read failed"); diff --git a/runtime/src/utils/cell.rs b/runtime/src/utils/cell.rs index 06cd4b9773..510005fc90 100644 --- a/runtime/src/utils/cell.rs +++ b/runtime/src/utils/cell.rs @@ -231,13 +231,13 @@ where { type Blob = ::Blob; - fn open( + fn open_versioned( &self, partition: &str, name: &[u8], versions: RangeInclusive<u16>, ) -> impl Future<Output = Result<(Self::Blob, u64, u16), Error>> + Send { - self.as_present().open(partition, name, versions) + self.as_present().open_versioned(partition, name, versions) } fn remove( diff --git a/storage/src/archive/prunable/mod.rs b/storage/src/archive/prunable/mod.rs index c111ebeb9d..61544fab15 100644 --- a/storage/src/archive/prunable/mod.rs +++ b/storage/src/archive/prunable/mod.rs @@ -198,7 +198,6 @@ mod tests { const DEFAULT_REPLAY_BUFFER: usize = 4096; const PAGE_SIZE: NonZeroUsize = NZUsize!(1024); const PAGE_CACHE_SIZE: NonZeroUsize = NZUsize!(10); - const TEST_VERSIONS: std::ops::RangeInclusive<u16> = 0..=0; fn test_key(key: &str) -> FixedBytes<64> { let mut buf = [0u8; 64]; @@ -297,7 +296,7 @@ mod tests { // Corrupt the value let section = (index / DEFAULT_ITEMS_PER_SECTION) * DEFAULT_ITEMS_PER_SECTION; let (blob, _, _) = context - .open("test_partition", &section.to_be_bytes(), TEST_VERSIONS) + .open("test_partition", &section.to_be_bytes()) .await .unwrap(); let value_location = 4 /* journal size */ + UInt(1u64).encode_size() as u64 /* index */ + 64 + 4 /* value length */; diff --git a/storage/src/cache/mod.rs b/storage/src/cache/mod.rs index ca2280ae18..9b845346b7 100644 --- a/storage/src/cache/mod.rs +++ b/storage/src/cache/mod.rs @@ -138,7 +138,6 @@ mod tests { const DEFAULT_REPLAY_BUFFER: usize = 4096; const PAGE_SIZE: NonZeroUsize = NZUsize!(1024); const PAGE_CACHE_SIZE: NonZeroUsize = NZUsize!(10); - const TEST_VERSIONS: std::ops::RangeInclusive<u16> = 0..=0; #[test_traced] fn test_cache_compression_then_none() { @@ -221,7 +220,7 @@ mod tests { // Corrupt the value let section = (index / DEFAULT_ITEMS_PER_BLOB) * DEFAULT_ITEMS_PER_BLOB; let (blob, _, _) = context - .open("test_partition", &section.to_be_bytes(), TEST_VERSIONS) + .open("test_partition", &section.to_be_bytes()) .await .unwrap(); let value_location = 4 /* journal size */ + UInt(1u64).encode_size() as u64 /* index */ + 4 /* value length */; diff --git a/storage/src/freezer/mod.rs 
b/storage/src/freezer/mod.rs index d27e8d9214..627c8d2f01 100644 --- a/storage/src/freezer/mod.rs +++ b/storage/src/freezer/mod.rs @@ -255,7 +255,6 @@ mod tests { const DEFAULT_TABLE_REPLAY_BUFFER: usize = 64 * 1024; // 64KB const PAGE_SIZE: NonZeroUsize = NZUsize!(1024); const PAGE_CACHE_SIZE: NonZeroUsize = NZUsize!(10); - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; fn test_key(key: &str) -> FixedBytes<64> { let mut buf = [0u8; 64]; @@ -704,10 +703,7 @@ mod tests { // Corrupt the table by writing partial entry { - let (blob, _, _) = context - .open(&cfg.table_partition, b"table", TEST_VERSIONS) - .await - .unwrap(); + let (blob, _, _) = context.open(&cfg.table_partition, b"table").await.unwrap(); // Write incomplete table entry (only 10 bytes instead of 24) blob.write_at(vec![0xFF; 10], 0).await.unwrap(); blob.sync().await.unwrap(); @@ -767,10 +763,7 @@ mod tests { // Corrupt the CRC in the index entry { - let (blob, _, _) = context - .open(&cfg.table_partition, b"table", TEST_VERSIONS) - .await - .unwrap(); + let (blob, _, _) = context.open(&cfg.table_partition, b"table").await.unwrap(); // Read the first entry let entry_data = blob.read_at(vec![0u8; 24], 0).await.unwrap(); let mut corrupted = entry_data.as_ref().to_vec(); @@ -834,10 +827,7 @@ mod tests { // Add extra bytes to the table blob { - let (blob, size, _) = context - .open(&cfg.table_partition, b"table", TEST_VERSIONS) - .await - .unwrap(); + let (blob, size, _) = context.open(&cfg.table_partition, b"table").await.unwrap(); // Append garbage data blob.write_at(hex!("0xdeadbeef").to_vec(), size) .await diff --git a/storage/src/freezer/storage.rs b/storage/src/freezer/storage.rs index e14f5dd588..8ebf33929e 100644 --- a/storage/src/freezer/storage.rs +++ b/storage/src/freezer/storage.rs @@ -165,9 +165,6 @@ impl FixedSize for Checkpoint { /// Name of the table blob. const TABLE_BLOB_NAME: &[u8] = b"table"; -/// Current version of the freezer blob format. -const BLOB_VERSION: std::ops::RangeInclusive = 0..=0; - /// Single table entry stored in the table blob. #[derive(Debug, Clone, PartialEq)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] @@ -571,7 +568,7 @@ impl Freezer { // Open table blob let (table, table_len, _) = context - .open(&config.table_partition, TABLE_BLOB_NAME, BLOB_VERSION) + .open(&config.table_partition, TABLE_BLOB_NAME) .await?; // Determine checkpoint based on initialization scenario diff --git a/storage/src/journal/contiguous/fixed.rs b/storage/src/journal/contiguous/fixed.rs index 55746c6fb4..4503c74bcc 100644 --- a/storage/src/journal/contiguous/fixed.rs +++ b/storage/src/journal/contiguous/fixed.rs @@ -80,9 +80,6 @@ use std::{ }; use tracing::{debug, trace, warn}; -/// Current version of the fixed journal blob format. -pub(crate) const BLOB_VERSION: std::ops::RangeInclusive = 0..=0; - /// Configuration for `Journal` storage. 
#[derive(Clone)] pub struct Config { @@ -164,7 +161,7 @@ impl> Journal { }; for name in stored_blobs { let (blob, size, _) = context - .open(&cfg.partition, &name, BLOB_VERSION) + .open(&cfg.partition, &name) .await .map_err(Error::Runtime)?; let index = match name.try_into() { @@ -193,9 +190,7 @@ impl> Journal { } } else { debug!("no blobs found"); - let (blob, size, _) = context - .open(&cfg.partition, &0u64.to_be_bytes(), BLOB_VERSION) - .await?; + let (blob, size, _) = context.open(&cfg.partition, &0u64.to_be_bytes()).await?; assert_eq!(size, 0); blobs.insert(0, (blob, size)); } @@ -228,7 +223,7 @@ impl> Journal { blobs.insert(tail_index, (tail, tail_size)); tail_index += 1; (tail, tail_size, _) = context - .open(&cfg.partition, &tail_index.to_be_bytes(), BLOB_VERSION) + .open(&cfg.partition, &tail_index.to_be_bytes()) .await?; assert_eq!(tail_size, 0); tracked.inc(); @@ -362,11 +357,7 @@ impl> Journal { debug!(blob = next_blob_index, "creating next blob"); let (next_blob, size, _) = self .context - .open( - &self.cfg.partition, - &next_blob_index.to_be_bytes(), - BLOB_VERSION, - ) + .open(&self.cfg.partition, &next_blob_index.to_be_bytes()) .await?; assert_eq!(size, 0); let next_blob = Append::new( @@ -730,8 +721,6 @@ mod tests { } } - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; - #[test_traced] fn test_fixed_journal_append_and_prune() { // Initialize the deterministic context @@ -983,7 +972,7 @@ mod tests { let checksum_offset = Digest::SIZE as u64 + (ITEMS_PER_BLOB.get() / 2) * (Digest::SIZE + u32::SIZE) as u64; let (blob, _, _) = context - .open(&cfg.partition, &40u64.to_be_bytes(), TEST_VERSIONS) + .open(&cfg.partition, &40u64.to_be_bytes()) .await .expect("Failed to open blob"); // Write incorrect checksum @@ -1063,7 +1052,7 @@ mod tests { // Manually truncate a non-tail blob to make sure it's detected during initialization. let (blob, size, _) = context - .open(&cfg.partition, &40u64.to_be_bytes(), TEST_VERSIONS) + .open(&cfg.partition, &40u64.to_be_bytes()) .await .expect("Failed to open blob"); blob.resize(size - 1).await.expect("Failed to corrupt blob"); @@ -1112,7 +1101,7 @@ mod tests { // Truncate the tail blob by one byte, which should result in the 3rd item being // trimmed. let (blob, size, _) = context - .open(&cfg.partition, &1u64.to_be_bytes(), TEST_VERSIONS) + .open(&cfg.partition, &1u64.to_be_bytes()) .await .expect("Failed to open blob"); blob.resize(size - 1).await.expect("Failed to corrupt blob"); @@ -1136,7 +1125,7 @@ mod tests { // Corrupt the last item, ensuring last blob is trimmed to empty state. let (blob, size, _) = context - .open(&cfg.partition, &1u64.to_be_bytes(), TEST_VERSIONS) + .open(&cfg.partition, &1u64.to_be_bytes()) .await .expect("Failed to open blob"); blob.resize(size - 1).await.expect("Failed to corrupt blob"); @@ -1248,7 +1237,7 @@ mod tests { // Manually truncate most recent blob to simulate a partial write. let (blob, size, _) = context - .open(&cfg.partition, &1u64.to_be_bytes(), TEST_VERSIONS) + .open(&cfg.partition, &1u64.to_be_bytes()) .await .expect("Failed to open blob"); // truncate the most recent blob by 1 byte which corrupts the most recent item @@ -1306,7 +1295,7 @@ mod tests { // Manually truncate most recent blob to simulate a partial write. 
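// (Opening the blob directly from the runtime context, rather than through the journal, lets the test truncate the raw blob underneath it.)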
let (blob, size, _) = context - .open(&cfg.partition, &0u64.to_be_bytes(), TEST_VERSIONS) + .open(&cfg.partition, &0u64.to_be_bytes()) .await .expect("Failed to open blob"); // Truncate the most recent blob by 1 byte which corrupts the one appended item @@ -1356,7 +1345,7 @@ mod tests { // simulate a failure where the file was extended, but no bytes were written due to // failure. let (blob, size, _) = context - .open(&cfg.partition, &0u64.to_be_bytes(), TEST_VERSIONS) + .open(&cfg.partition, &0u64.to_be_bytes()) .await .expect("Failed to open blob"); blob.write_at(vec![0u8; Digest::SIZE * 3 - 1], size) @@ -1531,7 +1520,7 @@ mod tests { // Hash blob contents let (blob, size, _) = context - .open(&cfg.partition, &0u64.to_be_bytes(), TEST_VERSIONS) + .open(&cfg.partition, &0u64.to_be_bytes()) .await .expect("Failed to open blob"); assert!(size > 0); @@ -1546,7 +1535,7 @@ mod tests { ); blob.sync().await.expect("Failed to sync blob"); let (blob, size, _) = context - .open(&cfg.partition, &1u64.to_be_bytes(), TEST_VERSIONS) + .open(&cfg.partition, &1u64.to_be_bytes()) .await .expect("Failed to open blob"); assert!(size > 0); diff --git a/storage/src/journal/segmented/variable.rs b/storage/src/journal/segmented/variable.rs index 8ff40083f9..cd9eb677bd 100644 --- a/storage/src/journal/segmented/variable.rs +++ b/storage/src/journal/segmented/variable.rs @@ -114,9 +114,6 @@ use std::{ use tracing::{debug, trace, warn}; use zstd::{bulk::compress, decode_all}; -/// Current version of the variable journal blob format. -pub(crate) const BLOB_VERSION: std::ops::RangeInclusive = 0..=0; - /// Configuration for `Journal` storage. #[derive(Clone)] pub struct Config { @@ -191,7 +188,7 @@ impl Journal { Err(err) => return Err(Error::Runtime(err)), }; for name in stored_blobs { - let (blob, size, _) = context.open(&cfg.partition, &name, BLOB_VERSION).await?; + let (blob, size, _) = context.open(&cfg.partition, &name).await?; let hex_name = hex(&name); let section = match name.try_into() { Ok(section) => u64::from_be_bytes(section), @@ -536,10 +533,7 @@ impl Journal { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => { let name = section.to_be_bytes(); - let (blob, size, _) = self - .context - .open(&self.cfg.partition, &name, BLOB_VERSION) - .await?; + let (blob, size, _) = self.context.open(&self.cfg.partition, &name).await?; let blob = Append::new( blob, size, @@ -1230,7 +1224,7 @@ mod tests { // Manually create a blob with an invalid name (not 8 bytes) let invalid_blob_name = b"invalid"; // Less than 8 bytes let (blob, _, _) = context - .open(&cfg.partition, invalid_blob_name, TEST_VERSIONS) + .open(&cfg.partition, invalid_blob_name) .await .expect("Failed to create blob with invalid name"); blob.sync().await.expect("Failed to sync blob"); @@ -1263,7 +1257,7 @@ mod tests { let section = 1u64; let blob_name = section.to_be_bytes(); let (blob, _, _) = context - .open(&cfg.partition, &blob_name, TEST_VERSIONS) + .open(&cfg.partition, &blob_name) .await .expect("Failed to create blob"); @@ -1318,7 +1312,7 @@ mod tests { let section = 1u64; let blob_name = section.to_be_bytes(); let (blob, _, _) = context - .open(&cfg.partition, &blob_name, TEST_VERSIONS) + .open(&cfg.partition, &blob_name) .await .expect("Failed to create blob"); @@ -1375,7 +1369,7 @@ mod tests { let section = 1u64; let blob_name = section.to_be_bytes(); let (blob, _, _) = context - .open(&cfg.partition, &blob_name, TEST_VERSIONS) + .open(&cfg.partition, &blob_name) .await .expect("Failed to create blob"); @@ -1437,7 +1431,7 
@@ mod tests {
         let section = 1u64;
         let blob_name = section.to_be_bytes();
         let (blob, _, _) = context
-            .open(&cfg.partition, &blob_name, TEST_VERSIONS)
+            .open(&cfg.partition, &blob_name)
             .await
             .expect("Failed to create blob");
@@ -1482,7 +1476,7 @@ mod tests {
         // Confirm blob is expected length
         let (_, blob_size, _) = context
-            .open(&cfg.partition, &section.to_be_bytes(), TEST_VERSIONS)
+            .open(&cfg.partition, &section.to_be_bytes())
             .await
             .expect("Failed to open blob");
         assert_eq!(blob_size, 0);
@@ -1529,7 +1523,7 @@ mod tests {
         // Manually corrupt the end of the second blob
         let (blob, blob_size, _) = context
-            .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS)
+            .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
         blob.resize(blob_size - 4)
@@ -1572,7 +1566,7 @@ mod tests {
         // entry = 1 (varint for 4) + 4 (data) + 4 (checksum) = 9 bytes
         // Item 2 ends at position 16 + 9 = 25
         let (_, blob_size, _) = context
-            .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS)
+            .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
         assert_eq!(blob_size, 25);
@@ -1622,7 +1616,7 @@ mod tests {
         // Items 1 and 2 at positions 0 and 16, item 3 (value 5) at position 32
         // Item 3 = 1 (varint) + 4 (data) + 4 (checksum) = 9 bytes, ends at 41
         let (_, blob_size, _) = context
-            .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS)
+            .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
         assert_eq!(blob_size, 41);
@@ -1701,7 +1695,7 @@ mod tests {
         // Manually corrupt the end of the second blob
         let (blob, blob_size, _) = context
-            .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS)
+            .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
         blob.resize(blob_size - 4)
@@ -1754,7 +1748,7 @@ mod tests {
         // entry = 1 (varint for 8) + 8 (u64 data) + 4 (checksum) = 13 bytes
         // Items at positions 0, 16, 32; item 3 ends at 32 + 13 = 45
         let (_, blob_size, _) = context
-            .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS)
+            .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
         assert_eq!(blob_size, 45);
@@ -1833,7 +1827,7 @@ mod tests {
         // Manually add extra data to the end of the second blob
         let (blob, blob_size, _) = context
-            .open(&cfg.partition, &2u64.to_be_bytes(), TEST_VERSIONS)
+            .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
         blob.write_at(vec![0u8; 16], blob_size)
@@ -1862,8 +1856,6 @@ mod tests {
         });
     }

-    const TEST_VERSIONS: std::ops::RangeInclusive<u16> = 0..=0;
-
     // Define `MockBlob` that returns an offset length that should overflow
     #[derive(Clone)]
     struct MockBlob {}
@@ -1903,13 +1895,13 @@ mod tests {
     impl Storage for MockStorage {
         type Blob = MockBlob;

-        async fn open(
+        async fn open_versioned(
             &self,
             _partition: &str,
             _name: &[u8],
             _versions: std::ops::RangeInclusive<u16>,
         ) -> Result<(MockBlob, u64, u16), RError> {
-            Ok((MockBlob {}, self.len, *TEST_VERSIONS.end()))
+            Ok((MockBlob {}, self.len, 0))
         }

         async fn remove(&self, _partition: &str, _name: Option<&[u8]>) -> Result<(), RError> {
@@ -2137,7 +2129,7 @@ mod tests {
         // Hash blob contents
         let (blob, size, _) = context
-            .open(&cfg.partition, &1u64.to_be_bytes(), TEST_VERSIONS)
+            .open(&cfg.partition, &1u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
         assert!(size > 0);
diff --git a/storage/src/metadata/mod.rs b/storage/src/metadata/mod.rs
index 3746a39e16..4df9ea4388 100644
--- a/storage/src/metadata/mod.rs
+++ b/storage/src/metadata/mod.rs
@@ -94,8 +94,6 @@ mod tests
{ use commonware_utils::{hex, sequence::U64}; use rand::{Rng, RngCore}; - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; - #[test_traced] fn test_put_get_clear() { // Initialize the deterministic context @@ -317,7 +315,7 @@ mod tests { drop(metadata); // Corrupt the metadata store - let (blob, _, _) = context.open("test", b"left", TEST_VERSIONS).await.unwrap(); + let (blob, _, _) = context.open("test", b"left").await.unwrap(); blob.write_at(b"corrupted".to_vec(), 0).await.unwrap(); blob.sync().await.unwrap(); @@ -372,10 +370,10 @@ mod tests { drop(metadata); // Corrupt the metadata store - let (blob, _, _) = context.open("test", b"left", TEST_VERSIONS).await.unwrap(); + let (blob, _, _) = context.open("test", b"left").await.unwrap(); blob.write_at(b"corrupted".to_vec(), 0).await.unwrap(); blob.sync().await.unwrap(); - let (blob, _, _) = context.open("test", b"right", TEST_VERSIONS).await.unwrap(); + let (blob, _, _) = context.open("test", b"right").await.unwrap(); blob.write_at(b"corrupted".to_vec(), 0).await.unwrap(); blob.sync().await.unwrap(); @@ -434,7 +432,7 @@ mod tests { drop(metadata); // Corrupt the metadata store - let (blob, len, _) = context.open("test", b"left", TEST_VERSIONS).await.unwrap(); + let (blob, len, _) = context.open("test", b"left").await.unwrap(); blob.resize(len - 8).await.unwrap(); blob.sync().await.unwrap(); @@ -487,7 +485,7 @@ mod tests { drop(metadata); // Corrupt the metadata store - let (blob, _, _) = context.open("test", b"left", TEST_VERSIONS).await.unwrap(); + let (blob, _, _) = context.open("test", b"left").await.unwrap(); blob.resize(5).await.unwrap(); blob.sync().await.unwrap(); diff --git a/storage/src/metadata/storage.rs b/storage/src/metadata/storage.rs index 7e4d3a4eb9..14c1aa62f9 100644 --- a/storage/src/metadata/storage.rs +++ b/storage/src/metadata/storage.rs @@ -13,9 +13,6 @@ use tracing::{debug, warn}; /// The names of the two blobs that store metadata. const BLOB_NAMES: [&[u8]; 2] = [b"left", b"right"]; -/// Current version of the metadata blob format. -const BLOB_VERSION: std::ops::RangeInclusive = 0..=0; - /// Information about a value in a [Wrapper]. struct Info { start: usize, @@ -82,12 +79,8 @@ impl Metadata { /// Initialize a new [Metadata] instance. pub async fn init(context: E, cfg: Config) -> Result { // Open dedicated blobs - let (left_blob, left_len, _) = context - .open(&cfg.partition, BLOB_NAMES[0], BLOB_VERSION) - .await?; - let (right_blob, right_len, _) = context - .open(&cfg.partition, BLOB_NAMES[1], BLOB_VERSION) - .await?; + let (left_blob, left_len, _) = context.open(&cfg.partition, BLOB_NAMES[0]).await?; + let (right_blob, right_len, _) = context.open(&cfg.partition, BLOB_NAMES[1]).await?; // Find latest blob (check which includes a hash of the other) let (left_map, left_wrapper) = diff --git a/storage/src/mmr/journaled.rs b/storage/src/mmr/journaled.rs index c6f74c8f3a..e3f89be0d9 100644 --- a/storage/src/mmr/journaled.rs +++ b/storage/src/mmr/journaled.rs @@ -843,7 +843,6 @@ mod tests { const PAGE_SIZE: usize = 111; const PAGE_CACHE_SIZE: usize = 5; - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; fn test_config() -> Config { Config { @@ -1139,7 +1138,7 @@ mod tests { // the last blob by a single byte. 
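// (Per the size assertions below, each node record is N + 4 = 36 bytes: a 32-byte digest plus a 4-byte checksum.)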
let partition: String = "journal_partition".into(); let (blob, len, _) = context - .open(&partition, &71u64.to_be_bytes(), TEST_VERSIONS) + .open(&partition, &71u64.to_be_bytes()) .await .expect("Failed to open blob"); assert_eq!(len, 36); // N+4 = 36 bytes per node, 1 node in the last blob @@ -1172,7 +1171,7 @@ mod tests { .await .expect("Failed to remove blob"); let (blob, len, _) = context - .open(&partition, &70u64.to_be_bytes(), TEST_VERSIONS) + .open(&partition, &70u64.to_be_bytes()) .await .expect("Failed to open blob"); assert_eq!(len, 36 * 7); // this blob should be full. diff --git a/storage/src/ordinal/mod.rs b/storage/src/ordinal/mod.rs index 3030b9a422..6dc105f8ec 100644 --- a/storage/src/ordinal/mod.rs +++ b/storage/src/ordinal/mod.rs @@ -144,7 +144,6 @@ mod tests { const DEFAULT_ITEMS_PER_BLOB: u64 = 1000; const DEFAULT_WRITE_BUFFER: usize = 4096; const DEFAULT_REPLAY_BUFFER: usize = 1024 * 1024; - const TEST_VERSIONS: std::ops::RangeInclusive = 0..=0; #[test_traced] fn test_put_get() { @@ -520,7 +519,7 @@ mod tests { // Corrupt the data { let (blob, _, _) = context - .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) + .open("test_ordinal", &0u64.to_be_bytes()) .await .unwrap(); // Corrupt the CRC by changing a byte @@ -647,7 +646,7 @@ mod tests { // Corrupt by writing partial record (only value, no CRC) { let (blob, _, _) = context - .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) + .open("test_ordinal", &0u64.to_be_bytes()) .await .unwrap(); // Overwrite second record with partial data (32 bytes instead of 36) @@ -714,7 +713,7 @@ mod tests { // Corrupt the value portion of a record { let (blob, _, _) = context - .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) + .open("test_ordinal", &0u64.to_be_bytes()) .await .unwrap(); // Corrupt some bytes in the value of the first record @@ -773,7 +772,7 @@ mod tests { { // Corrupt CRC in first blob let (blob, _, _) = context - .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) + .open("test_ordinal", &0u64.to_be_bytes()) .await .unwrap(); blob.write_at(vec![0xFF], 32).await.unwrap(); // Corrupt CRC of index 0 @@ -781,7 +780,7 @@ mod tests { // Corrupt value in second blob (which will invalidate CRC) let (blob, _, _) = context - .open("test_ordinal", &1u64.to_be_bytes(), TEST_VERSIONS) + .open("test_ordinal", &1u64.to_be_bytes()) .await .unwrap(); blob.write_at(vec![0xFF; 4], 5).await.unwrap(); // Corrupt value of index 10 @@ -845,7 +844,7 @@ mod tests { // Add extra bytes at the end of blob { let (blob, size, _) = context - .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) + .open("test_ordinal", &0u64.to_be_bytes()) .await .unwrap(); // Add garbage data that forms a complete but invalid record @@ -902,7 +901,7 @@ mod tests { // Create blob with zero-filled space { let (blob, _, _) = context - .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) + .open("test_ordinal", &0u64.to_be_bytes()) .await .unwrap(); @@ -1953,7 +1952,7 @@ mod tests { // Corrupt record at index 2 { let (blob, _, _) = context - .open("test_ordinal", &0u64.to_be_bytes(), TEST_VERSIONS) + .open("test_ordinal", &0u64.to_be_bytes()) .await .unwrap(); // Corrupt the CRC of record at index 2 diff --git a/storage/src/ordinal/storage.rs b/storage/src/ordinal/storage.rs index 847046366c..d3c3789b02 100644 --- a/storage/src/ordinal/storage.rs +++ b/storage/src/ordinal/storage.rs @@ -15,9 +15,6 @@ use std::{ }; use tracing::{debug, warn}; -/// Current version of the ordinal blob format. 
-const BLOB_VERSION: std::ops::RangeInclusive<u16> = 0..=0;
-
 /// Value stored in the index file.
 #[derive(Debug, Clone)]
 struct Record> {
@@ -123,7 +120,7 @@ impl> Ordinal {
         // Open all blobs and check for partial records
         for name in stored_blobs {
-            let (blob, mut len, _) = context.open(&config.partition, &name, BLOB_VERSION).await?;
+            let (blob, mut len, _) = context.open(&config.partition, &name).await?;
             let index = match name.try_into() {
                 Ok(index) => u64::from_be_bytes(index),
                 Err(nm) => Err(Error::InvalidBlobName(hex(&nm)))?,
@@ -260,7 +257,7 @@ impl> Ordinal {
         if let Entry::Vacant(entry) = self.blobs.entry(section) {
             let (blob, len, _) = self
                 .context
-                .open(&self.config.partition, &section.to_be_bytes(), BLOB_VERSION)
+                .open(&self.config.partition, &section.to_be_bytes())
                 .await?;
             entry.insert(Write::new(blob, len, self.config.write_buffer));
             debug!(section, "created blob");
diff --git a/storage/src/qmdb/any/unordered/fixed/sync.rs b/storage/src/qmdb/any/unordered/fixed/sync.rs
index b81e34267e..41bb0cfbac 100644
--- a/storage/src/qmdb/any/unordered/fixed/sync.rs
+++ b/storage/src/qmdb/any/unordered/fixed/sync.rs
@@ -218,11 +218,7 @@ pub(crate) async fn init_journal_at_size

From: Dan Laine
Date: Tue, 6 Jan 2026 20:25:42 -0500
Subject: [PATCH 03/17] remove application version return value from open

---
 runtime/fuzz/fuzz_targets/buffer.rs          |   8 +-
 runtime/src/deterministic.rs                 |   8 +-
 runtime/src/lib.rs                           |  52 ++++-----
 runtime/src/storage/audited.rs               |   4 +-
 runtime/src/storage/iouring.rs               |   6 +-
 runtime/src/storage/memory.rs                |   9 +-
 runtime/src/storage/metered.rs               |   8 +-
 runtime/src/storage/mod.rs                   |  26 ++---
 runtime/src/storage/tokio/mod.rs             |   9 +-
 runtime/src/utils/buffer/append.rs           |  12 +-
 runtime/src/utils/buffer/mod.rs              | 112 +++++++++----------
 runtime/src/utils/buffer/pool.rs             |   2 +-
 runtime/src/utils/buffer/read.rs             |   3 +-
 runtime/src/utils/buffer/write.rs            |   4 +-
 storage/src/archive/prunable/mod.rs          |   2 +-
 storage/src/cache/mod.rs                     |   2 +-
 storage/src/freezer/mod.rs                   |   6 +-
 storage/src/freezer/storage.rs               |   2 +-
 storage/src/journal/contiguous/fixed.rs      |  26 ++---
 storage/src/journal/segmented/variable.rs    |  30 ++---
 storage/src/metadata/mod.rs                  |  10 +-
 storage/src/metadata/storage.rs              |   4 +-
 storage/src/mmr/journaled.rs                 |   4 +-
 storage/src/ordinal/mod.rs                   |  16 +--
 storage/src/ordinal/storage.rs               |   4 +-
 storage/src/qmdb/any/unordered/fixed/sync.rs |   2 +-
 26 files changed, 182 insertions(+), 189 deletions(-)

diff --git a/runtime/fuzz/fuzz_targets/buffer.rs b/runtime/fuzz/fuzz_targets/buffer.rs
index 45375be064..de596ae15b 100644
--- a/runtime/fuzz/fuzz_targets/buffer.rs
+++ b/runtime/fuzz/fuzz_targets/buffer.rs
@@ -87,7 +87,7 @@ enum FuzzOperation {
 fn fuzz(input: FuzzInput) {
     let executor = deterministic::Runner::default();
     executor.start(|context| async move {
-        let (blob, initial_size, _) = context
+        let (blob, initial_size) = context
             .open("test_partition", SHARED_BLOB)
             .await
             .expect("cannot open context");
@@ -113,7 +113,7 @@ fn fuzz(input: FuzzInput) {
                 let blob_size = blob_size as u64;
                 let buffer_size = (buffer_size as usize).clamp(1, MAX_SIZE);

-                let (blob, size, _) = context
+                let (blob, size) = context
                     .open("test_partition", b"read_blob")
                     .await
                     .expect("cannot open context");
@@ -134,7 +134,7 @@ fn fuzz(input: FuzzInput) {
             } => {
                 let capacity = (capacity as usize).clamp(1, MAX_SIZE);

-                let (blob, _, _) = context
+                let (blob, _) = context
                     .open("test_partition", b"write_blob")
                     .await
                     .expect("cannot open context");
@@ -152,7 +152,7 @@ fn fuzz(input: FuzzInput) {
                 let pool_page_size = NZUsize!((pool_page_size as
usize).clamp(1, MAX_SIZE)); let pool_capacity = NZUsize!((pool_capacity as usize).clamp(1, MAX_SIZE)); - let (blob, _, _) = context + let (blob, _) = context .open("test_partition", b"append_blob") .await .expect("cannot open write blob"); diff --git a/runtime/src/deterministic.rs b/runtime/src/deterministic.rs index d9ccfd4441..71aa8ee710 100644 --- a/runtime/src/deterministic.rs +++ b/runtime/src/deterministic.rs @@ -1564,7 +1564,7 @@ mod tests { // Run some tasks, sync storage, and recover the runtime let (state, checkpoint) = executor1.start_and_recover(|context| async move { - let (blob, _, _) = context.open(partition, name).await.unwrap(); + let (blob, _) = context.open(partition, name).await.unwrap(); blob.write_at(Vec::from(data), 0).await.unwrap(); blob.sync().await.unwrap(); context.auditor().state() @@ -1576,7 +1576,7 @@ mod tests { // Check that synced storage persists after recovery let executor = Runner::from(checkpoint); executor.start(|context| async move { - let (blob, len, _) = context.open(partition, name).await.unwrap(); + let (blob, len) = context.open(partition, name).await.unwrap(); assert_eq!(len, data.len() as u64); let read = blob.read_at(vec![0; data.len()], 0).await.unwrap(); assert_eq!(read.as_ref(), data); @@ -1610,7 +1610,7 @@ mod tests { // Run some tasks without syncing storage let (_, checkpoint) = executor.start_and_recover(|context| async move { let context = context.clone(); - let (blob, _, _) = context.open(partition, name).await.unwrap(); + let (blob, _) = context.open(partition, name).await.unwrap(); blob.write_at(data, 0).await.unwrap(); }); @@ -1619,7 +1619,7 @@ mod tests { // Check that unsynced storage does not persist after recovery executor.start(|context| async move { - let (_, len, _) = context.open(partition, name).await.unwrap(); + let (_, len) = context.open(partition, name).await.unwrap(); assert_eq!(len, 0); }); } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index adaf52fd34..26c0ad1832 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -546,17 +546,25 @@ pub trait Storage: Clone + Send + Sync + 'static { type Blob: Blob; /// [Storage::open_versioned] with [Header::DEFAULT_APPLICATION_VERSION] as the only value - /// in the version range. + /// in the versions range. The application version is omitted from the return value as it + /// is always [Header::DEFAULT_APPLICATION_VERSION]. 
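+    ///
+    /// # Example
+    ///
+    /// A minimal sketch of the convenience method (`context` is any [Storage]
+    /// implementation; the partition and blob names are illustrative):
+    ///
+    /// ```ignore
+    /// // Opens (or creates) the blob at the default application version and
+    /// // returns it with its logical size in bytes.
+    /// let (blob, size) = context.open("my_partition", b"my_data").await?;
+    /// ```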
 fn open(
     &self,
     partition: &str,
     name: &[u8],
-) -> impl Future<Output = Result<(Self::Blob, u64, u16), Error>> + Send {
-    self.open_versioned(
-        partition,
-        name,
-        Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION,
-    )
+) -> impl Future<Output = Result<(Self::Blob, u64), Error>> + Send {
+    let partition = partition.to_string();
+    let name = name.to_vec();
+    async move {
+        let (blob, size, _) = self
+            .open_versioned(
+                &partition,
+                &name,
+                Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION,
+            )
+            .await?;
+        Ok((blob, size))
+    }
 }

 /// Open an existing blob in a given partition or create a new one, returning
@@ -1149,12 +1157,11 @@ mod tests {
     let name = b"test_blob";

     // Open a new blob and verify returned version
-    let (blob, size, app_version) = context
+    let (blob, size) = context
         .open(partition, name)
         .await
         .expect("Failed to open blob");
     assert_eq!(size, 0, "new blob should have size 0");
-    assert_eq!(app_version, Header::DEFAULT_APPLICATION_VERSION);

     // Write data to the blob
     let data = b"Hello, Storage!";
@@ -1182,17 +1189,12 @@ mod tests {
         .expect("Failed to scan partition");
     assert!(blobs.contains(&name.to_vec()));

-    // Reopen the blob and verify version persists
-    let (blob, len, app_version) = context
+    // Reopen the blob
+    let (blob, len) = context
         .open(partition, name)
         .await
         .expect("Failed to reopen blob");
     assert_eq!(len, data.len() as u64);
-    assert_eq!(
-        app_version,
-        Header::DEFAULT_APPLICATION_VERSION,
-        "reopened blob should have same app version"
-    );

     // Read data part of message back
     let read = blob
@@ -1238,7 +1240,7 @@ mod tests {
     let name = b"test_blob_rw";

     // Open a new blob
-    let (blob, _, _) = context
+    let (blob, _) = context
         .open(partition, name)
         .await
         .expect("Failed to open blob");
@@ -1294,7 +1296,7 @@ mod tests {
     let name = b"test_blob_resize";

     // Open and write to a new blob
-    let (blob, _, _) = context
+    let (blob, _) = context
         .open(partition, name)
         .await
         .expect("Failed to open blob");
@@ -1306,7 +1308,7 @@ mod tests {
     blob.sync().await.expect("Failed to sync after write");

     // Re-open and check length
-    let (blob, len, _) = context.open(partition, name).await.unwrap();
+    let (blob, len) = context.open(partition, name).await.unwrap();
     assert_eq!(len, data.len() as u64);

     // Resize to extend the file
@@ -1317,7 +1319,7 @@ mod tests {
     blob.sync().await.expect("Failed to sync after resize");

     // Re-open and check length again
-    let (blob, len, _) = context.open(partition, name).await.unwrap();
+    let (blob, len) = context.open(partition, name).await.unwrap();
     assert_eq!(len, new_len);

     // Read original data
@@ -1336,7 +1338,7 @@ mod tests {
     blob.sync().await.unwrap();

     // Reopen to check truncation
-    let (blob, size, _) = context.open(partition, name).await.unwrap();
+    let (blob, size) = context.open(partition, name).await.unwrap();
     assert_eq!(size, data.len() as u64);

     // Read truncated data
@@ -1358,7 +1360,7 @@ mod tests {
     for (additional, partition) in partitions.iter().enumerate() {
         // Open a new blob
-        let (blob, _, _) = context
+        let (blob, _) = context
             .open(partition, name)
             .await
             .expect("Failed to open blob");
@@ -1377,7 +1379,7 @@ mod tests {
     for (additional, partition) in partitions.iter().enumerate() {
         // Open a new blob
-        let (blob, len, _) = context
+        let (blob, len) = context
             .open(partition, name)
             .await
             .expect("Failed to open blob");
@@ -1403,7 +1405,7 @@ mod tests {
     let name = b"test_blob_rw";

     // Open a new blob
-    let (blob, _, _) = context
+    let (blob, _) = context
         .open(partition, name)
         .await
         .expect("Failed to open blob");
@@ -1433,7 +1435,7 @@ mod tests {
     let name = b"test_blob_rw";

     // Open a new blob
-    let (blob, _, _) = context
+    let (blob, _) = context
         .open(partition, name)
         .await
         .expect("Failed to open blob");
diff --git a/runtime/src/storage/audited.rs b/runtime/src/storage/audited.rs
index 4c46c731a0..e17d45f7a3 100644
--- a/runtime/src/storage/audited.rs
+++ b/runtime/src/storage/audited.rs
@@ -147,8 +147,8 @@ mod tests {
     let storage2 = AuditedStorage::new(inner2, auditor2.clone());

     // Perform a sequence of operations on both storages simultaneously
-    let (blob1, _, _) = storage1.open("partition", b"test_blob").await.unwrap();
-    let (blob2, _, _) = storage2.open("partition", b"test_blob").await.unwrap();
+    let (blob1, _) = storage1.open("partition", b"test_blob").await.unwrap();
+    let (blob2, _) = storage2.open("partition", b"test_blob").await.unwrap();

     // Write data to the blobs
     blob1.write_at(b"hello world".to_vec(), 0).await.unwrap();
diff --git a/runtime/src/storage/iouring.rs b/runtime/src/storage/iouring.rs
index 820c530f88..7b3fdf1c32 100644
--- a/runtime/src/storage/iouring.rs
+++ b/runtime/src/storage/iouring.rs
@@ -472,7 +472,6 @@ mod tests {
     let (storage, storage_directory) = create_test_storage();

     // Test 1: New blob returns logical size 0 and correct application version
-    let (blob, size, app_version) = storage.open("partition", b"test").await.unwrap();
+    let (blob, size) = storage.open("partition", b"test").await.unwrap();
     assert_eq!(size, 0, "new blob should have logical size 0");
-    assert_eq!(app_version, Header::DEFAULT_APPLICATION_VERSION);
@@ -534,7 +534,6 @@ mod tests {
     blob.sync().await.unwrap();
     drop(blob);

-    let (blob2, size2, app_version2) = storage.open("partition", b"test").await.unwrap();
+    let (blob2, size2) = storage.open("partition", b"test").await.unwrap();
     assert_eq!(size2, 9, "reopened blob should have logical size 9");
-    assert_eq!(app_version2, Header::DEFAULT_APPLICATION_VERSION);
     let read_buf = blob2.read_at(vec![0u8; 9], 0).await.unwrap();
@@ -547,7 +547,6 @@ mod tests {
     std::fs::write(&corrupted_path, vec![0u8; 4]).unwrap();

     // Opening should truncate and write fresh header
-    let (blob3, size3, app_version3) = storage.open("partition", b"corrupted").await.unwrap();
+    let (blob3, size3) = storage.open("partition", b"corrupted").await.unwrap();
     assert_eq!(size3, 0, "corrupted blob should return logical size 0");
-    assert_eq!(app_version3, Header::DEFAULT_APPLICATION_VERSION);
diff --git a/runtime/src/storage/memory.rs b/runtime/src/storage/memory.rs
index d192a2e92f..29d3994045 100644
--- a/runtime/src/storage/memory.rs
+++ b/runtime/src/storage/memory.rs
@@ -217,9 +217,8 @@ mod tests {
     let storage = Storage::default();

     // Test 1: New blob returns logical size 0 and correct app version
-    let (blob, size, app_version) = storage.open("partition", b"test").await.unwrap();
+    let (blob, size) = storage.open("partition", b"test").await.unwrap();
     assert_eq!(size, 0, "new blob should have logical size 0");
-    assert_eq!(app_version, Header::DEFAULT_APPLICATION_VERSION);

     // Verify raw storage has 8 bytes (header only)
     {
@@ -292,9 +291,8 @@ mod tests {
     blob.sync().await.unwrap();
     drop(blob);

-    let (blob2, size2, app_version2) = storage.open("partition", b"test").await.unwrap();
+    let (blob2, size2) = storage.open("partition", b"test").await.unwrap();
     assert_eq!(size2, 9, "reopened blob should have logical size 9");
-    assert_eq!(app_version2, Header::DEFAULT_APPLICATION_VERSION);
     let read_buf = blob2.read_at(vec![0u8; 9], 0).await.unwrap();
     assert_eq!(read_buf.as_ref(), b"test data");
@@ -307,9 +305,8 @@ mod tests {
     }

     // 
Opening should truncate and write fresh header - let (_blob3, size3, app_version3) = storage.open("partition", b"corrupted").await.unwrap(); + let (_blob3, size3) = storage.open("partition", b"corrupted").await.unwrap(); assert_eq!(size3, 0, "corrupted blob should return logical size 0"); - assert_eq!(app_version3, Header::DEFAULT_APPLICATION_VERSION); // Verify raw storage now has proper 8-byte header { diff --git a/runtime/src/storage/metered.rs b/runtime/src/storage/metered.rs index 3d9015b0d5..c973d479d5 100644 --- a/runtime/src/storage/metered.rs +++ b/runtime/src/storage/metered.rs @@ -188,7 +188,7 @@ mod tests { let storage = Storage::new(inner, &mut registry); // Open a blob - let (blob, _, _) = storage.open("partition", b"test_blob").await.unwrap(); + let (blob, _) = storage.open("partition", b"test_blob").await.unwrap(); // Verify that the open_blobs metric is incremented let open_blobs = storage.metrics.open_blobs.get(); @@ -244,8 +244,8 @@ mod tests { let storage = Storage::new(inner, &mut registry); // Open multiple blobs - let (blob1, _, _) = storage.open("partition", b"blob1").await.unwrap(); - let (blob2, _, _) = storage.open("partition", b"blob2").await.unwrap(); + let (blob1, _) = storage.open("partition", b"blob1").await.unwrap(); + let (blob2, _) = storage.open("partition", b"blob2").await.unwrap(); // Verify that the open_blobs metric is incremented correctly let open_blobs = storage.metrics.open_blobs.get(); @@ -285,7 +285,7 @@ mod tests { let storage = Storage::new(inner, &mut registry); // Open a blob - let (blob, _, _) = storage.open("partition", b"test_blob").await.unwrap(); + let (blob, _) = storage.open("partition", b"test_blob").await.unwrap(); // Verify that the open_blobs metric is incremented assert_eq!( diff --git a/runtime/src/storage/mod.rs b/runtime/src/storage/mod.rs index 36db827af8..c7a8f5501e 100644 --- a/runtime/src/storage/mod.rs +++ b/runtime/src/storage/mod.rs @@ -55,7 +55,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, len, _) = storage.open("partition", b"test_blob").await.unwrap(); + let (blob, len) = storage.open("partition", b"test_blob").await.unwrap(); assert_eq!(len, 0); blob.write_at(Vec::from("hello world"), 0).await.unwrap(); @@ -115,7 +115,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage.open("partition", b"test_blob").await.unwrap(); + let (blob, _) = storage.open("partition", b"test_blob").await.unwrap(); // Initialize blob with data of sufficient length first blob.write_at(b"concurrent write".to_vec(), 0) @@ -153,7 +153,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage.open("partition", b"large_blob").await.unwrap(); + let (blob, _) = storage.open("partition", b"large_blob").await.unwrap(); let large_data = vec![42u8; 10 * 1024 * 1024]; // 10 MB blob.write_at(large_data.clone(), 0).await.unwrap(); @@ -169,7 +169,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage + let (blob, _) = storage .open("test_overwrite_data", b"test_blob") .await .unwrap(); @@ -196,7 +196,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage + let (blob, _) = storage .open("test_read_beyond_written_data", b"test_blob") .await .unwrap(); @@ -219,7 +219,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage + let (blob, _) = storage 
.open("test_write_at_large_offset", b"test_blob") .await .unwrap(); @@ -244,7 +244,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage + let (blob, _) = storage .open("test_append_data", b"test_blob") .await .unwrap(); @@ -266,7 +266,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage.open("partition", b"test_blob").await.unwrap(); + let (blob, _) = storage.open("partition", b"test_blob").await.unwrap(); // Write data at different offsets blob.write_at(b"first".to_vec(), 0).await.unwrap(); @@ -286,7 +286,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage + let (blob, _) = storage .open("test_large_data_in_chunks", b"large_blob") .await .unwrap(); @@ -316,7 +316,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage + let (blob, _) = storage .open("test_read_empty_blob", b"empty_blob") .await .unwrap(); @@ -334,7 +334,7 @@ pub(crate) mod tests { S: Storage + Send + Sync, S::Blob: Send + Sync, { - let (blob, _, _) = storage + let (blob, _) = storage .open("test_overlapping_writes", b"test_blob") .await .unwrap(); @@ -358,7 +358,7 @@ pub(crate) mod tests { S::Blob: Send + Sync, { { - let (blob, _, _) = storage + let (blob, _) = storage .open("test_resize_then_open", b"test_blob") .await .unwrap(); @@ -374,7 +374,7 @@ pub(crate) mod tests { } // Reopen the blob - let (blob, len, _) = storage + let (blob, len) = storage .open("test_resize_then_open", b"test_blob") .await .unwrap(); diff --git a/runtime/src/storage/tokio/mod.rs b/runtime/src/storage/tokio/mod.rs index 6a1a5d4fdd..efc92c942b 100644 --- a/runtime/src/storage/tokio/mod.rs +++ b/runtime/src/storage/tokio/mod.rs @@ -271,9 +271,8 @@ mod tests { let storage = Storage::new(config); // Test 1: New blob returns logical size 0 and correct app version - let (blob, size, app_version) = storage.open("partition", b"test").await.unwrap(); + let (blob, size) = storage.open("partition", b"test").await.unwrap(); assert_eq!(size, 0, "new blob should have logical size 0"); - assert_eq!(app_version, Header::DEFAULT_APPLICATION_VERSION); // Verify raw file has 8 bytes (header only) let file_path = storage_directory.join("partition").join(hex(b"test")); @@ -333,9 +332,8 @@ mod tests { blob.sync().await.unwrap(); drop(blob); - let (blob2, size2, app_version2) = storage.open("partition", b"test").await.unwrap(); + let (blob2, size2) = storage.open("partition", b"test").await.unwrap(); assert_eq!(size2, 9, "reopened blob should have logical size 9"); - assert_eq!(app_version2, Header::DEFAULT_APPLICATION_VERSION); let read_buf = blob2.read_at(vec![0u8; 9], 0).await.unwrap(); assert_eq!(read_buf.as_ref(), b"test data"); drop(blob2); @@ -346,9 +344,8 @@ mod tests { std::fs::write(&corrupted_path, vec![0u8; 4]).unwrap(); // Opening should truncate and write fresh header - let (blob3, size3, app_version3) = storage.open("partition", b"corrupted").await.unwrap(); + let (blob3, size3) = storage.open("partition", b"corrupted").await.unwrap(); assert_eq!(size3, 0, "corrupted blob should return logical size 0"); - assert_eq!(app_version3, Header::DEFAULT_APPLICATION_VERSION); // Verify raw file now has proper 8-byte header let metadata = std::fs::metadata(&corrupted_path).unwrap(); diff --git a/runtime/src/utils/buffer/append.rs b/runtime/src/utils/buffer/append.rs index 726b4f217e..27b35d7e17 100644 --- a/runtime/src/utils/buffer/append.rs +++ 
b/runtime/src/utils/buffer/append.rs @@ -336,7 +336,7 @@ mod tests { let executor = deterministic::Runner::default(); // Start the test within the executor executor.start(|context| async move { - let (blob, size, _) = context + let (blob, size) = context .open("test", "blob".as_bytes()) .await .expect("Failed to open blob"); @@ -355,7 +355,7 @@ mod tests { let executor = deterministic::Runner::default(); // Start the test within the executor executor.start(|context| async move { - let (blob, size, _) = context + let (blob, size) = context .open("test", "blob".as_bytes()) .await .expect("Failed to open blob"); @@ -375,7 +375,7 @@ mod tests { blob.sync().await.expect("Failed to sync blob"); // Make sure blob has expected size when reopened. - let (blob, size, _) = context + let (blob, size) = context .open("test", "blob".as_bytes()) .await .expect("Failed to open blob"); @@ -390,7 +390,7 @@ mod tests { let executor = deterministic::Runner::default(); // Start the test within the executor executor.start(|context| async move { - let (blob, size, _) = context + let (blob, size) = context .open("test", "blob".as_bytes()) .await .expect("Failed to open blob"); @@ -485,7 +485,7 @@ mod tests { fn test_append_blob_tracks_physical_size() { let executor = deterministic::Runner::default(); executor.start(|context| async move { - let (blob, size, _) = context + let (blob, size) = context .open("test", "blob".as_bytes()) .await .expect("Failed to open blob"); @@ -524,7 +524,7 @@ mod tests { assert_eq!(blob.buffer.read().await.1, 250); // Close and reopen. - let (blob, size, _) = context + let (blob, size) = context .open("test", "blob".as_bytes()) .await .expect("Failed to reopen blob"); diff --git a/runtime/src/utils/buffer/mod.rs b/runtime/src/utils/buffer/mod.rs index d5d5458cb3..87083648b8 100644 --- a/runtime/src/utils/buffer/mod.rs +++ b/runtime/src/utils/buffer/mod.rs @@ -24,7 +24,7 @@ mod tests { executor.start(|context| async move { // Test basic buffered reading functionality with sequential reads let data = b"Hello, world! 
This is a test."; - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -63,7 +63,7 @@ mod tests { executor.start(|context| async move { // Test reading data that spans multiple buffer refills let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -96,7 +96,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -130,7 +130,7 @@ mod tests { executor.start(|context| async move { // Test reader behavior with known blob size limits let data = b"This is a test with known size limitations."; - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -174,7 +174,7 @@ mod tests { // Test reading large amounts of data in chunks let data_size = 1024 * 256; // 256KB of data let data = vec![0x42; data_size]; - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.clone(), 0).await.unwrap(); let size = data.len() as u64; @@ -222,7 +222,7 @@ mod tests { let data_size = buffer_size * 5 / 2; // 2.5 buffers let data = vec![0x37; data_size]; - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.clone(), 0).await.unwrap(); let size = data.len() as u64; @@ -257,7 +257,7 @@ mod tests { executor.start(|context| async move { // Create a memory blob with some test data let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let size = data.len() as u64; @@ -309,7 +309,7 @@ mod tests { executor.start(|context| async move { // Create a memory blob with longer data let data = vec![0x41; 1000]; // 1000 'A' characters - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.clone(), 0).await.unwrap(); let size = data.len() as u64; @@ -346,7 +346,7 @@ mod tests { executor.start(|context| async move { // Create a memory blob with some test data let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); let data_len = data.len() as u64; @@ -359,7 +359,7 @@ mod tests { 
reader.resize(resize_len).await.unwrap(); // Reopen to check truncation - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, resize_len, "Blob should be resized to half size"); // Create a new buffer and read to verify truncation @@ -382,7 +382,7 @@ mod tests { new_reader.resize(data_len * 2).await.unwrap(); // Reopen to check resize - let (blob, new_size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, new_size) = context.open("partition", b"test").await.unwrap(); assert_eq!(new_size, data_len * 2); // Create a new buffer and read to verify resize @@ -407,7 +407,7 @@ mod tests { // Create a memory blob with some test data let data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"; let data_len = data.len() as u64; - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0); blob.write_at(data.to_vec(), 0).await.unwrap(); @@ -418,7 +418,7 @@ mod tests { reader.resize(0).await.unwrap(); // Reopen to check truncation - let (blob, size, _) = context.open("partition", b"test").await.unwrap(); + let (blob, size) = context.open("partition", b"test").await.unwrap(); assert_eq!(size, 0, "Blob should be resized to zero"); // Create a new buffer and try to read (should fail) @@ -436,7 +436,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test basic buffered write and sync functionality - let (blob, size, _) = context.open("partition", b"write_basic").await.unwrap(); + let (blob, size) = context.open("partition", b"write_basic").await.unwrap(); assert_eq!(size, 0); let writer = Write::new(blob.clone(), size, NZUsize!(8)); @@ -446,7 +446,7 @@ mod tests { assert_eq!(writer.size().await, 5); // Verify data was written correctly - let (blob, size, _) = context.open("partition", b"write_basic").await.unwrap(); + let (blob, size) = context.open("partition", b"write_basic").await.unwrap(); assert_eq!(size, 5); let mut reader = Read::new(blob, size, NZUsize!(8)); let mut buf = [0u8; 5]; @@ -460,7 +460,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writes that cause buffer flushes due to capacity limits - let (blob, size, _) = context.open("partition", b"write_multi").await.unwrap(); + let (blob, size) = context.open("partition", b"write_multi").await.unwrap(); assert_eq!(size, 0); let writer = Write::new(blob.clone(), size, NZUsize!(4)); @@ -471,7 +471,7 @@ mod tests { writer.sync().await.unwrap(); // Verify the final result - let (blob, size, _) = context.open("partition", b"write_multi").await.unwrap(); + let (blob, size) = context.open("partition", b"write_multi").await.unwrap(); assert_eq!(size, 7); let mut reader = Read::new(blob, size, NZUsize!(4)); let mut buf = [0u8; 7]; @@ -485,7 +485,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writing data larger than buffer capacity (direct write) - let (blob, size, _) = context.open("partition", b"write_large").await.unwrap(); + let (blob, size) = context.open("partition", b"write_large").await.unwrap(); assert_eq!(size, 0); let writer = Write::new(blob.clone(), size, NZUsize!(4)); @@ -500,7 +500,7 @@ mod tests { assert_eq!(writer.size().await, 26); // Verify the complete data - let (blob, size, _) = context.open("partition", b"write_large").await.unwrap(); + 
let (blob, size) = context.open("partition", b"write_large").await.unwrap(); assert_eq!(size, 26); let mut reader = Read::new(blob, size, NZUsize!(4)); let mut buf = [0u8; 26]; @@ -514,7 +514,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test sequential appends that exceed buffer capacity - let (blob, size, _) = context.open("partition", b"append_buf").await.unwrap(); + let (blob, size) = context.open("partition", b"append_buf").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Write data that fits in buffer @@ -527,7 +527,7 @@ mod tests { assert_eq!(writer.size().await, 11); // Verify the complete result - let (blob, size, _) = context.open("partition", b"append_buf").await.unwrap(); + let (blob, size) = context.open("partition", b"append_buf").await.unwrap(); assert_eq!(size, 11); let mut reader = Read::new(blob, size, NZUsize!(10)); let mut buf = vec![0u8; 11]; @@ -541,7 +541,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test overwriting data within the buffer and extending it - let (blob, size, _) = context.open("partition", b"middle_buf").await.unwrap(); + let (blob, size) = context.open("partition", b"middle_buf").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(20)); // Initial write @@ -554,7 +554,7 @@ mod tests { writer.sync().await.unwrap(); // Verify overwrite result - let (blob, size, _) = context.open("partition", b"middle_buf").await.unwrap(); + let (blob, size) = context.open("partition", b"middle_buf").await.unwrap(); assert_eq!(size, 10); let mut reader = Read::new(blob, size, NZUsize!(10)); let mut buf = vec![0u8; 10]; @@ -569,7 +569,7 @@ mod tests { writer.sync().await.unwrap(); // Verify final result - let (blob, size, _) = context.open("partition", b"middle_buf").await.unwrap(); + let (blob, size) = context.open("partition", b"middle_buf").await.unwrap(); assert_eq!(size, 20); let mut reader = Read::new(blob, size, NZUsize!(20)); let mut buf = vec![0u8; 20]; @@ -583,7 +583,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writing at offsets before the current buffer position - let (blob, size, _) = context.open("partition", b"before_buf").await.unwrap(); + let (blob, size) = context.open("partition", b"before_buf").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Write data at a later offset first @@ -596,7 +596,7 @@ mod tests { writer.sync().await.unwrap(); // Verify data placement with gap - let (blob, size, _) = context.open("partition", b"before_buf").await.unwrap(); + let (blob, size) = context.open("partition", b"before_buf").await.unwrap(); assert_eq!(size, 20); let mut reader = Read::new(blob, size, NZUsize!(20)); let mut buf = vec![0u8; 20]; @@ -613,7 +613,7 @@ mod tests { assert_eq!(writer.size().await, 20); // Verify gap is filled - let (blob, size, _) = context.open("partition", b"before_buf").await.unwrap(); + let (blob, size) = context.open("partition", b"before_buf").await.unwrap(); assert_eq!(size, 20); let mut reader = Read::new(blob, size, NZUsize!(20)); let mut buf = vec![0u8; 20]; @@ -628,7 +628,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test blob resize functionality and subsequent writes - let (blob, size, _) = context.open("partition", b"resize_write").await.unwrap(); + let (blob, size) = context.open("partition", 
b"resize_write").await.unwrap(); let writer = Write::new(blob, size, NZUsize!(10)); // Write initial data @@ -637,7 +637,7 @@ mod tests { writer.sync().await.unwrap(); assert_eq!(writer.size().await, 11); - let (blob_check, size_check, _) = + let (blob_check, size_check) = context.open("partition", b"resize_write").await.unwrap(); assert_eq!(size_check, 11); drop(blob_check); @@ -648,7 +648,7 @@ mod tests { writer.sync().await.unwrap(); // Verify resize - let (blob, size, _) = context.open("partition", b"resize_write").await.unwrap(); + let (blob, size) = context.open("partition", b"resize_write").await.unwrap(); assert_eq!(size, 5); let mut reader = Read::new(blob, size, NZUsize!(5)); let mut buf = vec![0u8; 5]; @@ -661,7 +661,7 @@ mod tests { writer.sync().await.unwrap(); // Verify overwrite - let (blob, size, _) = context.open("partition", b"resize_write").await.unwrap(); + let (blob, size) = context.open("partition", b"resize_write").await.unwrap(); assert_eq!(size, 5); let mut reader = Read::new(blob, size, NZUsize!(5)); let mut buf = vec![0u8; 5]; @@ -674,7 +674,7 @@ mod tests { writer.sync().await.unwrap(); // Verify resize - let (blob, size, _) = context.open("partition", b"resize_write").await.unwrap(); + let (blob, size) = context.open("partition", b"resize_write").await.unwrap(); assert_eq!(size, 10); let mut reader = Read::new(blob, size, NZUsize!(10)); let mut buf = vec![0u8; 10]; @@ -683,7 +683,7 @@ mod tests { assert_eq!(&buf[5..10], [0u8; 5]); // Test resize to zero - let (blob_zero, size, _) = context.open("partition", b"resize_zero").await.unwrap(); + let (blob_zero, size) = context.open("partition", b"resize_zero").await.unwrap(); let writer_zero = Write::new(blob_zero.clone(), size, NZUsize!(10)); writer_zero .write_at(b"some data".to_vec(), 0) @@ -698,7 +698,7 @@ mod tests { assert_eq!(writer_zero.size().await, 0); // Ensure the blob is empty - let (_, size_z, _) = context.open("partition", b"resize_zero").await.unwrap(); + let (_, size_z) = context.open("partition", b"resize_zero").await.unwrap(); assert_eq!(size_z, 0); }); } @@ -708,7 +708,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test reading through writer's read_at method (buffer + blob reads) - let (blob, size, _) = context.open("partition", b"read_at_writer").await.unwrap(); + let (blob, size) = context.open("partition", b"read_at_writer").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Write data that stays in buffer @@ -759,7 +759,7 @@ mod tests { // Verify complete content by reopening writer.sync().await.unwrap(); assert_eq!(writer.size().await, 30); - let (final_blob, final_size, _) = + let (final_blob, final_size) = context.open("partition", b"read_at_writer").await.unwrap(); assert_eq!(final_size, 30); let mut final_reader = Read::new(final_blob, final_size, NZUsize!(30)); @@ -777,7 +777,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writes that cannot be merged into buffer (non-contiguous/too large) - let (blob, size, _) = context.open("partition", b"write_straddle").await.unwrap(); + let (blob, size) = context.open("partition", b"write_straddle").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(10)); // Fill buffer completely @@ -791,7 +791,7 @@ mod tests { assert_eq!(writer.size().await, 18); // Verify data with gap - let (blob_check, size_check, _) = + let (blob_check, size_check) = context.open("partition", 
b"write_straddle").await.unwrap(); assert_eq!(size_check, 18); let mut reader = Read::new(blob_check, size_check, NZUsize!(20)); @@ -804,7 +804,7 @@ mod tests { assert_eq!(buf, expected); // Test write that exceeds buffer capacity - let (blob2, size, _) = context.open("partition", b"write_straddle2").await.unwrap(); + let (blob2, size) = context.open("partition", b"write_straddle2").await.unwrap(); let writer2 = Write::new(blob2.clone(), size, NZUsize!(10)); writer2.write_at(b"0123456789".to_vec(), 0).await.unwrap(); assert_eq!(writer2.size().await, 10); @@ -816,7 +816,7 @@ mod tests { assert_eq!(writer2.size().await, 17); // Verify overwrite result - let (blob_check2, size_check2, _) = + let (blob_check2, size_check2) = context.open("partition", b"write_straddle2").await.unwrap(); assert_eq!(size_check2, 17); let mut reader2 = Read::new(blob_check2, size_check2, NZUsize!(20)); @@ -831,7 +831,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test that closing writer flushes and persists buffered data - let (blob_orig, size, _) = context.open("partition", b"write_close").await.unwrap(); + let (blob_orig, size) = context.open("partition", b"write_close").await.unwrap(); let writer = Write::new(blob_orig.clone(), size, NZUsize!(8)); writer.write_at(b"pending".to_vec(), 0).await.unwrap(); assert_eq!(writer.size().await, 7); @@ -840,8 +840,7 @@ mod tests { writer.sync().await.unwrap(); // Verify data persistence - let (blob_check, size_check, _) = - context.open("partition", b"write_close").await.unwrap(); + let (blob_check, size_check) = context.open("partition", b"write_close").await.unwrap(); assert_eq!(size_check, 7); let mut reader = Read::new(blob_check, size_check, NZUsize!(8)); let mut buf = [0u8; 7]; @@ -855,7 +854,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test direct writes when data exceeds buffer capacity - let (blob, size, _) = context + let (blob, size) = context .open("partition", b"write_direct_size") .await .unwrap(); @@ -870,7 +869,7 @@ mod tests { writer.sync().await.unwrap(); // Verify direct write worked - let (blob_check, size_check, _) = context + let (blob_check, size_check) = context .open("partition", b"write_direct_size") .await .unwrap(); @@ -892,7 +891,7 @@ mod tests { writer.sync().await.unwrap(); // Verify final state - let (blob_check2, size_check2, _) = context + let (blob_check2, size_check2) = context .open("partition", b"write_direct_size") .await .unwrap(); @@ -909,7 +908,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test complex buffer operations: overwrite and extend within capacity - let (blob, size, _) = context + let (blob, size) = context .open("partition", b"overwrite_extend_buf") .await .unwrap(); @@ -931,7 +930,7 @@ mod tests { writer.sync().await.unwrap(); // Verify persisted result - let (blob_check, size_check, _) = context + let (blob_check, size_check) = context .open("partition", b"overwrite_extend_buf") .await .unwrap(); @@ -948,7 +947,7 @@ mod tests { let executor = deterministic::Runner::default(); executor.start(|context| async move { // Test writing at the current logical end of the blob - let (blob, size, _) = context.open("partition", b"write_end").await.unwrap(); + let (blob, size) = context.open("partition", b"write_end").await.unwrap(); let writer = Write::new(blob.clone(), size, NZUsize!(20)); // Write initial data @@ -965,8 +964,7 @@ mod tests { 
             writer.sync().await.unwrap();
 
             // Verify complete result
-            let (blob_check, size_check, _) =
-                context.open("partition", b"write_end").await.unwrap();
+            let (blob_check, size_check) = context.open("partition", b"write_end").await.unwrap();
             assert_eq!(size_check, 13);
             let mut reader = Read::new(blob_check, size_check, NZUsize!(13));
             let mut buf = vec![0u8; 13];
@@ -980,7 +978,7 @@
         let executor = deterministic::Runner::default();
         executor.start(|context| async move {
             // Test multiple appends using writer.size()
-            let (blob, size, _) = context
+            let (blob, size) = context
                 .open("partition", b"write_multiple_appends_at_size")
                 .await
                 .unwrap();
@@ -1011,7 +1009,7 @@
             assert_eq!(writer.size().await, 9);
 
             // Verify final content
-            let (blob_check, size_check, _) = context
+            let (blob_check, size_check) = context
                 .open("partition", b"write_multiple_appends_at_size")
                 .await
                 .unwrap();
@@ -1028,7 +1026,7 @@
         let executor = deterministic::Runner::default();
         executor.start(|context| async move {
             // Test writing non-contiguously, then appending at the new size
-            let (blob, size, _) = context
+            let (blob, size) = context
                 .open("partition", b"write_non_contiguous_then_append")
                 .await
                 .unwrap();
@@ -1055,7 +1053,7 @@
             assert_eq!(writer.size().await, 35);
 
             // Verify final content
-            let (blob_check, size_check, _) = context
+            let (blob_check, size_check) = context
                 .open("partition", b"write_non_contiguous_then_append")
                 .await
                 .unwrap();
@@ -1077,7 +1075,7 @@
         let executor = deterministic::Runner::default();
         executor.start(|context| async move {
             // Test truncating, then appending at the new size
-            let (blob, size, _) = context
+            let (blob, size) = context
                 .open("partition", b"resize_then_append_at_size")
                 .await
                 .unwrap();
@@ -1112,7 +1110,7 @@
             assert_eq!(writer.size().await, 10);
 
             // Verify final content
-            let (blob_check, size_check, _) = context
+            let (blob_check, size_check) = context
                 .open("partition", b"resize_then_append_at_size")
                 .await
                 .unwrap();
diff --git a/runtime/src/utils/buffer/pool.rs b/runtime/src/utils/buffer/pool.rs
index c7e82a6c04..7ab0381b24 100644
--- a/runtime/src/utils/buffer/pool.rs
+++ b/runtime/src/utils/buffer/pool.rs
@@ -455,7 +455,7 @@ mod tests {
         // Start the test within the executor
         executor.start(|context| async move {
             // Populate a blob with 11 consecutive pages of data.
-            let (blob, size, _) = context
+            let (blob, size) = context
                 .open("test", "blob".as_bytes())
                 .await
                 .expect("Failed to open blob");
diff --git a/runtime/src/utils/buffer/read.rs b/runtime/src/utils/buffer/read.rs
index db1f53fdac..c9c48fe694 100644
--- a/runtime/src/utils/buffer/read.rs
+++ b/runtime/src/utils/buffer/read.rs
@@ -16,8 +16,7 @@ use std::num::NonZeroUsize;
 /// let executor = deterministic::Runner::default();
 /// executor.start(|context| async move {
 ///     // Open a blob and add some data (e.g., a journal file)
-///     let (blob, size, blob_version) = context.open("my_partition", b"my_data").await.expect("unable to open blob");
-///     assert_eq!(blob_version, BLOB_VERSION);
+///     let (blob, size) = context.open("my_partition", b"my_data").await.expect("unable to open blob");
 ///     let data = b"Hello, world! This is a test.".to_vec();
 ///     let size = data.len() as u64;
 ///     blob.write_at(data, 0).await.expect("unable to write data");
diff --git a/runtime/src/utils/buffer/write.rs b/runtime/src/utils/buffer/write.rs
index 8cfc99af6e..e18189f3c2 100644
--- a/runtime/src/utils/buffer/write.rs
+++ b/runtime/src/utils/buffer/write.rs
@@ -14,7 +14,7 @@ use std::{num::NonZeroUsize, sync::Arc};
 /// let executor = deterministic::Runner::default();
 /// executor.start(|context| async move {
 ///     // Open a blob for writing
-///     let (blob, size, _) = context.open("my_partition", b"my_data").await.expect("unable to open blob");
+///     let (blob, size) = context.open("my_partition", b"my_data").await.expect("unable to open blob");
 ///     assert_eq!(size, 0);
 ///
 ///     // Create a buffered writer with 16-byte buffer
@@ -28,7 +28,7 @@ use std::{num::NonZeroUsize, sync::Arc};
 ///     blob.sync().await.expect("sync failed");
 ///
 ///     // Read back the data to verify
-///     let (blob, size, _) = context.open("my_partition", b"my_data").await.expect("unable to reopen blob");
+///     let (blob, size) = context.open("my_partition", b"my_data").await.expect("unable to reopen blob");
 ///     let mut reader = Read::new(blob, size, NZUsize!(8));
 ///     let mut buf = vec![0u8; size as usize];
 ///     reader.read_exact(&mut buf, size as usize).await.expect("read failed");
diff --git a/storage/src/archive/prunable/mod.rs b/storage/src/archive/prunable/mod.rs
index 61544fab15..dcbb79fe6f 100644
--- a/storage/src/archive/prunable/mod.rs
+++ b/storage/src/archive/prunable/mod.rs
@@ -295,7 +295,7 @@ mod tests {
         // Corrupt the value
         let section = (index / DEFAULT_ITEMS_PER_SECTION) * DEFAULT_ITEMS_PER_SECTION;
-        let (blob, _, _) = context
+        let (blob, _) = context
             .open("test_partition", &section.to_be_bytes())
             .await
             .unwrap();
diff --git a/storage/src/cache/mod.rs b/storage/src/cache/mod.rs
index 9b845346b7..0c4f170707 100644
--- a/storage/src/cache/mod.rs
+++ b/storage/src/cache/mod.rs
@@ -219,7 +219,7 @@ mod tests {
         // Corrupt the value
         let section = (index / DEFAULT_ITEMS_PER_BLOB) * DEFAULT_ITEMS_PER_BLOB;
-        let (blob, _, _) = context
+        let (blob, _) = context
             .open("test_partition", &section.to_be_bytes())
             .await
             .unwrap();
diff --git a/storage/src/freezer/mod.rs b/storage/src/freezer/mod.rs
index 627c8d2f01..7dda5392ae 100644
--- a/storage/src/freezer/mod.rs
+++ b/storage/src/freezer/mod.rs
@@ -703,7 +703,7 @@ mod tests {
         // Corrupt the table by writing partial entry
         {
-            let (blob, _, _) = context.open(&cfg.table_partition, b"table").await.unwrap();
+            let (blob, _) = context.open(&cfg.table_partition, b"table").await.unwrap();
             // Write incomplete table entry (only 10 bytes instead of 24)
             blob.write_at(vec![0xFF; 10], 0).await.unwrap();
             blob.sync().await.unwrap();
@@ -763,7 +763,7 @@ mod tests {
         // Corrupt the CRC in the index entry
         {
-            let (blob, _, _) = context.open(&cfg.table_partition, b"table").await.unwrap();
+            let (blob, _) = context.open(&cfg.table_partition, b"table").await.unwrap();
             // Read the first entry
             let entry_data = blob.read_at(vec![0u8; 24], 0).await.unwrap();
             let mut corrupted = entry_data.as_ref().to_vec();
@@ -827,7 +827,7 @@ mod tests {
         // Add extra bytes to the table blob
         {
-            let (blob, size, _) = context.open(&cfg.table_partition, b"table").await.unwrap();
+            let (blob, size) = context.open(&cfg.table_partition, b"table").await.unwrap();
             // Append garbage data
             blob.write_at(hex!("0xdeadbeef").to_vec(), size)
                 .await
diff --git a/storage/src/freezer/storage.rs b/storage/src/freezer/storage.rs
index 8ebf33929e..51a0fd74a8 100644
--- a/storage/src/freezer/storage.rs
+++ b/storage/src/freezer/storage.rs
@@ -567,7 +567,7 @@ impl Freezer {
         let mut journal = Journal::init(context.with_label("journal"), journal_config).await?;
 
         // Open table blob
-        let (table, table_len, _) = context
+        let (table, table_len) = context
             .open(&config.table_partition, TABLE_BLOB_NAME)
             .await?;
diff --git a/storage/src/journal/contiguous/fixed.rs b/storage/src/journal/contiguous/fixed.rs
index 4503c74bcc..d2b70df875 100644
--- a/storage/src/journal/contiguous/fixed.rs
+++ b/storage/src/journal/contiguous/fixed.rs
@@ -160,7 +160,7 @@ impl> Journal {
             Err(err) => return Err(Error::Runtime(err)),
         };
         for name in stored_blobs {
-            let (blob, size, _) = context
+            let (blob, size) = context
                 .open(&cfg.partition, &name)
                 .await
                 .map_err(Error::Runtime)?;
@@ -190,7 +190,7 @@ impl> Journal {
             }
         } else {
             debug!("no blobs found");
-            let (blob, size, _) = context.open(&cfg.partition, &0u64.to_be_bytes()).await?;
+            let (blob, size) = context.open(&cfg.partition, &0u64.to_be_bytes()).await?;
             assert_eq!(size, 0);
             blobs.insert(0, (blob, size));
         }
@@ -222,7 +222,7 @@ impl> Journal {
             );
             blobs.insert(tail_index, (tail, tail_size));
             tail_index += 1;
-            (tail, tail_size, _) = context
+            (tail, tail_size) = context
                 .open(&cfg.partition, &tail_index.to_be_bytes())
                 .await?;
             assert_eq!(tail_size, 0);
@@ -355,7 +355,7 @@ impl> Journal {
         // Create a new empty blob.
         let next_blob_index = self.tail_index + 1;
         debug!(blob = next_blob_index, "creating next blob");
-        let (next_blob, size, _) = self
+        let (next_blob, size) = self
             .context
             .open(&self.cfg.partition, &next_blob_index.to_be_bytes())
             .await?;
@@ -971,7 +971,7 @@ mod tests {
         // Corrupt one of the checksums and make sure it's detected.
         let checksum_offset = Digest::SIZE as u64
             + (ITEMS_PER_BLOB.get() / 2) * (Digest::SIZE + u32::SIZE) as u64;
-        let (blob, _, _) = context
+        let (blob, _) = context
            .open(&cfg.partition, &40u64.to_be_bytes())
            .await
            .expect("Failed to open blob");
@@ -1051,7 +1051,7 @@ mod tests {
         assert!(buffer.contains("tracked 101"));
 
         // Manually truncate a non-tail blob to make sure it's detected during initialization.
-        let (blob, size, _) = context
+        let (blob, size) = context
             .open(&cfg.partition, &40u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1100,7 +1100,7 @@ mod tests {
         // Truncate the tail blob by one byte, which should result in the 3rd item being
         // trimmed.
-        let (blob, size, _) = context
+        let (blob, size) = context
             .open(&cfg.partition, &1u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1124,7 +1124,7 @@ mod tests {
         assert_eq!(journal.size(), item_count - 2);
 
         // Corrupt the last item, ensuring last blob is trimmed to empty state.
-        let (blob, size, _) = context
+        let (blob, size) = context
             .open(&cfg.partition, &1u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1236,7 +1236,7 @@ mod tests {
         drop(journal);
 
         // Manually truncate most recent blob to simulate a partial write.
-        let (blob, size, _) = context
+        let (blob, size) = context
             .open(&cfg.partition, &1u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1294,7 +1294,7 @@ mod tests {
         drop(journal);
 
         // Manually truncate most recent blob to simulate a partial write.
-        let (blob, size, _) = context
+        let (blob, size) = context
             .open(&cfg.partition, &0u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1344,7 +1344,7 @@ mod tests {
         // Manually extend the blob by an amount at least some multiple of the chunk size to
         // simulate a failure where the file was extended, but no bytes were written due to
         // failure.
-        let (blob, size, _) = context
+        let (blob, size) = context
             .open(&cfg.partition, &0u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1519,7 +1519,7 @@ mod tests {
         drop(journal);
 
         // Hash blob contents
-        let (blob, size, _) = context
+        let (blob, size) = context
             .open(&cfg.partition, &0u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1534,7 +1534,7 @@ mod tests {
             "ed2ea67208cde2ee8c16cca5aa4f369f55b1402258c6b7760e5baf134e38944a",
         );
         blob.sync().await.expect("Failed to sync blob");
-        let (blob, size, _) = context
+        let (blob, size) = context
             .open(&cfg.partition, &1u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
diff --git a/storage/src/journal/segmented/variable.rs b/storage/src/journal/segmented/variable.rs
index cd9eb677bd..17c97ed7d8 100644
--- a/storage/src/journal/segmented/variable.rs
+++ b/storage/src/journal/segmented/variable.rs
@@ -188,7 +188,7 @@ impl Journal {
             Err(err) => return Err(Error::Runtime(err)),
         };
         for name in stored_blobs {
-            let (blob, size, _) = context.open(&cfg.partition, &name).await?;
+            let (blob, size) = context.open(&cfg.partition, &name).await?;
             let hex_name = hex(&name);
             let section = match name.try_into() {
                 Ok(section) => u64::from_be_bytes(section),
@@ -533,7 +533,7 @@ impl Journal {
             Entry::Occupied(entry) => entry.into_mut(),
             Entry::Vacant(entry) => {
                 let name = section.to_be_bytes();
-                let (blob, size, _) = self.context.open(&self.cfg.partition, &name).await?;
+                let (blob, size) = self.context.open(&self.cfg.partition, &name).await?;
                 let blob = Append::new(
                     blob,
                     size,
@@ -1223,7 +1223,7 @@ mod tests {
         // Manually create a blob with an invalid name (not 8 bytes)
         let invalid_blob_name = b"invalid"; // Less than 8 bytes
-        let (blob, _, _) = context
+        let (blob, _) = context
             .open(&cfg.partition, invalid_blob_name)
             .await
             .expect("Failed to create blob with invalid name");
@@ -1256,7 +1256,7 @@ mod tests {
         // Manually create a blob with incomplete size data
         let section = 1u64;
         let blob_name = section.to_be_bytes();
-        let (blob, _, _) = context
+        let (blob, _) = context
             .open(&cfg.partition, &blob_name)
             .await
             .expect("Failed to create blob");
@@ -1311,7 +1311,7 @@ mod tests {
         // Manually create a blob with missing item data
         let section = 1u64;
         let blob_name = section.to_be_bytes();
-        let (blob, _, _) = context
+        let (blob, _) = context
             .open(&cfg.partition, &blob_name)
             .await
             .expect("Failed to create blob");
@@ -1368,7 +1368,7 @@ mod tests {
         // Manually create a blob with missing checksum
         let section = 1u64;
         let blob_name = section.to_be_bytes();
-        let (blob, _, _) = context
+        let (blob, _) = context
             .open(&cfg.partition, &blob_name)
             .await
             .expect("Failed to create blob");
@@ -1430,7 +1430,7 @@ mod tests {
         // Manually create a blob with incorrect checksum
         let section = 1u64;
         let blob_name = section.to_be_bytes();
-        let (blob, _, _) = context
+        let (blob, _) = context
             .open(&cfg.partition, &blob_name)
             .await
             .expect("Failed to create blob");
@@ -1475,7 +1475,7 @@ mod tests {
         drop(journal);
 
         // Confirm blob is expected length
-        let (_, blob_size, _) = context
+        let (_, blob_size) = context
             .open(&cfg.partition, &section.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1522,7 +1522,7 @@ mod tests {
         drop(journal);
 
         // Manually corrupt the end of the second blob
-        let (blob, blob_size, _) = context
+        let (blob, blob_size) = context
             .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1565,7 +1565,7 @@ mod tests {
         // Confirm blob is expected length
         // entry = 1 (varint for 4) + 4 (data) + 4 (checksum) = 9 bytes
         // Item 2 ends at position 16 + 9 = 25
-        let (_, blob_size, _) = context
+        let (_, blob_size) = context
             .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1615,7 +1615,7 @@ mod tests {
         // Confirm blob is expected length
         // Items 1 and 2 at positions 0 and 16, item 3 (value 5) at position 32
         // Item 3 = 1 (varint) + 4 (data) + 4 (checksum) = 9 bytes, ends at 41
-        let (_, blob_size, _) = context
+        let (_, blob_size) = context
             .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1694,7 +1694,7 @@ mod tests {
         drop(journal);
 
         // Manually corrupt the end of the second blob
-        let (blob, blob_size, _) = context
+        let (blob, blob_size) = context
             .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1747,7 +1747,7 @@ mod tests {
         // Confirm blob is expected length
         // entry = 1 (varint for 8) + 8 (u64 data) + 4 (checksum) = 13 bytes
         // Items at positions 0, 16, 32; item 3 ends at 32 + 13 = 45
-        let (_, blob_size, _) = context
+        let (_, blob_size) = context
             .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1826,7 +1826,7 @@ mod tests {
         drop(journal);
 
         // Manually add extra data to the end of the second blob
-        let (blob, blob_size, _) = context
+        let (blob, blob_size) = context
             .open(&cfg.partition, &2u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -2128,7 +2128,7 @@ mod tests {
         drop(journal);
 
         // Hash blob contents
-        let (blob, size, _) = context
+        let (blob, size) = context
             .open(&cfg.partition, &1u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
diff --git a/storage/src/metadata/mod.rs b/storage/src/metadata/mod.rs
index 4df9ea4388..5b085265ff 100644
--- a/storage/src/metadata/mod.rs
+++ b/storage/src/metadata/mod.rs
@@ -315,7 +315,7 @@ mod tests {
         drop(metadata);
 
         // Corrupt the metadata store
-        let (blob, _, _) = context.open("test", b"left").await.unwrap();
+        let (blob, _) = context.open("test", b"left").await.unwrap();
         blob.write_at(b"corrupted".to_vec(), 0).await.unwrap();
         blob.sync().await.unwrap();
@@ -370,10 +370,10 @@ mod tests {
         drop(metadata);
 
         // Corrupt the metadata store
-        let (blob, _, _) = context.open("test", b"left").await.unwrap();
+        let (blob, _) = context.open("test", b"left").await.unwrap();
         blob.write_at(b"corrupted".to_vec(), 0).await.unwrap();
         blob.sync().await.unwrap();
-        let (blob, _, _) = context.open("test", b"right").await.unwrap();
+        let (blob, _) = context.open("test", b"right").await.unwrap();
         blob.write_at(b"corrupted".to_vec(), 0).await.unwrap();
         blob.sync().await.unwrap();
@@ -432,7 +432,7 @@ mod tests {
         drop(metadata);
 
         // Corrupt the metadata store
-        let (blob, len, _) = context.open("test", b"left").await.unwrap();
+        let (blob, len) = context.open("test", b"left").await.unwrap();
         blob.resize(len - 8).await.unwrap();
         blob.sync().await.unwrap();
@@ -485,7 +485,7 @@ mod tests {
         drop(metadata);
 
         // Corrupt the metadata store
-        let (blob, _, _) = context.open("test", b"left").await.unwrap();
+        let (blob, _) = context.open("test", b"left").await.unwrap();
         blob.resize(5).await.unwrap();
         blob.sync().await.unwrap();
diff --git a/storage/src/metadata/storage.rs b/storage/src/metadata/storage.rs
index 14c1aa62f9..d4cee20853 100644
--- a/storage/src/metadata/storage.rs
+++ b/storage/src/metadata/storage.rs
@@ -79,8 +79,8 @@ impl Metadata {
     /// Initialize a new [Metadata] instance.
     pub async fn init(context: E, cfg: Config) -> Result<Self, Error> {
         // Open dedicated blobs
-        let (left_blob, left_len, _) = context.open(&cfg.partition, BLOB_NAMES[0]).await?;
-        let (right_blob, right_len, _) = context.open(&cfg.partition, BLOB_NAMES[1]).await?;
+        let (left_blob, left_len) = context.open(&cfg.partition, BLOB_NAMES[0]).await?;
+        let (right_blob, right_len) = context.open(&cfg.partition, BLOB_NAMES[1]).await?;
 
         // Find latest blob (check which includes a hash of the other)
         let (left_map, left_wrapper) =
diff --git a/storage/src/mmr/journaled.rs b/storage/src/mmr/journaled.rs
index e3f89be0d9..4ddb5cc82f 100644
--- a/storage/src/mmr/journaled.rs
+++ b/storage/src/mmr/journaled.rs
@@ -1137,7 +1137,7 @@ mod tests {
         // 497. Simulate a partial write by corrupting the last parent's checksum by truncating
         // the last blob by a single byte.
         let partition: String = "journal_partition".into();
-        let (blob, len, _) = context
+        let (blob, len) = context
             .open(&partition, &71u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
@@ -1170,7 +1170,7 @@ mod tests {
             .remove(&partition, Some(&71u64.to_be_bytes()))
             .await
             .expect("Failed to remove blob");
-        let (blob, len, _) = context
+        let (blob, len) = context
             .open(&partition, &70u64.to_be_bytes())
             .await
             .expect("Failed to open blob");
diff --git a/storage/src/ordinal/mod.rs b/storage/src/ordinal/mod.rs
index 6dc105f8ec..193dc609c5 100644
--- a/storage/src/ordinal/mod.rs
+++ b/storage/src/ordinal/mod.rs
@@ -518,7 +518,7 @@ mod tests {
         // Corrupt the data
         {
-            let (blob, _, _) = context
+            let (blob, _) = context
                 .open("test_ordinal", &0u64.to_be_bytes())
                 .await
                 .unwrap();
@@ -645,7 +645,7 @@ mod tests {
         // Corrupt by writing partial record (only value, no CRC)
         {
-            let (blob, _, _) = context
+            let (blob, _) = context
                 .open("test_ordinal", &0u64.to_be_bytes())
                 .await
                 .unwrap();
@@ -712,7 +712,7 @@ mod tests {
         // Corrupt the value portion of a record
         {
-            let (blob, _, _) = context
+            let (blob, _) = context
                 .open("test_ordinal", &0u64.to_be_bytes())
                 .await
                 .unwrap();
@@ -771,7 +771,7 @@ mod tests {
         // Corrupt CRCs in different blobs
         {
             // Corrupt CRC in first blob
-            let (blob, _, _) = context
+            let (blob, _) = context
                 .open("test_ordinal", &0u64.to_be_bytes())
                 .await
                 .unwrap();
@@ -779,7 +779,7 @@ mod tests {
             blob.sync().await.unwrap();
 
             // Corrupt value in second blob (which will invalidate CRC)
-            let (blob, _, _) = context
+            let (blob, _) = context
                 .open("test_ordinal", &1u64.to_be_bytes())
                 .await
                 .unwrap();
@@ -843,7 +843,7 @@ mod tests {
         // Add extra bytes at the end of blob
         {
-            let (blob, size, _) = context
+            let (blob, size) = context
                 .open("test_ordinal", &0u64.to_be_bytes())
                 .await
                 .unwrap();
@@ -900,7 +900,7 @@ mod tests {
         // Create blob with zero-filled space
         {
-            let (blob, _, _) = context
+            let (blob, _) = context
                 .open("test_ordinal", &0u64.to_be_bytes())
                 .await
                 .unwrap();
@@ -1951,7 +1951,7 @@ mod tests {
         // Corrupt record at index 2
         {
-            let (blob, _, _) = context
+            let (blob, _) = context
                 .open("test_ordinal", &0u64.to_be_bytes())
                 .await
                 .unwrap();
diff --git a/storage/src/ordinal/storage.rs b/storage/src/ordinal/storage.rs
index d3c3789b02..68d62d0539 100644
--- a/storage/src/ordinal/storage.rs
+++ b/storage/src/ordinal/storage.rs
@@ -120,7 +120,7 @@ impl> Ordinal {
         // Open all blobs and check for partial records
         for name in stored_blobs {
-            let (blob, mut len, _) = context.open(&config.partition, &name).await?;
+            let (blob, mut len) = context.open(&config.partition, &name).await?;
             let index = match name.try_into() {
                 Ok(index) => u64::from_be_bytes(index),
                 Err(nm) => Err(Error::InvalidBlobName(hex(&nm)))?,
@@ -255,7 +255,7 @@ impl> Ordinal {
         let items_per_blob = self.config.items_per_blob.get();
         let section = index / items_per_blob;
         if let Entry::Vacant(entry) = self.blobs.entry(section) {
-            let (blob, len, _) = self
+            let (blob, len) = self
                 .context
                 .open(&self.config.partition, &section.to_be_bytes())
                 .await?;
diff --git a/storage/src/qmdb/any/unordered/fixed/sync.rs b/storage/src/qmdb/any/unordered/fixed/sync.rs
index 41bb0cfbac..d3462c390c 100644
--- a/storage/src/qmdb/any/unordered/fixed/sync.rs
+++ b/storage/src/qmdb/any/unordered/fixed/sync.rs
@@ -217,7 +217,7 @@ pub(crate) async fn init_journal_at_size

Date: Tue, 6 Jan 2026 20:27:49 -0500
Subject: [PATCH 04/17] nits; reduce diff

---
 runtime/src/lib.rs                        | 2 +-
 runtime/src/utils/buffer/read.rs          | 2 --
 storage/src/journal/segmented/variable.rs | 4 ++--
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs
index 26c0ad1832..3358142568 100644
--- a/runtime/src/lib.rs
+++ b/runtime/src/lib.rs
@@ -1156,7 +1156,7 @@ mod tests {
         let partition = "test_partition";
         let name = b"test_blob";
 
-        // Open a new blob and verify returned version
+        // Open a new blob
         let (blob, size) = context
             .open(partition, name)
             .await
diff --git a/runtime/src/utils/buffer/read.rs b/runtime/src/utils/buffer/read.rs
index c9c48fe694..fc91de1299 100644
--- a/runtime/src/utils/buffer/read.rs
+++ b/runtime/src/utils/buffer/read.rs
@@ -11,8 +11,6 @@
 /// use commonware_utils::NZUsize;
 /// use commonware_runtime::{Runner, buffer::Read, Blob, Error, Storage, deterministic};
 ///
-/// const BLOB_VERSION: u16 = 0;
-///
 /// let executor = deterministic::Runner::default();
 /// executor.start(|context| async move {
 ///     // Open a blob and add some data (e.g., a journal file)
diff --git a/storage/src/journal/segmented/variable.rs b/storage/src/journal/segmented/variable.rs
index 17c97ed7d8..60e6f3b651 100644
--- a/storage/src/journal/segmented/variable.rs
+++ b/storage/src/journal/segmented/variable.rs
@@ -793,7 +793,7 @@ mod tests {
     use bytes::BufMut;
     use commonware_cryptography::{Hasher, Sha256};
     use commonware_macros::test_traced;
-    use commonware_runtime::{deterministic, Blob, Error as RError, Runner, Storage};
+    use commonware_runtime::{deterministic, Blob, Error as RError, Header, Runner, Storage};
     use commonware_utils::{NZUsize, StableBuf};
     use futures::{pin_mut, StreamExt};
     use prometheus_client::registry::Metric;
@@ -1901,7 +1901,7 @@ mod tests {
             _name: &[u8],
             _versions: std::ops::RangeInclusive<u16>,
         ) -> Result<(MockBlob, u64, u16), RError> {
-            Ok((MockBlob {}, self.len, 0))
+            Ok((MockBlob {}, self.len, Header::DEFAULT_APPLICATION_VERSION))
         }
 
         async fn remove(&self, _partition: &str, _name: Option<&[u8]>) -> Result<(), RError> {

From 679c7c7360847de91cf088f739d8bf3c65d772b7 Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Tue, 6 Jan 2026 20:31:17 -0500
Subject: [PATCH 05/17] fix iouring build

---
 runtime/src/storage/iouring.rs | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/runtime/src/storage/iouring.rs b/runtime/src/storage/iouring.rs
index 7b3fdf1c32..2fdaa3a448 100644
--- a/runtime/src/storage/iouring.rs
+++ b/runtime/src/storage/iouring.rs
@@ -474,7 +474,6 @@ mod tests {
         // Test 1: New blob returns logical size 0 and correct application version
         let (blob, size) = storage.open("partition", b"test").await.unwrap();
         assert_eq!(size, 0, "new blob should have logical size 0");
-        assert_eq!(app_version, Header::DEFAULT_APPLICATION_VERSION);
 
         // Verify raw file has 8 bytes (header only)
         let file_path = storage_directory.join("partition").join(hex(b"test"));
"commonware-utils", diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 707e48da9d..76dc0fe8d0 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -64,6 +64,7 @@ arbitrary = [ "commonware-math/arbitrary", "commonware-p2p/arbitrary", "commonware-resolver/arbitrary", + "commonware-runtime/arbitrary", "commonware-storage/arbitrary", "commonware-utils/arbitrary", "dep:arbitrary", diff --git a/p2p/Cargo.toml b/p2p/Cargo.toml index 9abe897ee5..79a8d9b6f7 100644 --- a/p2p/Cargo.toml +++ b/p2p/Cargo.toml @@ -44,6 +44,7 @@ tracing-subscriber.workspace = true arbitrary = [ "commonware-codec/arbitrary", "commonware-cryptography/arbitrary", + "commonware-runtime/arbitrary", "commonware-utils/arbitrary", "dep:arbitrary", "num-bigint/arbitrary", diff --git a/resolver/Cargo.toml b/resolver/Cargo.toml index da8d895a29..6d0b015162 100644 --- a/resolver/Cargo.toml +++ b/resolver/Cargo.toml @@ -48,6 +48,7 @@ arbitrary = [ "commonware-codec/arbitrary", "commonware-cryptography/arbitrary", "commonware-p2p/arbitrary", + "commonware-runtime/arbitrary", "commonware-utils/arbitrary", "dep:arbitrary", ] diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 5fcccf7eeb..0d07c2783a 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -14,10 +14,12 @@ documentation = "https://docs.rs/commonware-runtime" workspace = true [dependencies] +arbitrary = { workspace = true, optional = true } async-lock.workspace = true bytes.workspace = true cfg-if.workspace = true commonware-codec.workspace = true +commonware-conformance = { workspace = true, optional = true } commonware-macros.workspace = true commonware-parallel = { workspace = true, features = ["std"] } commonware-utils = { workspace = true, features = ["std"] } @@ -53,6 +55,12 @@ tokio = { workspace = true, features = ["full"] } [features] default = [] +arbitrary = [ + "commonware-codec/arbitrary", + "commonware-utils/arbitrary", + "dep:arbitrary", + "dep:commonware-conformance", +] external = [ "pin-project" ] test-utils = [] iouring-storage = [ "io-uring" ] diff --git a/runtime/conformance.toml b/runtime/conformance.toml new file mode 100644 index 0000000000..10d6d043dd --- /dev/null +++ b/runtime/conformance.toml @@ -0,0 +1,3 @@ +["commonware_runtime::tests::conformance::CodecConformance
"] +n_cases = 65536 +hash = "541c356728d47b13f1d3ac800926ef3ae2396c82f5d4e043f5c7641c4c22b4b9" diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index c1417836cd..de3718c502 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -23,6 +23,7 @@ )] use bytes::{Buf, BufMut}; +use commonware_codec::{DecodeExt, FixedSize, Read as CodecRead, Write as CodecWrite}; use commonware_macros::select; use commonware_parallel::{Rayon, ThreadPool}; use commonware_utils::StableBuf; @@ -653,22 +654,28 @@ impl Header { } } - /// Parses a header from bytes (big-endian format). - pub fn from_bytes(bytes: [u8; Self::SIZE]) -> Self { - Self { - magic: bytes[..4].try_into().unwrap(), - header_version: u16::from_be_bytes(bytes[4..6].try_into().unwrap()), - application_version: u16::from_be_bytes(bytes[6..8].try_into().unwrap()), - } + /// Returns true if a blob is missing a valid header (new or corrupted). + pub const fn missing(raw_len: u64) -> bool { + raw_len < Self::SIZE_U64 + } + + /// Creates a header for a new blob using the latest version from the range. + /// Returns (header, app_version). + pub const fn for_new_blob(versions: &std::ops::RangeInclusive) -> (Self, u16) { + let app_version = *versions.end(); + (Self::new(app_version), app_version) } - /// Serializes the header to bytes (big-endian format). - pub fn to_bytes(&self) -> [u8; Self::SIZE] { - let mut bytes = [0u8; Self::SIZE]; - bytes[..4].copy_from_slice(&self.magic); - bytes[4..6].copy_from_slice(&self.header_version.to_be_bytes()); - bytes[6..8].copy_from_slice(&self.application_version.to_be_bytes()); - bytes + /// Validates an existing header and computes the logical size. + /// Returns (app_version, logical_len) on success. + pub fn from_existing( + raw_bytes: [u8; Self::SIZE], + raw_len: u64, + versions: &std::ops::RangeInclusive, + ) -> Result<(u16, u64), Error> { + let header: Self = Self::decode(raw_bytes.as_slice()).map_err(|_| Error::ReadFailed)?; + header.validate(versions)?; + Ok((header.application_version, raw_len - Self::SIZE_U64)) } /// Validates the magic bytes, header version, and application version. @@ -693,6 +700,43 @@ impl Header { } } +impl FixedSize for Header { + const SIZE: usize = Self::SIZE; +} + +impl CodecWrite for Header { + fn write(&self, buf: &mut impl BufMut) { + buf.put_slice(&self.magic); + buf.put_u16(self.header_version); + buf.put_u16(self.application_version); + } +} + +impl CodecRead for Header { + type Cfg = (); + fn read_cfg(buf: &mut impl Buf, _cfg: &Self::Cfg) -> Result { + if buf.remaining() < Self::SIZE { + return Err(commonware_codec::Error::EndOfBuffer); + } + let mut magic = [0u8; Self::MAGIC_LENGTH]; + buf.copy_to_slice(&mut magic); + let header_version = buf.get_u16(); + let application_version = buf.get_u16(); + Ok(Self { + magic, + header_version, + application_version, + }) + } +} + +#[cfg(feature = "arbitrary")] +impl arbitrary::Arbitrary<'_> for Header { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + Ok(Self::new(u.arbitrary()?)) + } +} + /// Interface to read and write to a blob. 
/// /// To support blob implementations that enable concurrent reads and @@ -747,6 +791,7 @@ mod tests { use super::*; use crate::telemetry::traces::collector::TraceStorage; use bytes::Bytes; + use commonware_codec::Encode; use commonware_macros::{select, test_collect_traces}; use commonware_utils::NZUsize; use futures::{ @@ -780,7 +825,7 @@ mod tests { ); // Verify byte serialization - let bytes = header.to_bytes(); + let bytes = header.encode(); assert_eq!(&bytes[..4], &Header::MAGIC); assert_eq!(&bytes[4..6], &Header::HEADER_VERSION.to_be_bytes()); assert_eq!( @@ -789,7 +834,7 @@ mod tests { ); // Verify round-trip - let parsed = Header::from_bytes(bytes); + let parsed: Header = Header::decode(bytes.as_ref()).unwrap(); assert_eq!(parsed, header); } @@ -803,7 +848,7 @@ mod tests { #[test] fn test_header_validate_magic_wrong_bytes() { - let header = Header::from_bytes([0u8; Header::SIZE]); + let header: Header = Header::decode([0u8; Header::SIZE].as_slice()).unwrap(); let result = header .validate(&(Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION)); match result { @@ -813,9 +858,13 @@ mod tests { _ => panic!("expected BlobMagicMismatch error"), } - let mut bytes = Header::new(Header::DEFAULT_APPLICATION_VERSION).to_bytes(); + let mut bytes: [u8; Header::SIZE] = Header::new(Header::DEFAULT_APPLICATION_VERSION) + .encode() + .as_ref() + .try_into() + .unwrap(); bytes[0] = b'X'; // Corrupt first byte - let header = Header::from_bytes(bytes); + let header: Header = Header::decode(bytes.as_slice()).unwrap(); let result = header .validate(&(Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION)); match result { @@ -843,27 +892,27 @@ mod tests { fn test_header_bytes_round_trip() { // Test round-trip with default version let original = Header::new(0); - let bytes = original.to_bytes(); - let restored = Header::from_bytes(bytes); + let bytes = original.encode(); + let restored: Header = Header::decode(bytes.as_ref()).unwrap(); assert_eq!(original, restored); // Test round-trip with non-zero version let original = Header::new(42); - let bytes = original.to_bytes(); - let restored = Header::from_bytes(bytes); + let bytes = original.encode(); + let restored: Header = Header::decode(bytes.as_ref()).unwrap(); assert_eq!(original, restored); assert_eq!(restored.application_version, 42); // Test round-trip with max version let original = Header::new(u16::MAX); - let bytes = original.to_bytes(); - let restored = Header::from_bytes(bytes); + let bytes = original.encode(); + let restored: Header = Header::decode(bytes.as_ref()).unwrap(); assert_eq!(original, restored); assert_eq!(restored.application_version, u16::MAX); // Verify byte layout explicitly let header = Header::new(0x1234); - let bytes = header.to_bytes(); + let bytes = header.encode(); assert_eq!(&bytes[..4], &Header::MAGIC); assert_eq!(&bytes[4..6], &Header::HEADER_VERSION.to_be_bytes()); assert_eq!(&bytes[6..8], &0x1234u16.to_be_bytes()); @@ -3127,4 +3176,14 @@ mod tests { }); }); } + + #[cfg(feature = "arbitrary")] + mod conformance { + use super::Header; + use commonware_codec::conformance::CodecConformance; + + commonware_conformance::conformance_tests! { + CodecConformance
+ } + } } diff --git a/runtime/src/storage/audited.rs b/runtime/src/storage/audited.rs index e17d45f7a3..026ba2b23c 100644 --- a/runtime/src/storage/audited.rs +++ b/runtime/src/storage/audited.rs @@ -27,6 +27,8 @@ impl crate::Storage for Storage { self.auditor.event(b"open", |hasher| { hasher.update(partition.as_bytes()); hasher.update(name); + hasher.update(&versions.start().to_be_bytes()); + hasher.update(&versions.end().to_be_bytes()); }); self.inner .open_versioned(partition, name, versions) diff --git a/runtime/src/storage/iouring.rs b/runtime/src/storage/iouring.rs index 2fdaa3a448..357cfa0b43 100644 --- a/runtime/src/storage/iouring.rs +++ b/runtime/src/storage/iouring.rs @@ -24,6 +24,7 @@ use crate::{ iouring::{self, should_retry}, Error, Header, }; +use commonware_codec::Encode; use commonware_utils::{from_hex, hex, StableBuf}; use futures::{ channel::{mpsc, oneshot}, @@ -134,15 +135,14 @@ impl crate::Storage for Storage { // Handle header: new/corrupted blobs get a fresh header written, // existing blobs have their header read. - let (app_version, logical_len) = if raw_len < Header::SIZE_U64 { + let (app_version, logical_len) = if Header::missing(raw_len) { // New (or corrupted) blob - truncate and write header with latest version - let app_version = *versions.end(); - let header = Header::new(app_version); + let (header, app_version) = Header::for_new_blob(&versions); file.set_len(Header::SIZE_U64) .map_err(|e| Error::BlobResizeFailed(partition.into(), hex(name), e))?; file.seek(SeekFrom::Start(0)) .map_err(|_| Error::WriteFailed)?; - file.write_all(&header.to_bytes()) + file.write_all(&header.encode()) .map_err(|_| Error::WriteFailed)?; file.sync_all() .map_err(|e| Error::BlobSyncFailed(partition.into(), hex(name), e))?; @@ -163,10 +163,7 @@ impl crate::Storage for Storage { let mut header_bytes = [0u8; Header::SIZE]; file.read_exact(&mut header_bytes) .map_err(|_| Error::ReadFailed)?; - let header = Header::from_bytes(header_bytes); - header.validate(&versions)?; - - (header.application_version, raw_len - Header::SIZE_U64) + Header::from_existing(header_bytes, raw_len, &versions)? }; let blob = Blob::new(partition.into(), name, file, self.io_sender.clone()); diff --git a/runtime/src/storage/memory.rs b/runtime/src/storage/memory.rs index 29d3994045..6ef0d191b9 100644 --- a/runtime/src/storage/memory.rs +++ b/runtime/src/storage/memory.rs @@ -1,4 +1,5 @@ use crate::Header; +use commonware_codec::Encode; use commonware_utils::{hex, StableBuf}; use std::{ collections::BTreeMap, @@ -36,21 +37,17 @@ impl crate::Storage for Storage { let content = partition_entry.entry(name.into()).or_default(); let raw_len = content.len() as u64; - let (app_version, logical_len) = if raw_len < Header::SIZE_U64 { + let (app_version, logical_len) = if Header::missing(raw_len) { // New or corrupted blob - truncate and write default header with latest version - let app_version = *versions.end(); - let header = Header::new(app_version); + let (header, app_version) = Header::for_new_blob(&versions); content.clear(); - content.extend_from_slice(&header.to_bytes()); + content.extend_from_slice(&header.encode()); (app_version, 0) } else { // Existing blob - read and validate header let mut header_bytes = [0u8; Header::SIZE]; header_bytes.copy_from_slice(&content[..Header::SIZE]); - let header = Header::from_bytes(header_bytes); - header.validate(&versions)?; - - (header.application_version, raw_len - Header::SIZE_U64) + Header::from_existing(header_bytes, raw_len, &versions)? 
         };
 
         Ok((
diff --git a/runtime/src/storage/tokio/mod.rs b/runtime/src/storage/tokio/mod.rs
index efc92c942b..ea2c418e0b 100644
--- a/runtime/src/storage/tokio/mod.rs
+++ b/runtime/src/storage/tokio/mod.rs
@@ -1,4 +1,5 @@
 use crate::{Error, Header};
+use commonware_codec::Encode;
 use commonware_utils::{from_hex, hex};
 #[cfg(unix)]
 use std::path::Path;
@@ -138,14 +139,13 @@ impl crate::Storage for Storage {
         // Handle header: new/corrupted blobs get a fresh header written,
         // existing blobs have their header read.
-        let (app_version, logical_size) = if len < Header::SIZE_U64 {
+        let (app_version, logical_size) = if Header::missing(len) {
             // New or corrupted blob - truncate and write header with latest version
-            let app_version = *versions.end();
-            let header = Header::new(app_version);
+            let (header, app_version) = Header::for_new_blob(&versions);
             file.set_len(Header::SIZE_U64)
                 .await
                 .map_err(|e| Error::BlobResizeFailed(partition.into(), hex(name), e))?;
-            file.write_all(&header.to_bytes())
+            file.write_all(&header.encode())
                 .await
                 .map_err(|e| Error::BlobSyncFailed(partition.into(), hex(name), e))?;
             file.sync_all()
@@ -158,10 +158,7 @@ impl crate::Storage for Storage {
             file.read_exact(&mut header_bytes)
                 .await
                 .map_err(|_| Error::ReadFailed)?;
-            let header = Header::from_bytes(header_bytes);
-            header.validate(&versions)?;
-
-            (header.application_version, len - Header::SIZE_U64)
+            Header::from_existing(header_bytes, len, &versions)?
         };
 
         #[cfg(unix)]
diff --git a/storage/Cargo.toml b/storage/Cargo.toml
index 83db5924ed..db57ea1a7b 100644
--- a/storage/Cargo.toml
+++ b/storage/Cargo.toml
@@ -65,6 +65,7 @@ std = [
 arbitrary = [
     "commonware-codec/arbitrary",
     "commonware-cryptography/arbitrary",
+    "commonware-runtime?/arbitrary",
     "commonware-utils/arbitrary",
     "dep:arbitrary",
 ]

From ed8da0bee77f9ae95657ef83bef495beab37700e Mon Sep 17 00:00:00 2001
From: Patrick O'Grady
Date: Tue, 6 Jan 2026 18:19:52 -0800
Subject: [PATCH 08/17] add assert for DEFAULT_APPLICATION_VERSION in mock storage
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5
---
 storage/src/journal/segmented/variable.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/storage/src/journal/segmented/variable.rs b/storage/src/journal/segmented/variable.rs
index 60e6f3b651..c0292c13a6 100644
--- a/storage/src/journal/segmented/variable.rs
+++ b/storage/src/journal/segmented/variable.rs
@@ -1899,8 +1899,9 @@ mod tests {
             &self,
             _partition: &str,
             _name: &[u8],
-            _versions: std::ops::RangeInclusive<u16>,
+            versions: std::ops::RangeInclusive<u16>,
         ) -> Result<(MockBlob, u64, u16), RError> {
+            assert!(versions.contains(&Header::DEFAULT_APPLICATION_VERSION));
             Ok((MockBlob {}, self.len, Header::DEFAULT_APPLICATION_VERSION))
         }
 

From fcc76b794affb5e628565e8557f58b028e128045 Mon Sep 17 00:00:00 2001
From: Patrick O'Grady
Date: Tue, 6 Jan 2026 18:24:19 -0800
Subject: [PATCH 09/17] add missing test coverage for header validation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Add test_header_validate_header_version_mismatch in lib.rs
- Add test_blob_version_mismatch in iouring.rs (matching memory.rs and tokio)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5
---
 runtime/src/lib.rs             | 16 ++++++++++++++++
 runtime/src/storage/iouring.rs | 35 ++++++++++++++++++++++++++++++++++
 2 files changed, 51 insertions(+)

diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs
index de3718c502..4fd5b63ee1 100644
--- a/runtime/src/lib.rs
+++ b/runtime/src/lib.rs
@@ -888,6 +888,22 @@ mod tests {
         }
     }
 
+    #[test]
+    fn test_header_validate_header_version_mismatch() {
+        let mut bytes: [u8; Header::SIZE] = Header::new(0).encode().as_ref().try_into().unwrap();
+        bytes[4] = 0xFF;
+        bytes[5] = 0xFF;
+        let header: Header = Header::decode(bytes.as_slice()).unwrap();
+        let result = header.validate(&(0..=0));
+        match result {
+            Err(Error::BlobHeaderVersionMismatch { expected, found }) => {
+                assert_eq!(expected, Header::HEADER_VERSION);
+                assert_eq!(found, 0xFFFF);
+            }
+            _ => panic!("expected BlobHeaderVersionMismatch error"),
+        }
+    }
+
     #[test]
     fn test_header_bytes_round_trip() {
         // Test round-trip with default version
diff --git a/runtime/src/storage/iouring.rs b/runtime/src/storage/iouring.rs
index 357cfa0b43..bd8ab62c50 100644
--- a/runtime/src/storage/iouring.rs
+++ b/runtime/src/storage/iouring.rs
@@ -582,4 +582,39 @@ mod tests {
 
         let _ = std::fs::remove_dir_all(&storage_directory);
     }
+
+    #[tokio::test]
+    async fn test_blob_version_mismatch() {
+        let (storage, storage_directory) = create_test_storage();
+
+        // Create blob with version 1
+        let (_, _, app_version) = storage
+            .open_versioned("partition", b"v1", 1..=1)
+            .await
+            .unwrap();
+        assert_eq!(app_version, 1, "new blob should have version 1");
+
+        // Reopen with a range that includes version 1
+        let (_, _, app_version) = storage
+            .open_versioned("partition", b"v1", 0..=2)
+            .await
+            .unwrap();
+        assert_eq!(app_version, 1, "existing blob should retain version 1");
+
+        // Try to open with version range 2..=2 (should fail)
+        let result = storage.open_versioned("partition", b"v1", 2..=2).await;
+        match result {
+            Err(crate::Error::BlobApplicationVersionMismatch { expected, found }) => {
+                assert_eq!(expected, 2..=2);
+                assert_eq!(found, 1);
+            }
+            Err(err) => panic!(
+                "expected BlobApplicationVersionMismatch error, got: {:?}",
+                err
+            ),
+            Ok(_) => panic!("expected error, got Ok"),
+        }
+
+        let _ = std::fs::remove_dir_all(&storage_directory);
+    }
 }

From 16c207dc07476ae355ab17022d52919db4615b90 Mon Sep 17 00:00:00 2001
From: Patrick O'Grady
Date: Tue, 6 Jan 2026 18:57:41 -0800
Subject: [PATCH 10/17] refactor: make Header private with opaque error handling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Move Header struct to storage/mod.rs with pub(crate) visibility
- Rename fields: application_version -> blob_version, header_version -> runtime_version
- Rename methods: for_new_blob -> new, from_existing -> from
- Consolidate BlobMagicMismatch and BlobRuntimeVersionMismatch into opaque BlobCorrupt error
- Add standalone DEFAULT_BLOB_VERSION const in lib.rs
- Add versions to auditor hash in audited.rs
- Move conformance tests to storage/mod.rs

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5
---
 runtime/src/lib.rs                        | 295 ++--------------------
 runtime/src/storage/audited.rs            |   4 +-
 runtime/src/storage/iouring.rs            |  39 ++-
 runtime/src/storage/memory.rs             |  41 ++-
 runtime/src/storage/metered.rs            |   4 +-
 runtime/src/storage/mod.rs                | 212 ++++++++++++++++
 runtime/src/storage/tokio/fallback.rs     |   3 +-
 runtime/src/storage/tokio/mod.rs          |  36 ++-
 runtime/src/storage/tokio/unix.rs         |   3 +-
 storage/src/journal/segmented/variable.rs |   8 +-
 10 files changed, 298 insertions(+), 347 deletions(-)

diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs
index 4fd5b63ee1..169c5abf93 100644
--- a/runtime/src/lib.rs
+++ b/runtime/src/lib.rs
@@ -23,7 +23,6 @@
 )]
 
 use bytes::{Buf, BufMut};
-use commonware_codec::{DecodeExt, FixedSize, Read as CodecRead, Write as CodecWrite};
 use commonware_macros::select;
 use commonware_parallel::{Rayon, ThreadPool};
 use commonware_utils::StableBuf;
@@ -61,6 +60,9 @@ mod iouring;
 /// Prefix for runtime metrics.
 const METRICS_PREFIX: &str = "runtime";
 
+/// Default blob version used when no version is specified.
+pub const DEFAULT_BLOB_VERSION: u16 = 0;
+
 /// Errors that can occur when interacting with the runtime.
 #[derive(Error, Debug)]
 pub enum Error {
@@ -102,12 +104,10 @@ pub enum Error {
     BlobSyncFailed(String, String, IoError),
     #[error("blob insufficient length")]
     BlobInsufficientLength,
-    #[error("blob magic mismatch: expected {:?}, found {found:?}", Header::MAGIC)]
-    BlobMagicMismatch { found: [u8; Header::MAGIC_LENGTH] },
-    #[error("blob header version mismatch: expected {expected}, found {found}")]
-    BlobHeaderVersionMismatch { expected: u16, found: u16 },
-    #[error("blob application version mismatch: expected one of {expected:?}, found {found}")]
-    BlobApplicationVersionMismatch {
+    #[error("blob corrupt: {0}/{1} reason: {2}")]
+    BlobCorrupt(String, String, String),
+    #[error("blob version mismatch: expected one of {expected:?}, found {found}")]
+    BlobVersionMismatch {
         expected: std::ops::RangeInclusive<u16>,
         found: u16,
     },
@@ -546,9 +546,8 @@ pub trait Storage: Clone + Send + Sync + 'static {
     /// The readable/writeable storage buffer that can be opened by this Storage.
     type Blob: Blob;
 
-    /// [Storage::open_versioned] with [Header::DEFAULT_APPLICATION_VERSION] as the only value
-    /// in the versions range. The application version is omitted from the return value as it
-    /// is always [Header::DEFAULT_APPLICATION_VERSION].
+    /// [Storage::open_versioned] with the default blob version (0) as the only value
+    /// in the versions range. The blob version is omitted from the return value.
     fn open(
         &self,
         partition: &str,
@@ -561,7 +560,7 @@ pub trait Storage: Clone + Send + Sync + 'static {
             .open_versioned(
                 &partition,
                 &name,
-                Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION,
+                DEFAULT_BLOB_VERSION..=DEFAULT_BLOB_VERSION,
             )
             .await?;
         Ok((blob, size))
@@ -578,16 +577,16 @@ pub trait Storage: Clone + Send + Sync + 'static {
     ///
     /// # Versions
    ///
-    /// The blob's [Header] contains an application version.
-    /// `versions` specifies the range of acceptable application versions for an opened blob.
+    /// Each blob has an associated blob version stored in its header.
+    /// `versions` specifies the range of acceptable blob versions for an opened blob.
     /// If the blob already exists and its version is not in `versions`, returns
-    /// [Error::BlobApplicationVersionMismatch].
-    /// If the blob does not exist, it is created with the application version set to the last
+    /// [Error::BlobVersionMismatch].
+    /// If the blob does not exist, it is created with the blob version set to the last
     /// value in `versions`.
     ///
     /// # Returns
     ///
-    /// A tuple of (blob, logical_size, application_version).
+    /// A tuple of (blob, logical_size, blob_version).
     fn open_versioned(
         &self,
         partition: &str,
@@ -610,133 +609,6 @@
     fn scan(&self, partition: &str) -> impl Future<Output = Result<Vec<Vec<u8>>, Error>> + Send;
 }
 
-/// Fixed-size header at the start of each [Blob].
-///
-/// On-disk layout (8 bytes, big-endian):
-/// - Bytes 0-3: [Header::MAGIC]
-/// - Bytes 4-5: Header Version (u16)
-/// - Bytes 6-7: Application Version (u16)
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub struct Header {
-    magic: [u8; Self::MAGIC_LENGTH],
-    header_version: u16,
-    application_version: u16,
-}
-
-impl Header {
-    /// Size of the header in bytes.
-    pub const SIZE: usize = 8;
-
-    /// Size of the header as u64 for offset calculations.
-    pub const SIZE_U64: u64 = Self::SIZE as u64;
-
-    /// Length of magic bytes.
-    pub const MAGIC_LENGTH: usize = 4;
-
-    /// Length of version fields.
-    pub const VERSION_LENGTH: usize = 2;
-
-    /// Magic bytes identifying a valid commonware blob.
-    pub const MAGIC: [u8; Self::MAGIC_LENGTH] = *b"CWIC"; // Commonware Is CWIC
-
-    /// The current version of the header format.
-    pub const HEADER_VERSION: u16 = 0;
-
-    /// Default application version used in [Storage::open].
-    pub const DEFAULT_APPLICATION_VERSION: u16 = 0;
-
-    /// Creates a new header with the given application version.
-    pub const fn new(app_version: u16) -> Self {
-        Self {
-            magic: Self::MAGIC,
-            header_version: Self::HEADER_VERSION,
-            application_version: app_version,
-        }
-    }
-
-    /// Returns true if a blob is missing a valid header (new or corrupted).
-    pub const fn missing(raw_len: u64) -> bool {
-        raw_len < Self::SIZE_U64
-    }
-
-    /// Creates a header for a new blob using the latest version from the range.
-    /// Returns (header, app_version).
-    pub const fn for_new_blob(versions: &std::ops::RangeInclusive<u16>) -> (Self, u16) {
-        let app_version = *versions.end();
-        (Self::new(app_version), app_version)
-    }
-
-    /// Validates an existing header and computes the logical size.
-    /// Returns (app_version, logical_len) on success.
-    pub fn from_existing(
-        raw_bytes: [u8; Self::SIZE],
-        raw_len: u64,
-        versions: &std::ops::RangeInclusive<u16>,
-    ) -> Result<(u16, u64), Error> {
-        let header: Self = Self::decode(raw_bytes.as_slice()).map_err(|_| Error::ReadFailed)?;
-        header.validate(versions)?;
-        Ok((header.application_version, raw_len - Self::SIZE_U64))
-    }
-
-    /// Validates the magic bytes, header version, and application version.
-    /// `app_versions` is the range of acceptable application versions.
-    pub fn validate(&self, app_versions: &std::ops::RangeInclusive<u16>) -> Result<(), Error> {
-        if self.magic != Self::MAGIC {
-            return Err(Error::BlobMagicMismatch { found: self.magic });
-        }
-        if self.header_version != Self::HEADER_VERSION {
-            return Err(Error::BlobHeaderVersionMismatch {
-                expected: Self::HEADER_VERSION,
-                found: self.header_version,
-            });
-        }
-        if !app_versions.contains(&self.application_version) {
-            return Err(Error::BlobApplicationVersionMismatch {
-                expected: app_versions.clone(),
-                found: self.application_version,
-            });
-        }
-        Ok(())
-    }
-}
-
-impl FixedSize for Header {
-    const SIZE: usize = Self::SIZE;
-}
-
-impl CodecWrite for Header {
-    fn write(&self, buf: &mut impl BufMut) {
-        buf.put_slice(&self.magic);
-        buf.put_u16(self.header_version);
-        buf.put_u16(self.application_version);
-    }
-}
-
-impl CodecRead for Header {
-    type Cfg = ();
-
-    fn read_cfg(buf: &mut impl Buf, _cfg: &Self::Cfg) -> Result<Self, commonware_codec::Error> {
-        if buf.remaining() < Self::SIZE {
-            return Err(commonware_codec::Error::EndOfBuffer);
-        }
-        let mut magic = [0u8; Self::MAGIC_LENGTH];
-        buf.copy_to_slice(&mut magic);
-        let header_version = buf.get_u16();
-        let application_version = buf.get_u16();
-        Ok(Self {
-            magic,
-            header_version,
-            application_version,
-        })
-    }
-}
-
-#[cfg(feature = "arbitrary")]
-impl arbitrary::Arbitrary<'_> for Header {
-    fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
-        Ok(Self::new(u.arbitrary()?))
-    }
-}
-
 /// Interface to read and write to a blob.
 ///
 /// To support blob implementations that enable concurrent reads and
@@ -754,9 +626,10 @@ impl arbitrary::Arbitrary<'_> for Header {
 ///
 /// # Header
 ///
-/// All blobs have a [`Header`] at the start. The header is read on open
-/// (for existing blobs) or written (for new blobs). All I/O operations use logical
-/// offsets that start after the header; the header offset is handled internally.
+/// All blobs have an 8-byte header at the start containing magic bytes and version
+/// information. The header is read on open (for existing blobs) or written (for new
+/// blobs). All I/O operations use logical offsets that start after the header; the
+/// header offset is handled internally.
 #[allow(clippy::len_without_is_empty)]
 pub trait Blob: Clone + Send + Sync + 'static {
     /// Read from the blob at the given offset.
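To make the surviving API concrete between these hunks: a minimal sketch of the versioned-open flow the documentation above describes (assumes the deterministic runtime from this repository; the partition and blob names are illustrative placeholders, not part of the patch):

    use commonware_runtime::{deterministic, Blob, Runner, Storage};

    let executor = deterministic::Runner::default();
    executor.start(|context| async move {
        // Accept blob versions 0..=1; a blob that does not exist yet is
        // created at the top of the range (version 1).
        let (blob, size, version) = context
            .open_versioned("my_partition", b"my_data", 0..=1)
            .await
            .expect("unable to open blob");
        // `size` is logical: the 8-byte header is not counted.
        assert_eq!(size, 0);
        if version == 0 {
            // An existing version-0 blob was opened; migrate its contents here.
        }
        blob.sync().await.expect("unable to sync blob");
    });

Accepting a range rather than a single version lets a caller keep reading old-format blobs while new blobs are created at the latest version.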
@@ -791,7 +664,6 @@ mod tests {
     use super::*;
     use crate::telemetry::traces::collector::TraceStorage;
     use bytes::Bytes;
-    use commonware_codec::Encode;
     use commonware_macros::{select, test_collect_traces};
     use commonware_utils::NZUsize;
     use futures::{
@@ -815,125 +687,6 @@ mod tests {
     use tracing::{error, Level};
     use utils::reschedule;
 
-    #[test]
-    fn test_header_fields() {
-        let header = Header::new(Header::DEFAULT_APPLICATION_VERSION);
-        assert_eq!(header.header_version, Header::HEADER_VERSION);
-        assert_eq!(
-            header.application_version,
-            Header::DEFAULT_APPLICATION_VERSION
-        );
-
-        // Verify byte serialization
-        let bytes = header.encode();
-        assert_eq!(&bytes[..4], &Header::MAGIC);
-        assert_eq!(&bytes[4..6], &Header::HEADER_VERSION.to_be_bytes());
-        assert_eq!(
-            &bytes[6..8],
-            &Header::DEFAULT_APPLICATION_VERSION.to_be_bytes()
-        );
-
-        // Verify round-trip
-        let parsed: Header = Header::decode(bytes.as_ref()).unwrap();
-        assert_eq!(parsed, header);
-    }
-
-    #[test]
-    fn test_header_validate_success() {
-        let header = Header::new(Header::DEFAULT_APPLICATION_VERSION);
-        assert!(header
-            .validate(&(Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION))
-            .is_ok());
-    }
-
-    #[test]
-    fn test_header_validate_magic_wrong_bytes() {
-        let header: Header = Header::decode([0u8; Header::SIZE].as_slice()).unwrap();
-        let result = header
-            .validate(&(Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION));
-        match result {
-            Err(Error::BlobMagicMismatch { found }) => {
-                assert_eq!(found, [0u8; 4]);
-            }
-            _ => panic!("expected BlobMagicMismatch error"),
-        }
-
-        let mut bytes: [u8; Header::SIZE] = Header::new(Header::DEFAULT_APPLICATION_VERSION)
-            .encode()
-            .as_ref()
-            .try_into()
-            .unwrap();
-        bytes[0] = b'X'; // Corrupt first byte
-        let header: Header = Header::decode(bytes.as_slice()).unwrap();
-        let result = header
-            .validate(&(Header::DEFAULT_APPLICATION_VERSION..=Header::DEFAULT_APPLICATION_VERSION));
-        match result {
-            Err(Error::BlobMagicMismatch { found }) => {
-                assert_eq!(found[0], b'X');
-            }
-            _ => panic!("expected BlobMagicMismatch error"),
-        }
-    }
-
-    #[test]
-    fn test_header_validate_app_version_mismatch() {
-        let header = Header::new(5);
-        let result = header.validate(&(10..=20));
-        match result {
-            Err(Error::BlobApplicationVersionMismatch { expected, found }) => {
-                assert_eq!(expected, 10..=20);
-                assert_eq!(found, 5);
-            }
-            _ => panic!("expected BlobApplicationVersionMismatch error"),
-        }
-    }
-
-    #[test]
-    fn test_header_validate_header_version_mismatch() {
-        let mut bytes: [u8; Header::SIZE] = Header::new(0).encode().as_ref().try_into().unwrap();
-        bytes[4] = 0xFF;
-        bytes[5] = 0xFF;
-        let header: Header = Header::decode(bytes.as_slice()).unwrap();
-        let result = header.validate(&(0..=0));
-        match result {
-            Err(Error::BlobHeaderVersionMismatch { expected, found }) => {
-                assert_eq!(expected, Header::HEADER_VERSION);
-                assert_eq!(found, 0xFFFF);
-            }
-            _ => panic!("expected BlobHeaderVersionMismatch error"),
-        }
-    }
-
-    #[test]
-    fn test_header_bytes_round_trip() {
-        // Test round-trip with default version
-        let original = Header::new(0);
-        let bytes = original.encode();
-        let restored: Header = Header::decode(bytes.as_ref()).unwrap();
-        assert_eq!(original, restored);
-
-        // Test round-trip with non-zero version
-        let original = Header::new(42);
-        let bytes = original.encode();
-        let restored: Header = Header::decode(bytes.as_ref()).unwrap();
-        assert_eq!(original, restored);
-        assert_eq!(restored.application_version, 42);
-
-        // Test round-trip with max version
-        let original = Header::new(u16::MAX);
-        let bytes = original.encode();
-        let restored: Header = Header::decode(bytes.as_ref()).unwrap();
-        assert_eq!(original, restored);
-        assert_eq!(restored.application_version, u16::MAX);
-
-        // Verify byte layout explicitly
-        let header = Header::new(0x1234);
-        let bytes = header.encode();
-        assert_eq!(&bytes[..4], &Header::MAGIC);
-        assert_eq!(&bytes[4..6], &Header::HEADER_VERSION.to_be_bytes());
-        assert_eq!(&bytes[6..8], &0x1234u16.to_be_bytes());
-    }
-
     fn test_error_future<R: Runner>(runner: R) {
         async fn error_future() -> Result<&'static str, &'static str> {
             Err("An error occurred")
@@ -3192,14 +2945,4 @@ mod tests {
             });
         });
     }
-
-    #[cfg(feature = "arbitrary")]
-    mod conformance {
-        use super::Header;
-        use commonware_codec::conformance::CodecConformance;
-
- } - } } diff --git a/runtime/src/storage/audited.rs b/runtime/src/storage/audited.rs index 026ba2b23c..484df67003 100644 --- a/runtime/src/storage/audited.rs +++ b/runtime/src/storage/audited.rs @@ -33,7 +33,7 @@ impl crate::Storage for Storage { self.inner .open_versioned(partition, name, versions) .await - .map(|(blob, len, app_version)| { + .map(|(blob, len, blob_version)| { ( Blob { auditor: self.auditor.clone(), @@ -42,7 +42,7 @@ impl crate::Storage for Storage { name: name.to_vec(), }, len, - app_version, + blob_version, ) }) } diff --git a/runtime/src/storage/iouring.rs b/runtime/src/storage/iouring.rs index bd8ab62c50..8d1e34bae8 100644 --- a/runtime/src/storage/iouring.rs +++ b/runtime/src/storage/iouring.rs @@ -135,9 +135,9 @@ impl crate::Storage for Storage { // Handle header: new/corrupted blobs get a fresh header written, // existing blobs have their header read. - let (app_version, logical_len) = if Header::missing(raw_len) { + let (blob_version, logical_len) = if Header::missing(raw_len) { // New (or corrupted) blob - truncate and write header with latest version - let (header, app_version) = Header::for_new_blob(&versions); + let (header, blob_version) = Header::new(&versions); file.set_len(Header::SIZE_U64) .map_err(|e| Error::BlobResizeFailed(partition.into(), hex(name), e))?; file.seek(SeekFrom::Start(0)) @@ -155,7 +155,7 @@ impl crate::Storage for Storage { } } - (app_version, 0) + (blob_version, 0) } else { // Existing blob - read and validate header file.seek(SeekFrom::Start(0)) @@ -163,11 +163,11 @@ impl crate::Storage for Storage { let mut header_bytes = [0u8; Header::SIZE]; file.read_exact(&mut header_bytes) .map_err(|_| Error::ReadFailed)?; - Header::from_existing(header_bytes, raw_len, &versions)? + Header::from(header_bytes, raw_len, &versions, partition, name)? 
}; let blob = Blob::new(partition.into(), name, file, self.io_sender.clone()); - Ok((blob, logical_len, app_version)) + Ok((blob, logical_len, blob_version)) } async fn remove(&self, partition: &str, name: Option<&[u8]>) -> Result<(), Error> { @@ -436,8 +436,8 @@ impl crate::Blob for Blob { #[cfg(test)] mod tests { - use super::*; - use crate::{storage::tests::run_storage_tests, Blob, Header, Storage as _}; + use super::{Header, *}; + use crate::{storage::tests::run_storage_tests, Blob, Storage as _}; use rand::{Rng as _, SeedableRng as _}; use std::env; @@ -496,7 +496,7 @@ mod tests { // Header version (bytes 4-5) and App version (bytes 6-7) assert_eq!( &raw_content[Header::MAGIC_LENGTH..Header::MAGIC_LENGTH + Header::VERSION_LENGTH], - &Header::HEADER_VERSION.to_be_bytes() + &Header::RUNTIME_VERSION.to_be_bytes() ); // Data should start at offset 8 assert_eq!(&raw_content[Header::SIZE..], data); @@ -570,13 +570,13 @@ mod tests { let bad_magic_path = partition_path.join(hex(b"bad_magic")); std::fs::write(&bad_magic_path, vec![0u8; Header::SIZE]).unwrap(); - // Opening should fail with magic mismatch error + // Opening should fail with corrupt error let result = storage.open("partition", b"bad_magic").await; match result { - Err(crate::Error::BlobMagicMismatch { found }) => { - assert_eq!(found, [0u8; Header::MAGIC_LENGTH]); + Err(crate::Error::BlobCorrupt(_, _, reason)) => { + assert!(reason.contains("invalid magic")); } - Err(err) => panic!("expected BlobMagicMismatch error, got: {:?}", err), + Err(err) => panic!("expected BlobCorrupt error, got: {:?}", err), Ok(_) => panic!("expected error, got Ok"), } @@ -588,30 +588,27 @@ mod tests { let (storage, storage_directory) = create_test_storage(); // Create blob with version 1 - let (_, _, app_version) = storage + let (_, _, blob_version) = storage .open_versioned("partition", b"v1", 1..=1) .await .unwrap(); - assert_eq!(app_version, 1, "new blob should have version 1"); + assert_eq!(blob_version, 1, "new blob should have version 1"); // Reopen with a range that includes version 1 - let (_, _, app_version) = storage + let (_, _, blob_version) = storage .open_versioned("partition", b"v1", 0..=2) .await .unwrap(); - assert_eq!(app_version, 1, "existing blob should retain version 1"); + assert_eq!(blob_version, 1, "existing blob should retain version 1"); // Try to open with version range 2..=2 (should fail) let result = storage.open_versioned("partition", b"v1", 2..=2).await; match result { - Err(crate::Error::BlobApplicationVersionMismatch { expected, found }) => { + Err(crate::Error::BlobVersionMismatch { expected, found }) => { assert_eq!(expected, 2..=2); assert_eq!(found, 1); } - Err(err) => panic!( - "expected BlobApplicationVersionMismatch error, got: {:?}", - err - ), + Err(err) => panic!("expected BlobVersionMismatch error, got: {:?}", err), Ok(_) => panic!("expected error, got Ok"), } diff --git a/runtime/src/storage/memory.rs b/runtime/src/storage/memory.rs index 6ef0d191b9..48b8d7e5bc 100644 --- a/runtime/src/storage/memory.rs +++ b/runtime/src/storage/memory.rs @@ -1,4 +1,4 @@ -use crate::Header; +use super::Header; use commonware_codec::Encode; use commonware_utils::{hex, StableBuf}; use std::{ @@ -37,17 +37,17 @@ impl crate::Storage for Storage { let content = partition_entry.entry(name.into()).or_default(); let raw_len = content.len() as u64; - let (app_version, logical_len) = if Header::missing(raw_len) { + let (blob_version, logical_len) = if Header::missing(raw_len) { // New or corrupted blob - truncate and write default 
header with latest version - let (header, app_version) = Header::for_new_blob(&versions); + let (header, blob_version) = Header::new(&versions); content.clear(); content.extend_from_slice(&header.encode()); - (app_version, 0) + (blob_version, 0) } else { // Existing blob - read and validate header let mut header_bytes = [0u8; Header::SIZE]; header_bytes.copy_from_slice(&content[..Header::SIZE]); - Header::from_existing(header_bytes, raw_len, &versions)? + Header::from(header_bytes, raw_len, &versions, partition, name)? }; Ok(( @@ -58,7 +58,7 @@ impl crate::Storage for Storage { content.clone(), ), logical_len, - app_version, + blob_version, )) } @@ -200,8 +200,8 @@ impl crate::Blob for Blob { #[cfg(test)] mod tests { - use super::*; - use crate::{storage::tests::run_storage_tests, Blob, Header, Storage as _}; + use super::{Header, *}; + use crate::{storage::tests::run_storage_tests, Blob, Storage as _}; #[tokio::test] async fn test_memory_storage() { @@ -245,7 +245,7 @@ mod tests { // Next 2 bytes should be header version assert_eq!( &raw_content[Header::MAGIC_LENGTH..Header::MAGIC_LENGTH + Header::VERSION_LENGTH], - &Header::HEADER_VERSION.to_be_bytes() + &Header::RUNTIME_VERSION.to_be_bytes() ); // Data should start at offset 8 assert_eq!(&raw_content[Header::SIZE..], data); @@ -330,13 +330,13 @@ mod tests { partition.insert(b"bad_magic".to_vec(), vec![0u8; Header::SIZE]); } - // Opening should fail with magic mismatch error + // Opening should fail with corrupt error let result = storage.open("partition", b"bad_magic").await; match result { - Err(crate::Error::BlobMagicMismatch { found }) => { - assert_eq!(found, [0u8; Header::MAGIC_LENGTH]); + Err(crate::Error::BlobCorrupt(_, _, reason)) => { + assert!(reason.contains("invalid magic")); } - Err(err) => panic!("expected BlobMagicMismatch error, got: {:?}", err), + Err(err) => panic!("expected BlobCorrupt error, got: {:?}", err), Ok(_) => panic!("expected error, got Ok"), } } @@ -346,30 +346,27 @@ mod tests { let storage = Storage::default(); // Create blob with version 1 - let (_, _, app_version) = storage + let (_, _, blob_version) = storage .open_versioned("partition", b"v1", 1..=1) .await .unwrap(); - assert_eq!(app_version, 1, "new blob should have version 1"); + assert_eq!(blob_version, 1, "new blob should have version 1"); // Reopen with a range that includes version 1 - let (_, _, app_version) = storage + let (_, _, blob_version) = storage .open_versioned("partition", b"v1", 0..=2) .await .unwrap(); - assert_eq!(app_version, 1, "existing blob should retain version 1"); + assert_eq!(blob_version, 1, "existing blob should retain version 1"); // Try to open with version range 2..=2 (should fail) let result = storage.open_versioned("partition", b"v1", 2..=2).await; match result { - Err(crate::Error::BlobApplicationVersionMismatch { expected, found }) => { + Err(crate::Error::BlobVersionMismatch { expected, found }) => { assert_eq!(expected, 2..=2); assert_eq!(found, 1); } - Err(err) => panic!( - "expected BlobApplicationVersionMismatch error, got: {:?}", - err - ), + Err(err) => panic!("expected BlobVersionMismatch error, got: {:?}", err), Ok(_) => panic!("expected error, got Ok"), } } diff --git a/runtime/src/storage/metered.rs b/runtime/src/storage/metered.rs index c973d479d5..5380e56de1 100644 --- a/runtime/src/storage/metered.rs +++ b/runtime/src/storage/metered.rs @@ -84,7 +84,7 @@ impl crate::Storage for Storage { versions: RangeInclusive, ) -> Result<(Self::Blob, u64, u16), Error> { self.metrics.open_blobs.inc(); - let (inner, 
len, app_version) =
+        let (inner, len, blob_version) =
             self.inner.open_versioned(partition, name, versions).await?;
         Ok((
             Blob {
@@ -92,7 +92,7 @@ impl crate::Storage for Storage {
                 metrics: Arc::new(MetricsHandle(self.metrics.clone())),
             },
             len,
-            app_version,
+            blob_version,
         ))
     }
diff --git a/runtime/src/storage/mod.rs b/runtime/src/storage/mod.rs
index c7a8f5501e..4014070a7f 100644
--- a/runtime/src/storage/mod.rs
+++ b/runtime/src/storage/mod.rs
@@ -1,4 +1,9 @@
 //! Implementations of the `Storage` trait that can be used by the runtime.
+
+use bytes::{Buf, BufMut};
+use commonware_codec::{DecodeExt, FixedSize, Read as CodecRead, Write as CodecWrite};
+use commonware_utils::hex;
+
 pub mod audited;
 #[cfg(feature = "iouring-storage")]
 pub mod iouring;
@@ -7,6 +12,148 @@ pub mod metered;
 #[cfg(all(not(target_arch = "wasm32"), not(feature = "iouring-storage")))]
 pub mod tokio;
 
+/// Fixed-size header at the start of each [crate::Blob].
+///
+/// On-disk layout (8 bytes, big-endian):
+/// - Bytes 0-3: [Header::MAGIC]
+/// - Bytes 4-5: Runtime Version (u16)
+/// - Bytes 6-7: Blob Version (u16)
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub(crate) struct Header {
+    magic: [u8; Self::MAGIC_LENGTH],
+    runtime_version: u16,
+    pub(crate) blob_version: u16,
+}
+
+impl Header {
+    /// Size of the header in bytes.
+    pub(crate) const SIZE: usize = 8;
+
+    /// Size of the header as u64 for offset calculations.
+    pub(crate) const SIZE_U64: u64 = Self::SIZE as u64;
+
+    /// Length of magic bytes.
+    pub(crate) const MAGIC_LENGTH: usize = 4;
+
+    /// Length of version fields.
+    #[cfg(test)]
+    pub(crate) const VERSION_LENGTH: usize = 2;
+
+    /// Magic bytes identifying a valid commonware blob.
+    pub(crate) const MAGIC: [u8; Self::MAGIC_LENGTH] = *b"CWIC"; // Commonware Is CWIC
+
+    /// The current version of the header format.
+    pub(crate) const RUNTIME_VERSION: u16 = 0;
+
+    /// Returns true if a blob is missing a valid header (new or corrupted).
+    pub(crate) const fn missing(raw_len: u64) -> bool {
+        raw_len < Self::SIZE_U64
+    }
+
+    /// Creates a header for a new blob using the latest version from the range.
+    /// Returns (header, blob_version).
+    pub(crate) const fn new(versions: &std::ops::RangeInclusive<u16>) -> (Self, u16) {
+        let blob_version = *versions.end();
+        let header = Self {
+            magic: Self::MAGIC,
+            runtime_version: Self::RUNTIME_VERSION,
+            blob_version,
+        };
+        (header, blob_version)
+    }
+
+    /// Validates an existing header and computes the logical size.
+    /// Returns (blob_version, logical_len) on success.
+    pub(crate) fn from(
+        raw_bytes: [u8; Self::SIZE],
+        raw_len: u64,
+        versions: &std::ops::RangeInclusive<u16>,
+        partition: &str,
+        name: &[u8],
+    ) -> Result<(u16, u64), crate::Error> {
+        let header: Self =
+            Self::decode(raw_bytes.as_slice()).map_err(|_| crate::Error::ReadFailed)?;
+        header.validate(versions, partition, name)?;
+        Ok((header.blob_version, raw_len - Self::SIZE_U64))
+    }
+
+    /// Validates the magic bytes, runtime version, and blob version.
+    pub(crate) fn validate(
+        &self,
+        blob_versions: &std::ops::RangeInclusive<u16>,
+        partition: &str,
+        name: &[u8],
+    ) -> Result<(), crate::Error> {
+        if self.magic != Self::MAGIC {
+            return Err(crate::Error::BlobCorrupt(
+                partition.into(),
+                hex(name),
+                format!(
+                    "invalid magic: expected {:?}, found {:?}",
+                    Self::MAGIC,
+                    self.magic
+                ),
+            ));
+        }
+        if self.runtime_version != Self::RUNTIME_VERSION {
+            return Err(crate::Error::BlobCorrupt(
+                partition.into(),
+                hex(name),
+                format!(
+                    "unsupported runtime version: expected {}, found {}",
+                    Self::RUNTIME_VERSION,
+                    self.runtime_version
+                ),
+            ));
+        }
+        if !blob_versions.contains(&self.blob_version) {
+            return Err(crate::Error::BlobVersionMismatch {
+                expected: blob_versions.clone(),
+                found: self.blob_version,
+            });
+        }
+        Ok(())
+    }
+}
+
+impl FixedSize for Header {
+    const SIZE: usize = Self::SIZE;
+}
+
+impl CodecWrite for Header {
+    fn write(&self, buf: &mut impl BufMut) {
+        buf.put_slice(&self.magic);
+        buf.put_u16(self.runtime_version);
+        buf.put_u16(self.blob_version);
+    }
+}
+
+impl CodecRead for Header {
+    type Cfg = ();
+    fn read_cfg(buf: &mut impl Buf, _cfg: &Self::Cfg) -> Result<Self, commonware_codec::Error> {
+        if buf.remaining() < Self::SIZE {
+            return Err(commonware_codec::Error::EndOfBuffer);
+        }
+        let mut magic = [0u8; Self::MAGIC_LENGTH];
+        buf.copy_to_slice(&mut magic);
+        let runtime_version = buf.get_u16();
+        let blob_version = buf.get_u16();
+        Ok(Self {
+            magic,
+            runtime_version,
+            blob_version,
+        })
+    }
+}
+
+#[cfg(feature = "arbitrary")]
+impl arbitrary::Arbitrary<'_> for Header {
+    fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+        let version: u16 = u.arbitrary()?;
+        Ok(Self::new(&(version..=version)).0)
+    }
+}
+
 /// Validate that a partition name contains only allowed characters.
/// /// Partition names must only contain alphanumeric characters, dashes ('-'), @@ -24,7 +171,72 @@ pub fn validate_partition_name(partition: &str) -> Result<(), crate::Error> { #[cfg(test)] pub(crate) mod tests { + use super::Header; use crate::{Blob, Storage}; + use commonware_codec::{DecodeExt, Encode}; + + #[test] + fn test_header_fields() { + let (header, _) = Header::new(&(42..=42)); + assert_eq!(header.magic, Header::MAGIC); + assert_eq!(header.runtime_version, Header::RUNTIME_VERSION); + assert_eq!(header.blob_version, 42); + } + + #[test] + fn test_header_validate_success() { + let (header, _) = Header::new(&(5..=5)); + assert!(header.validate(&(3..=7), "test", b"blob").is_ok()); + assert!(header.validate(&(5..=5), "test", b"blob").is_ok()); + } + + #[test] + fn test_header_validate_magic_mismatch() { + let (mut header, _) = Header::new(&(5..=5)); + header.magic = *b"XXXX"; + let result = header.validate(&(3..=7), "test", b"blob"); + assert!( + matches!(result, Err(crate::Error::BlobCorrupt(_, _, reason)) if reason.contains("invalid magic")) + ); + } + + #[test] + fn test_header_validate_runtime_version_mismatch() { + let (mut header, _) = Header::new(&(5..=5)); + header.runtime_version = 99; + let result = header.validate(&(3..=7), "test", b"blob"); + assert!( + matches!(result, Err(crate::Error::BlobCorrupt(_, _, reason)) if reason.contains("unsupported runtime version")) + ); + } + + #[test] + fn test_header_validate_blob_version_out_of_range() { + let (header, _) = Header::new(&(10..=10)); + let result = header.validate(&(3..=7), "test", b"blob"); + assert!(matches!( + result, + Err(crate::Error::BlobVersionMismatch { expected, found: 10 }) if expected == (3..=7) + )); + } + + #[test] + fn test_header_bytes_round_trip() { + let (header, _) = Header::new(&(123..=123)); + let bytes = header.encode(); + let decoded: Header = Header::decode(bytes.as_ref()).unwrap(); + assert_eq!(header, decoded); + } + + #[cfg(feature = "arbitrary")] + mod conformance { + use super::Header; + use commonware_codec::conformance::CodecConformance; + + commonware_conformance::conformance_tests! { + CodecConformance
+ } + } /// Runs the full suite of tests on the provided storage implementation. pub(crate) async fn run_storage_tests(storage: S) diff --git a/runtime/src/storage/tokio/fallback.rs b/runtime/src/storage/tokio/fallback.rs index ac273c8471..f4aa55f967 100644 --- a/runtime/src/storage/tokio/fallback.rs +++ b/runtime/src/storage/tokio/fallback.rs @@ -1,4 +1,5 @@ -use crate::{Error, Header}; +use super::Header; +use crate::Error; use commonware_utils::{hex, StableBuf}; use std::{io::SeekFrom, sync::Arc}; use tokio::{ diff --git a/runtime/src/storage/tokio/mod.rs b/runtime/src/storage/tokio/mod.rs index ea2c418e0b..811343105f 100644 --- a/runtime/src/storage/tokio/mod.rs +++ b/runtime/src/storage/tokio/mod.rs @@ -1,4 +1,5 @@ -use crate::{Error, Header}; +use super::Header; +use crate::Error; use commonware_codec::Encode; use commonware_utils::{from_hex, hex}; #[cfg(unix)] @@ -139,9 +140,9 @@ impl crate::Storage for Storage { // Handle header: new/corrupted blobs get a fresh header written, // existing blobs have their header read. - let (app_version, logical_size) = if Header::missing(len) { + let (blob_version, logical_size) = if Header::missing(len) { // New or corrupted blob - truncate and write header with latest version - let (header, app_version) = Header::for_new_blob(&versions); + let (header, blob_version) = Header::new(&versions); file.set_len(Header::SIZE_U64) .await .map_err(|e| Error::BlobResizeFailed(partition.into(), hex(name), e))?; @@ -151,14 +152,14 @@ impl crate::Storage for Storage { file.sync_all() .await .map_err(|e| Error::BlobSyncFailed(partition.into(), hex(name), e))?; - (app_version, 0) + (blob_version, 0) } else { // Existing blob - read and validate header let mut header_bytes = [0u8; Header::SIZE]; file.read_exact(&mut header_bytes) .await .map_err(|_| Error::ReadFailed)?; - Header::from_existing(header_bytes, len, &versions)? + Header::from(header_bytes, len, &versions, partition, name)? 
}; #[cfg(unix)] @@ -170,7 +171,7 @@ impl crate::Storage for Storage { Ok(( Self::Blob::new(partition.into(), name, file), logical_size, - app_version, + blob_version, )) } #[cfg(not(unix))] @@ -179,7 +180,7 @@ impl crate::Storage for Storage { Ok(( Self::Blob::new(partition.into(), name, file), logical_size, - app_version, + blob_version, )) } } @@ -245,8 +246,8 @@ impl crate::Storage for Storage { #[cfg(test)] mod tests { - use super::*; - use crate::{storage::tests::run_storage_tests, Blob, Header, Storage as _}; + use super::{Header, *}; + use crate::{storage::tests::run_storage_tests, Blob, Storage as _}; use rand::{Rng as _, SeedableRng}; use std::env; @@ -295,7 +296,7 @@ mod tests { // Header version (bytes 4-5) and App version (bytes 6-7) assert_eq!( &raw_content[Header::MAGIC_LENGTH..Header::MAGIC_LENGTH + Header::VERSION_LENGTH], - &Header::HEADER_VERSION.to_be_bytes() + &Header::RUNTIME_VERSION.to_be_bytes() ); // Data should start at offset 8 assert_eq!(&raw_content[Header::SIZE..], data); @@ -374,13 +375,13 @@ mod tests { let bad_magic_path = partition_path.join(hex(b"bad_magic")); std::fs::write(&bad_magic_path, vec![0u8; Header::SIZE]).unwrap(); - // Opening should fail with magic mismatch error + // Opening should fail with corrupt error let result = storage.open("partition", b"bad_magic").await; match result { - Err(crate::Error::BlobMagicMismatch { found }) => { - assert_eq!(found, [0u8; Header::MAGIC_LENGTH]); + Err(crate::Error::BlobCorrupt(_, _, reason)) => { + assert!(reason.contains("invalid magic")); } - Err(err) => panic!("expected BlobMagicMismatch error, got: {:?}", err), + Err(err) => panic!("expected BlobCorrupt error, got: {:?}", err), Ok(_) => panic!("expected error, got Ok"), } @@ -404,14 +405,11 @@ mod tests { // Try to open with version range 2..=2 let result = storage.open_versioned("partition", b"v1", 2..=2).await; match result { - Err(crate::Error::BlobApplicationVersionMismatch { expected, found }) => { + Err(crate::Error::BlobVersionMismatch { expected, found }) => { assert_eq!(expected, 2..=2); assert_eq!(found, 1); } - Err(err) => panic!( - "expected BlobApplicationVersionMismatch error, got: {:?}", - err - ), + Err(err) => panic!("expected BlobVersionMismatch error, got: {:?}", err), Ok(_) => panic!("expected error, got Ok"), } diff --git a/runtime/src/storage/tokio/unix.rs b/runtime/src/storage/tokio/unix.rs index 73ee686b09..3409a13746 100644 --- a/runtime/src/storage/tokio/unix.rs +++ b/runtime/src/storage/tokio/unix.rs @@ -1,4 +1,5 @@ -use crate::{Error, Header}; +use super::Header; +use crate::Error; use commonware_utils::{hex, StableBuf}; use std::{fs::File, os::unix::fs::FileExt, sync::Arc}; use tokio::task; diff --git a/storage/src/journal/segmented/variable.rs b/storage/src/journal/segmented/variable.rs index c0292c13a6..7373132c12 100644 --- a/storage/src/journal/segmented/variable.rs +++ b/storage/src/journal/segmented/variable.rs @@ -793,7 +793,9 @@ mod tests { use bytes::BufMut; use commonware_cryptography::{Hasher, Sha256}; use commonware_macros::test_traced; - use commonware_runtime::{deterministic, Blob, Error as RError, Header, Runner, Storage}; + use commonware_runtime::{ + deterministic, Blob, Error as RError, Runner, Storage, DEFAULT_BLOB_VERSION, + }; use commonware_utils::{NZUsize, StableBuf}; use futures::{pin_mut, StreamExt}; use prometheus_client::registry::Metric; @@ -1901,8 +1903,8 @@ mod tests { _name: &[u8], versions: std::ops::RangeInclusive, ) -> Result<(MockBlob, u64, u16), RError> { - 
assert!(versions.contains(&Header::DEFAULT_APPLICATION_VERSION)); - Ok((MockBlob {}, self.len, Header::DEFAULT_APPLICATION_VERSION)) + assert!(versions.contains(&DEFAULT_BLOB_VERSION)); + Ok((MockBlob {}, self.len, DEFAULT_BLOB_VERSION)) } async fn remove(&self, _partition: &str, _name: Option<&[u8]>) -> Result<(), RError> { From 1aaae670d4dbc8f93ebffbefb5718dcd38899370 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Tue, 6 Jan 2026 19:00:55 -0800 Subject: [PATCH 11/17] docs: remove implementation details from public trait docs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Remove Header section from Blob trait (implementation detail) - Simplify Versions section to generic explanation 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- runtime/src/lib.rs | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 169c5abf93..2ca6687d50 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -577,12 +577,8 @@ pub trait Storage: Clone + Send + Sync + 'static { /// /// # Versions /// - /// Each blob has an associated blob version stored in its header. - /// `versions` specifies the range of acceptable blob versions for an opened blob. - /// If the blob already exists and its version is not in `versions`, returns + /// Blobs are versioned. If the blob's version is not in `versions`, returns /// [Error::BlobVersionMismatch]. - /// If the blob does not exist, it is created with the blob version set to the last - /// value in `versions`. /// /// # Returns /// @@ -623,13 +619,6 @@ pub trait Storage: Clone + Send + Sync + 'static { /// When a blob is dropped, any unsynced changes may be discarded. Implementations /// may attempt to sync during drop but errors will go unhandled. Call `sync` /// before dropping to ensure all changes are durably persisted. -/// -/// # Header -/// -/// All blobs have an 8-byte header at the start containing magic bytes and version -/// information. The header is read on open (for existing blobs) or written (for new -/// blobs). All I/O operations use logical offsets that start after the header; the -/// header offset is handled internally. #[allow(clippy::len_without_is_empty)] pub trait Blob: Clone + Send + Sync + 'static { /// Read from the blob at the given offset. From 5914a4fcbdc19aaca20576f6eac6635cd7fc3996 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Tue, 6 Jan 2026 19:02:17 -0800 Subject: [PATCH 12/17] fix: iouring import and doc links MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Fix Header import in iouring.rs (use super::Header) - Add doc links to DEFAULT_BLOB_VERSION 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- runtime/src/lib.rs | 2 +- runtime/src/storage/iouring.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 2ca6687d50..2c3fa584fb 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -60,7 +60,7 @@ mod iouring; /// Prefix for runtime metrics. const METRICS_PREFIX: &str = "runtime"; -/// Default blob version used when no version is specified. +/// Default [`Blob`] version used when no version is specified via [`Storage::open`]. pub const DEFAULT_BLOB_VERSION: u16 = 0; /// Errors that can occur when interacting with the runtime. 
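For context, a sketch of how the two entry points relate under the documented
default (assuming `open` delegates to `open_versioned` exactly as the doc
comment above states; `storage` is any `Storage` implementation and error
handling is elided):

    // Equivalent under the defaults: `open` pins the acceptable range to
    // DEFAULT_BLOB_VERSION..=DEFAULT_BLOB_VERSION and omits the blob
    // version from the return value.
    let (_blob, _len) = storage.open("p", b"n").await?;
    let (_blob, _len, version) = storage
        .open_versioned("p", b"n", DEFAULT_BLOB_VERSION..=DEFAULT_BLOB_VERSION)
        .await?;
    assert_eq!(version, DEFAULT_BLOB_VERSION);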
diff --git a/runtime/src/storage/iouring.rs b/runtime/src/storage/iouring.rs
index 8d1e34bae8..9eb5fe138c 100644
--- a/runtime/src/storage/iouring.rs
+++ b/runtime/src/storage/iouring.rs
@@ -20,9 +20,10 @@
 //!
 //! This implementation is only available on Linux systems that support io_uring.
 
+use super::Header;
 use crate::{
     iouring::{self, should_retry},
-    Error, Header,
+    Error,
 };
 use commonware_codec::Encode;
 use commonware_utils::{from_hex, hex, StableBuf};

From 17d8f6069601dc7643130a4bfd6729943e7ff1b1 Mon Sep 17 00:00:00 2001
From: Patrick O'Grady
Date: Tue, 6 Jan 2026 19:08:53 -0800
Subject: [PATCH 13/17] refactor: return HeaderError from Header::from/validate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Move partition/name context handling to call sites via map_err, keeping
Header methods focused on validation logic only.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5
---
 runtime/src/storage/iouring.rs   |   3 +-
 runtime/src/storage/memory.rs    |   3 +-
 runtime/src/storage/mod.rs       | 120 +++++++++++++++++++------------
 runtime/src/storage/tokio/mod.rs |   2 +-
 4 files changed, 78 insertions(+), 50 deletions(-)

diff --git a/runtime/src/storage/iouring.rs b/runtime/src/storage/iouring.rs
index 9eb5fe138c..cfe7fa5548 100644
--- a/runtime/src/storage/iouring.rs
+++ b/runtime/src/storage/iouring.rs
@@ -164,7 +164,8 @@ impl crate::Storage for Storage {
             let mut header_bytes = [0u8; Header::SIZE];
             file.read_exact(&mut header_bytes)
                 .map_err(|_| Error::ReadFailed)?;
-            Header::from(header_bytes, raw_len, &versions, partition, name)?
+            Header::from(header_bytes, raw_len, &versions)
+                .map_err(|e| e.into_error(partition, name))?
         };
 
         let blob = Blob::new(partition.into(), name, file, self.io_sender.clone());
diff --git a/runtime/src/storage/memory.rs b/runtime/src/storage/memory.rs
index 48b8d7e5bc..bfb7768110 100644
--- a/runtime/src/storage/memory.rs
+++ b/runtime/src/storage/memory.rs
@@ -47,7 +47,8 @@ impl crate::Storage for Storage {
             // Existing blob - read and validate header
             let mut header_bytes = [0u8; Header::SIZE];
             header_bytes.copy_from_slice(&content[..Header::SIZE]);
-            Header::from(header_bytes, raw_len, &versions, partition, name)?
+            Header::from(header_bytes, raw_len, &versions)
+                .map_err(|e| e.into_error(partition, name))?
         };
 
         Ok((
diff --git a/runtime/src/storage/mod.rs b/runtime/src/storage/mod.rs
index 4014070a7f..098cc702cd 100644
--- a/runtime/src/storage/mod.rs
+++ b/runtime/src/storage/mod.rs
@@ -3,6 +3,45 @@
 use bytes::{Buf, BufMut};
 use commonware_codec::{DecodeExt, FixedSize, Read as CodecRead, Write as CodecWrite};
 use commonware_utils::hex;
+use std::ops::RangeInclusive;
+
+/// Errors that can occur when validating a blob header.
+#[derive(Debug)]
+pub(crate) enum HeaderError {
+    InvalidMagic {
+        expected: [u8; 4],
+        found: [u8; 4],
+    },
+    UnsupportedRuntimeVersion {
+        expected: u16,
+        found: u16,
+    },
+    VersionMismatch {
+        expected: RangeInclusive<u16>,
+        found: u16,
+    },
+}
+
+impl HeaderError {
+    /// Converts this error into an [`Error`](enum@crate::Error) with partition and name context.
+    pub(crate) fn into_error(self, partition: &str, name: &[u8]) -> crate::Error {
+        match self {
+            Self::InvalidMagic { expected, found } => crate::Error::BlobCorrupt(
+                partition.into(),
+                hex(name),
+                format!("invalid magic: expected {expected:?}, found {found:?}"),
+            ),
+            Self::UnsupportedRuntimeVersion { expected, found } => crate::Error::BlobCorrupt(
+                partition.into(),
+                hex(name),
+                format!("unsupported runtime version: expected {expected}, found {found}"),
+            ),
+            Self::VersionMismatch { expected, found } => {
+                crate::Error::BlobVersionMismatch { expected, found }
+            }
+        }
+    }
+}
 
 pub mod audited;
 #[cfg(feature = "iouring-storage")]
 pub mod iouring;
@@ -62,52 +101,34 @@ impl Header {
         (header, blob_version)
     }
 
-    /// Validates an existing header and computes the logical size.
-    /// Returns (blob_version, logical_len) on success.
+    /// Parses and validates an existing header, returning the blob version and logical size.
     pub(crate) fn from(
         raw_bytes: [u8; Self::SIZE],
         raw_len: u64,
-        versions: &std::ops::RangeInclusive<u16>,
-        partition: &str,
-        name: &[u8],
-    ) -> Result<(u16, u64), crate::Error> {
-        let header: Self =
-            Self::decode(raw_bytes.as_slice()).map_err(|_| crate::Error::ReadFailed)?;
-        header.validate(versions, partition, name)?;
+        versions: &RangeInclusive<u16>,
+    ) -> Result<(u16, u64), HeaderError> {
+        let header: Self = Self::decode(raw_bytes.as_slice())
+            .expect("header decode should never fail for correct size input");
+        header.validate(versions)?;
         Ok((header.blob_version, raw_len - Self::SIZE_U64))
     }
 
     /// Validates the magic bytes, runtime version, and blob version.
-    pub(crate) fn validate(
-        &self,
-        blob_versions: &std::ops::RangeInclusive<u16>,
-        partition: &str,
-        name: &[u8],
-    ) -> Result<(), crate::Error> {
+    pub(crate) fn validate(&self, blob_versions: &RangeInclusive<u16>) -> Result<(), HeaderError> {
         if self.magic != Self::MAGIC {
-            return Err(crate::Error::BlobCorrupt(
-                partition.into(),
-                hex(name),
-                format!(
-                    "invalid magic: expected {:?}, found {:?}",
-                    Self::MAGIC,
-                    self.magic
-                ),
-            ));
+            return Err(HeaderError::InvalidMagic {
+                expected: Self::MAGIC,
+                found: self.magic,
+            });
         }
         if self.runtime_version != Self::RUNTIME_VERSION {
-            return Err(crate::Error::BlobCorrupt(
-                partition.into(),
-                hex(name),
-                format!(
-                    "unsupported runtime version: expected {}, found {}",
-                    Self::RUNTIME_VERSION,
-                    self.runtime_version
-                ),
-            ));
+            return Err(HeaderError::UnsupportedRuntimeVersion {
+                expected: Self::RUNTIME_VERSION,
+                found: self.runtime_version,
+            });
         }
         if !blob_versions.contains(&self.blob_version) {
-            return Err(crate::Error::BlobVersionMismatch {
+            return Err(HeaderError::VersionMismatch {
                 expected: blob_versions.clone(),
                 found: self.blob_version,
             });
@@ -171,7 +192,7 @@
 
 #[cfg(test)]
 pub(crate) mod tests {
-    use super::Header;
+    use super::{Header, HeaderError};
     use crate::{Blob, Storage};
     use commonware_codec::{DecodeExt, Encode};
 
@@ -186,37 +207,42 @@ pub(crate) mod tests {
     #[test]
     fn test_header_validate_success() {
         let (header, _) = Header::new(&(5..=5));
-        assert!(header.validate(&(3..=7), "test", b"blob").is_ok());
-        assert!(header.validate(&(5..=5), "test", b"blob").is_ok());
+        assert!(header.validate(&(3..=7)).is_ok());
+        assert!(header.validate(&(5..=5)).is_ok());
     }
 
     #[test]
     fn test_header_validate_magic_mismatch() {
         let (mut header, _) = Header::new(&(5..=5));
         header.magic = *b"XXXX";
-        let result = header.validate(&(3..=7), "test", b"blob");
-        assert!(
-            matches!(result, 
Err(crate::Error::BlobCorrupt(_, _, reason)) if reason.contains("invalid magic")) - ); + let result = header.validate(&(3..=7)); + assert!(matches!( + result, + Err(HeaderError::InvalidMagic { expected, found }) + if expected == Header::MAGIC && found == *b"XXXX" + )); } #[test] fn test_header_validate_runtime_version_mismatch() { let (mut header, _) = Header::new(&(5..=5)); header.runtime_version = 99; - let result = header.validate(&(3..=7), "test", b"blob"); - assert!( - matches!(result, Err(crate::Error::BlobCorrupt(_, _, reason)) if reason.contains("unsupported runtime version")) - ); + let result = header.validate(&(3..=7)); + assert!(matches!( + result, + Err(HeaderError::UnsupportedRuntimeVersion { expected, found }) + if expected == Header::RUNTIME_VERSION && found == 99 + )); } #[test] fn test_header_validate_blob_version_out_of_range() { let (header, _) = Header::new(&(10..=10)); - let result = header.validate(&(3..=7), "test", b"blob"); + let result = header.validate(&(3..=7)); assert!(matches!( result, - Err(crate::Error::BlobVersionMismatch { expected, found: 10 }) if expected == (3..=7) + Err(HeaderError::VersionMismatch { expected, found }) + if expected == (3..=7) && found == 10 )); } diff --git a/runtime/src/storage/tokio/mod.rs b/runtime/src/storage/tokio/mod.rs index 811343105f..63f01f757b 100644 --- a/runtime/src/storage/tokio/mod.rs +++ b/runtime/src/storage/tokio/mod.rs @@ -159,7 +159,7 @@ impl crate::Storage for Storage { file.read_exact(&mut header_bytes) .await .map_err(|_| Error::ReadFailed)?; - Header::from(header_bytes, len, &versions, partition, name)? + Header::from(header_bytes, len, &versions).map_err(|e| e.into_error(partition, name))? }; #[cfg(unix)] From c39591ed358ac4524595c1eca675ef632585c425 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Tue, 6 Jan 2026 19:12:22 -0800 Subject: [PATCH 14/17] docs: link to DEFAULT_BLOB_VERSION in Storage::open MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 2c3fa584fb..115f9ad939 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -546,7 +546,7 @@ pub trait Storage: Clone + Send + Sync + 'static { /// The readable/writeable storage buffer that can be opened by this Storage. type Blob: Blob; - /// [Storage::open_versioned] with the default blob version (0) as the only value + /// [`Storage::open_versioned`] with [`DEFAULT_BLOB_VERSION`] as the only value /// in the versions range. The blob version is omitted from the return value. 
fn open( &self, From 47f9ba557fe8794a9ce0533eda8a61d4e0fd7d08 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Tue, 6 Jan 2026 19:20:11 -0800 Subject: [PATCH 15/17] refactor: consolidate storage tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Move test_blob_version_mismatch to run_storage_tests - Remove duplicate version mismatch tests from each backend - Keep test_blob_header_handling (tests raw byte layout per backend) - Keep test_blob_magic_mismatch (tests corrupted file integration) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- runtime/src/storage/iouring.rs | 42 +------------- runtime/src/storage/memory.rs | 99 +++----------------------------- runtime/src/storage/mod.rs | 37 ++++++++++++ runtime/src/storage/tokio/mod.rs | 42 ++------------ 4 files changed, 53 insertions(+), 167 deletions(-) diff --git a/runtime/src/storage/iouring.rs b/runtime/src/storage/iouring.rs index cfe7fa5548..15cc6f347b 100644 --- a/runtime/src/storage/iouring.rs +++ b/runtime/src/storage/iouring.rs @@ -574,45 +574,9 @@ mod tests { // Opening should fail with corrupt error let result = storage.open("partition", b"bad_magic").await; - match result { - Err(crate::Error::BlobCorrupt(_, _, reason)) => { - assert!(reason.contains("invalid magic")); - } - Err(err) => panic!("expected BlobCorrupt error, got: {:?}", err), - Ok(_) => panic!("expected error, got Ok"), - } - - let _ = std::fs::remove_dir_all(&storage_directory); - } - - #[tokio::test] - async fn test_blob_version_mismatch() { - let (storage, storage_directory) = create_test_storage(); - - // Create blob with version 1 - let (_, _, blob_version) = storage - .open_versioned("partition", b"v1", 1..=1) - .await - .unwrap(); - assert_eq!(blob_version, 1, "new blob should have version 1"); - - // Reopen with a range that includes version 1 - let (_, _, blob_version) = storage - .open_versioned("partition", b"v1", 0..=2) - .await - .unwrap(); - assert_eq!(blob_version, 1, "existing blob should retain version 1"); - - // Try to open with version range 2..=2 (should fail) - let result = storage.open_versioned("partition", b"v1", 2..=2).await; - match result { - Err(crate::Error::BlobVersionMismatch { expected, found }) => { - assert_eq!(expected, 2..=2); - assert_eq!(found, 1); - } - Err(err) => panic!("expected BlobVersionMismatch error, got: {:?}", err), - Ok(_) => panic!("expected error, got Ok"), - } + assert!( + matches!(result, Err(crate::Error::BlobCorrupt(_, _, reason)) if reason.contains("invalid magic")) + ); let _ = std::fs::remove_dir_all(&storage_directory); } diff --git a/runtime/src/storage/memory.rs b/runtime/src/storage/memory.rs index bfb7768110..9403785641 100644 --- a/runtime/src/storage/memory.rs +++ b/runtime/src/storage/memory.rs @@ -214,7 +214,7 @@ mod tests { async fn test_blob_header_handling() { let storage = Storage::default(); - // Test 1: New blob returns logical size 0 and correct app version + // New blob returns logical size 0 let (blob, size) = storage.open("partition", b"test").await.unwrap(); assert_eq!(size, 0, "new blob should have logical size 0"); @@ -230,7 +230,7 @@ mod tests { ); } - // Test 2: Logical offset handling - write at offset 0 stores at raw offset 8 + // Write at logical offset 0 stores at raw offset 8 let data = b"hello world"; blob.write_at(data.to_vec(), 0).await.unwrap(); blob.sync().await.unwrap(); @@ -241,61 +241,15 @@ mod tests { let partition = partitions.get("partition").unwrap(); let raw_content 
= partition.get(&b"test".to_vec()).unwrap(); assert_eq!(raw_content.len(), Header::SIZE + data.len()); - // First 4 bytes should be magic bytes assert_eq!(&raw_content[..Header::MAGIC_LENGTH], &Header::MAGIC); - // Next 2 bytes should be header version - assert_eq!( - &raw_content[Header::MAGIC_LENGTH..Header::MAGIC_LENGTH + Header::VERSION_LENGTH], - &Header::RUNTIME_VERSION.to_be_bytes() - ); - // Data should start at offset 8 assert_eq!(&raw_content[Header::SIZE..], data); } - // Test 3: Read at logical offset 0 returns data from raw offset 8 + // Read at logical offset 0 returns data from raw offset 8 let read_buf = blob.read_at(vec![0u8; data.len()], 0).await.unwrap(); assert_eq!(read_buf.as_ref(), data); - // Test 4: Resize with logical length - blob.resize(5).await.unwrap(); - blob.sync().await.unwrap(); - { - let partitions = storage.partitions.lock().unwrap(); - let partition = partitions.get("partition").unwrap(); - let raw_content = partition.get(&b"test".to_vec()).unwrap(); - assert_eq!( - raw_content.len(), - Header::SIZE + 5, - "resize(5) should result in 13 raw bytes" - ); - } - - // resize(0) should leave only header - blob.resize(0).await.unwrap(); - blob.sync().await.unwrap(); - { - let partitions = storage.partitions.lock().unwrap(); - let partition = partitions.get("partition").unwrap(); - let raw_content = partition.get(&b"test".to_vec()).unwrap(); - assert_eq!( - raw_content.len(), - Header::SIZE, - "resize(0) should leave only header" - ); - } - - // Test 5: Reopen existing blob preserves header and returns correct logical size - blob.write_at(b"test data".to_vec(), 0).await.unwrap(); - blob.sync().await.unwrap(); - drop(blob); - - let (blob2, size2) = storage.open("partition", b"test").await.unwrap(); - assert_eq!(size2, 9, "reopened blob should have logical size 9"); - let read_buf = blob2.read_at(vec![0u8; 9], 0).await.unwrap(); - assert_eq!(read_buf.as_ref(), b"test data"); - - // Test 6: Corrupted blob recovery (0 < raw_size < 8) - // Manually corrupt the raw storage to have only 2 bytes + // Corrupted blob recovery (0 < raw_size < 8) { let mut partitions = storage.partitions.lock().unwrap(); let partition = partitions.get_mut("partition").unwrap(); @@ -303,8 +257,8 @@ mod tests { } // Opening should truncate and write fresh header - let (_blob3, size3) = storage.open("partition", b"corrupted").await.unwrap(); - assert_eq!(size3, 0, "corrupted blob should return logical size 0"); + let (_blob, size) = storage.open("partition", b"corrupted").await.unwrap(); + assert_eq!(size, 0, "corrupted blob should return logical size 0"); // Verify raw storage now has proper 8-byte header { @@ -327,48 +281,13 @@ mod tests { { let mut partitions = storage.partitions.lock().unwrap(); let partition = partitions.entry("partition".into()).or_default(); - // Create a blob with wrong magic bytes (all zeros) partition.insert(b"bad_magic".to_vec(), vec![0u8; Header::SIZE]); } // Opening should fail with corrupt error let result = storage.open("partition", b"bad_magic").await; - match result { - Err(crate::Error::BlobCorrupt(_, _, reason)) => { - assert!(reason.contains("invalid magic")); - } - Err(err) => panic!("expected BlobCorrupt error, got: {:?}", err), - Ok(_) => panic!("expected error, got Ok"), - } - } - - #[tokio::test] - async fn test_blob_version_mismatch() { - let storage = Storage::default(); - - // Create blob with version 1 - let (_, _, blob_version) = storage - .open_versioned("partition", b"v1", 1..=1) - .await - .unwrap(); - assert_eq!(blob_version, 1, "new blob 
should have version 1"); - - // Reopen with a range that includes version 1 - let (_, _, blob_version) = storage - .open_versioned("partition", b"v1", 0..=2) - .await - .unwrap(); - assert_eq!(blob_version, 1, "existing blob should retain version 1"); - - // Try to open with version range 2..=2 (should fail) - let result = storage.open_versioned("partition", b"v1", 2..=2).await; - match result { - Err(crate::Error::BlobVersionMismatch { expected, found }) => { - assert_eq!(expected, 2..=2); - assert_eq!(found, 1); - } - Err(err) => panic!("expected BlobVersionMismatch error, got: {:?}", err), - Ok(_) => panic!("expected error, got Ok"), - } + assert!( + matches!(result, Err(crate::Error::BlobCorrupt(_, _, reason)) if reason.contains("invalid magic")) + ); } } diff --git a/runtime/src/storage/mod.rs b/runtime/src/storage/mod.rs index 098cc702cd..d62e7e8ebb 100644 --- a/runtime/src/storage/mod.rs +++ b/runtime/src/storage/mod.rs @@ -285,6 +285,7 @@ pub(crate) mod tests { test_overlapping_writes(&storage).await; test_resize_then_open(&storage).await; test_partition_name_validation(&storage).await; + test_blob_version_mismatch(&storage).await; } /// Test opening a blob, writing to it, and reading back the data. @@ -691,4 +692,40 @@ pub(crate) mod tests { ); } } + + /// Test that opening a blob with an incompatible version range returns an error. + async fn test_blob_version_mismatch(storage: &S) + where + S: Storage + Send + Sync, + S::Blob: Send + Sync, + { + // Create a blob with version 1 + let (blob, _, version) = storage + .open_versioned("test_version_mismatch", b"blob", 1..=1) + .await + .unwrap(); + assert_eq!(version, 1); + blob.sync().await.unwrap(); + drop(blob); + + // Reopen with a range that includes version 1 + let (_, _, version) = storage + .open_versioned("test_version_mismatch", b"blob", 0..=2) + .await + .unwrap(); + assert_eq!(version, 1); + + // Try to open with version range that excludes version 1 + let result = storage + .open_versioned("test_version_mismatch", b"blob", 2..=3) + .await; + assert!( + matches!( + result, + Err(crate::Error::BlobVersionMismatch { expected, found }) + if expected == (2..=3) && found == 1 + ), + "Expected BlobVersionMismatch error" + ); + } } diff --git a/runtime/src/storage/tokio/mod.rs b/runtime/src/storage/tokio/mod.rs index 63f01f757b..9dec947f1a 100644 --- a/runtime/src/storage/tokio/mod.rs +++ b/runtime/src/storage/tokio/mod.rs @@ -367,51 +367,17 @@ mod tests { maximum_buffer_size: 1024 * 1024, }); - // Create the partition directory + // Create the partition directory and a file with invalid magic bytes let partition_path = storage_directory.join("partition"); std::fs::create_dir_all(&partition_path).unwrap(); - - // Manually create a file with invalid magic bytes let bad_magic_path = partition_path.join(hex(b"bad_magic")); std::fs::write(&bad_magic_path, vec![0u8; Header::SIZE]).unwrap(); // Opening should fail with corrupt error let result = storage.open("partition", b"bad_magic").await; - match result { - Err(crate::Error::BlobCorrupt(_, _, reason)) => { - assert!(reason.contains("invalid magic")); - } - Err(err) => panic!("expected BlobCorrupt error, got: {:?}", err), - Ok(_) => panic!("expected error, got Ok"), - } - - let _ = std::fs::remove_dir_all(&storage_directory); - } - - #[tokio::test] - async fn test_blob_version_mismatch() { - let mut rng = rand::rngs::StdRng::from_entropy(); - let storage_directory = - env::temp_dir().join(format!("storage_tokio_version_{}", rng.gen::())); - let config = 
-        let storage = Storage::new(config);
-
-        // Create blob with version 1
-        storage
-            .open_versioned("partition", b"v1", 1..=1)
-            .await
-            .unwrap();
-
-        // Try to open with version range 2..=2
-        let result = storage.open_versioned("partition", b"v1", 2..=2).await;
-        match result {
-            Err(crate::Error::BlobVersionMismatch { expected, found }) => {
-                assert_eq!(expected, 2..=2);
-                assert_eq!(found, 1);
-            }
-            Err(err) => panic!("expected BlobVersionMismatch error, got: {:?}", err),
-            Ok(_) => panic!("expected error, got Ok"),
-        }
+        assert!(
+            matches!(result, Err(crate::Error::BlobCorrupt(_, _, reason)) if reason.contains("invalid magic"))
+        );
 
         let _ = std::fs::remove_dir_all(&storage_directory);
     }

From b0c7b630f766da6c9f6e51bc4dc0e82979cbc30c Mon Sep 17 00:00:00 2001
From: Patrick O'Grady
Date: Tue, 6 Jan 2026 19:24:16 -0800
Subject: [PATCH 16/17] update conformance test path

---
 runtime/conformance.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/runtime/conformance.toml b/runtime/conformance.toml
index 10d6d043dd..a065ec07fb 100644
--- a/runtime/conformance.toml
+++ b/runtime/conformance.toml
@@ -1,3 +1,3 @@
-["commonware_runtime::tests::conformance::CodecConformance<Header>"]
"] +["commonware_runtime::storage::tests::conformance::CodecConformance
"] n_cases = 65536 hash = "541c356728d47b13f1d3ac800926ef3ae2396c82f5d4e043f5c7641c4c22b4b9" From 4f3efd5ad144dfc329fe08795c783f9b4e2098f5 Mon Sep 17 00:00:00 2001 From: Patrick O'Grady Date: Tue, 6 Jan 2026 19:29:11 -0800 Subject: [PATCH 17/17] fix: use WriteFailed for header write_all error in tokio storage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Aligns error mapping with iouring backend for consistency. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 --- runtime/src/storage/tokio/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/storage/tokio/mod.rs b/runtime/src/storage/tokio/mod.rs index 9dec947f1a..6f91bb96f6 100644 --- a/runtime/src/storage/tokio/mod.rs +++ b/runtime/src/storage/tokio/mod.rs @@ -148,7 +148,7 @@ impl crate::Storage for Storage { .map_err(|e| Error::BlobResizeFailed(partition.into(), hex(name), e))?; file.write_all(&header.encode()) .await - .map_err(|e| Error::BlobSyncFailed(partition.into(), hex(name), e))?; + .map_err(|_| Error::WriteFailed)?; file.sync_all() .await .map_err(|e| Error::BlobSyncFailed(partition.into(), hex(name), e))?;