From eea5ae1dfe4e551984b9cd7282850c2cdeb544a8 Mon Sep 17 00:00:00 2001
From: Babur Makhmudov
Date: Fri, 27 Dec 2024 21:49:06 +0530
Subject: [PATCH] chore: cleanup + improve tests

---
 src/buffer.rs |  2 +-
 src/header.rs | 11 +++++------
 src/lib.rs    | 12 ++++++------
 src/tests.rs  | 15 ++++++++++-----
 4 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/src/buffer.rs b/src/buffer.rs
index 3ba193e..a262481 100644
--- a/src/buffer.rs
+++ b/src/buffer.rs
@@ -97,7 +97,7 @@ impl Write for ExtBuf<'_> {
             // wrap around adjustment of pointer
             self.header = header;
             // lock the new memory region
-            self.header.set();
+            self.header.lock();
             self.initialized = 0;
             // copy existing data from tail to new memory region
             let _ = self.write(old)?;
diff --git a/src/header.rs b/src/header.rs
index 361be4f..8673f84 100644
--- a/src/header.rs
+++ b/src/header.rs
@@ -7,13 +7,13 @@ use crate::USIZELEN;
 pub(crate) struct Header(pub(crate) *mut usize);
 /// Allocation guard, used through its Drop implementation, which
 /// unlocks (releases) the memory region back to allocator
-pub(crate) struct Guard(&'static mut usize);
+pub(crate) struct Guard(&'static AtomicUsize);
 
 impl Header {
     /// lock the memory region, preventing allocator
     /// from reusing it until it's unlocked
     #[inline(always)]
-    pub(crate) fn set(&self) {
+    pub(crate) fn lock(&self) {
         unsafe { *self.0 |= 1 }
     }
 
@@ -35,7 +35,7 @@ impl Header {
     #[inline(always)]
     pub(crate) fn available(&self) -> bool {
         let atomic = self.0 as *const AtomicUsize;
-        (unsafe { &*atomic }).load(Ordering::Acquire) & 1 == 0
+        (unsafe { &*atomic }).load(Ordering::Relaxed) & 1 == 0
     }
 
     /// distance (in `size_of<usize>()`) to given guard
@@ -64,7 +64,7 @@ impl Header {
 impl From<Header> for Guard {
     #[inline(always)]
     fn from(value: Header) -> Self {
-        Self(unsafe { &mut *value.0 })
+        Self(unsafe { &*(value.0 as *const AtomicUsize) })
     }
 }
 
@@ -83,7 +83,6 @@ impl Drop for Guard {
         // The caller of `drop` is the sole entity capable of writing to the region at this point
        // in the execution timeline. Importantly, the allocator is restricted from write access
        // the region until it is released.
-        let atomic = self.0 as *mut usize as *const AtomicUsize;
-        unsafe { &*atomic }.fetch_and(usize::MAX << 1, Ordering::Relaxed);
+        self.0.fetch_and(usize::MAX << 1, Ordering::Relaxed);
     }
 }
diff --git a/src/lib.rs b/src/lib.rs
index bc74178..73eead3 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -164,7 +164,7 @@ impl RingAl {
     ///
     /// NOTE: not entire capacity will be available for allocation due to guard headers being part
     /// of allocations and taking up space as well
-    pub fn new_with_align(mut size: usize, align: usize) -> Self {
+    fn new_with_align(mut size: usize, align: usize) -> Self {
         assert!(size > USIZELEN && size < usize::MAX >> 1 && align.is_power_of_two());
         size = size.next_power_of_two();
         let inner = unsafe { alloc(Layout::from_size_align_unchecked(size, align)) };
@@ -188,7 +188,7 @@ impl RingAl {
     #[inline(always)]
     pub fn fixed(&mut self, size: usize) -> Option<FixedBufMut> {
         let header = self.alloc(size, USIZEALIGN)?;
-        header.set();
+        header.lock();
         let ptr = header.buffer();
         let capacity = header.capacity();
         let inner = unsafe { std::slice::from_raw_parts_mut(ptr, capacity) };
@@ -208,7 +208,7 @@ impl RingAl {
     /// prevents any further allocations while this buffer is around
     pub fn extendable(&mut self, size: usize) -> Option<ExtBuf<'_>> {
         let header = self.alloc(size, USIZEALIGN)?;
-        header.set();
+        header.lock();
         Some(ExtBuf {
             header,
             initialized: 0,
@@ -222,7 +222,7 @@ impl RingAl {
         let tsize = size_of::<T>();
         let size = count * tsize;
         let header = self.alloc(size, align_of::<T>())?;
-        header.set();
+        header.lock();
         let buffer = header.buffer();
         let offset = buffer.align_offset(align_of::<T>());
         let inner = unsafe { buffer.add(offset) } as *mut T;
@@ -340,7 +340,7 @@ impl Drop for RingAl {
         loop {
             // Busy wait until the current allocation is marked as available
             if !next.available() {
-                std::thread::sleep(Duration::from_millis(100));
+                std::thread::sleep(Duration::from_millis(50));
                 continue;
             }
             // Reconstruct the original capacity used for the backing store
@@ -360,7 +360,7 @@ impl Drop for RingAl {
             let inner = next.0;
             head = head.min(inner)
         }
-        let layout = unsafe { Layout::from_size_align_unchecked(capacity, 64) };
+        let layout = unsafe { Layout::from_size_align_unchecked(capacity, DEFAULT_ALIGNMENT) };
         // SAFETY:
         // 1. All pointers are guaranteed to lie within the original backing store.
         // 2. The initial slice length has been accurately recalculated.
diff --git a/src/tests.rs b/src/tests.rs
index 02bd134..86204fc 100644
--- a/src/tests.rs
+++ b/src/tests.rs
@@ -5,7 +5,7 @@ use std::{
     io::Write,
     mem::transmute,
     sync::mpsc::sync_channel,
-    time::Instant,
+    time::{Duration, Instant},
 };
 
 use crate::{
@@ -87,7 +87,7 @@ fn test_multi_alloc() {
             "should have enough capacity for all allocations"
         );
         let header = header.unwrap();
-        header.set();
+        header.lock();
         allocations.push(header.into());
     }
     let header = ringal.alloc(SIZE / COUNT - USIZELEN, USIZEALIGN);
@@ -110,7 +110,7 @@ fn test_continuous_alloc() {
         let header = ringal.alloc(size as usize, USIZEALIGN);
         assert!(header.is_some(), "should always have capacity");
         let header = header.unwrap();
-        header.set();
+        header.lock();
         allocations.push_back(header.into());
         if allocations.len() == 4 {
             allocations.pop_front();
@@ -299,10 +299,11 @@ fn test_fixed_buf_continuous_alloc_multi_thread() {
     let (mut ringal, _g) = setup!();
     let mut string = Vec::<u8>::new();
     let mut threads = Vec::with_capacity(4);
+    let mut handles = Vec::with_capacity(4);
     const ITERS: usize = 8192;
     for i in 0..4 {
         let (tx, rx) = sync_channel::<FixedBuf>(2);
-        std::thread::spawn(move || {
+        let h = std::thread::spawn(move || {
             let mut number = 0;
             while let Ok(s) = rx.recv() {
                 number += s.len() * i;
@@ -310,6 +311,7 @@ fn test_fixed_buf_continuous_alloc_multi_thread() {
             number
         });
         threads.push(tx);
+        handles.push(h);
     }
     for i in 0..ITERS {
         let random = unsafe { transmute::<Instant, (u64, u32)>(Instant::now()).0 };
@@ -335,6 +337,10 @@ fn test_fixed_buf_continuous_alloc_multi_thread() {
             tx.send(buffer.clone()).unwrap();
         }
     }
+    drop(threads);
+    for h in handles {
+        let _ = h.join();
+    }
 }
 
 #[cfg(feature = "tls")]
@@ -422,7 +428,6 @@ macro_rules! test_generic_buf {
             "buffer should swap remove all pushed elements"
         );
         let removed = &elem.unwrap().i;
-        println!("{} removing {removed}", stringify!($int));
         assert!(indices.remove(&removed));
     }
     assert_eq!(buffer.len(), 0);
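
Note for reviewers: the header.rs changes are easier to follow with the lock-bit protocol spelled out in one place. The sketch below is a standalone model of that protocol, not crate code: HeaderWord and unlock are hypothetical names, and lock here uses fetch_or where the patched Header::lock performs a plain `*self.0 |= 1` store (sound in context because only the allocator writes the word while it is unlocked).

use std::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical stand-in for the usize guard word that precedes
// every allocation in the ring buffer.
struct HeaderWord(AtomicUsize);

impl HeaderWord {
    // Models Header::lock(): set the low bit so the allocator
    // skips this region until its guard releases it.
    fn lock(&self) {
        self.0.fetch_or(1, Ordering::Release);
    }

    // Models Header::available(): a clear low bit means the region
    // may be reused; the patch relaxes this load from Acquire.
    fn available(&self) -> bool {
        self.0.load(Ordering::Relaxed) & 1 == 0
    }

    // Models Guard::drop(): clear the low bit, releasing the region
    // back to the allocator (same mask and ordering as the patch).
    fn unlock(&self) {
        self.0.fetch_and(usize::MAX << 1, Ordering::Relaxed);
    }
}

fn main() {
    let word = HeaderWord(AtomicUsize::new(0));
    assert!(word.available());
    word.lock();
    assert!(!word.available());
    word.unlock();
    assert!(word.available());
}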