
Commit e3ec48d

Generic allocations (#12)
chore: cleanup + improve tests
1 parent 41883a9 · commit e3ec48d

File tree

4 files changed: +22 -18 lines changed

src/buffer.rs (+1 -1)

@@ -97,7 +97,7 @@ impl Write for ExtBuf<'_> {
         // wrap around adjustment of pointer
         self.header = header;
         // lock the new memory region
-        self.header.set();
+        self.header.lock();
         self.initialized = 0;
         // copy existing data from tail to new memory region
         let _ = self.write(old)?;
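The renamed method's only call site here sits in ExtBuf's wrap-around path: when the buffer cannot grow in place, it takes a fresh region, locks it, resets its progress counter, and replays the bytes already written at the old tail. A toy model of that relocate-and-replay step (the helper and its parameters are illustrative, not crate code; the real ExtBuf writes into raw ring memory):

    use std::io::{Result, Write};

    // Hypothetical sketch of the relocate-and-replay step above.
    fn replay_into<W: Write>(new_region: &mut W, old: &[u8], initialized: &mut usize) -> Result<()> {
        *initialized = 0; // fresh region: nothing written into it yet
        *initialized += new_region.write(old)?; // copy existing data over
        Ok(())
    }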

src/header.rs (+5 -6)

@@ -7,13 +7,13 @@ use crate::USIZELEN;
 pub(crate) struct Header(pub(crate) *mut usize);
 /// Allocation guard, used through its Drop implementation, which
 /// unlocks (releases) the memory region back to allocator
-pub(crate) struct Guard(&'static mut usize);
+pub(crate) struct Guard(&'static AtomicUsize);
 
 impl Header {
     /// lock the memory region, preventing allocator
     /// from reusing it until it's unlocked
     #[inline(always)]
-    pub(crate) fn set(&self) {
+    pub(crate) fn lock(&self) {
         unsafe { *self.0 |= 1 }
     }
 
@@ -35,7 +35,7 @@ impl Header {
     #[inline(always)]
     pub(crate) fn available(&self) -> bool {
         let atomic = self.0 as *const AtomicUsize;
-        (unsafe { &*atomic }).load(Ordering::Acquire) & 1 == 0
+        (unsafe { &*atomic }).load(Ordering::Relaxed) & 1 == 0
     }
 
     /// distance (in `size_of<usize>()`) to given guard
@@ -64,7 +64,7 @@ impl Header {
 impl From<Header> for Guard {
     #[inline(always)]
     fn from(value: Header) -> Self {
-        Self(unsafe { &mut *value.0 })
+        Self(unsafe { &*(value.0 as *const AtomicUsize) })
     }
 }
 
@@ -83,7 +83,6 @@ impl Drop for Guard {
         // The caller of `drop` is the sole entity capable of writing to the region at this point
         // in the execution timeline. Importantly, the allocator is restricted from write access
         // the region until it is released.
-        let atomic = self.0 as *mut usize as *const AtomicUsize;
-        unsafe { &*atomic }.fetch_and(usize::MAX << 1, Ordering::Relaxed);
+        self.0.fetch_and(usize::MAX << 1, Ordering::Relaxed);
     }
 }
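Taken together, these hunks pin down the locking protocol: bit 0 of the header word is the lock flag, and Guard now holds the header as a &'static AtomicUsize so its Drop can clear that flag without the pointer casts it needed before. A minimal, self-contained model of the protocol follows; Slot and its methods are illustrative stand-ins, not the crate's types, and where the crate's lock() writes through a raw pointer, this sketch uses an atomic RMW to stay in safe Rust:

    use std::sync::atomic::{AtomicUsize, Ordering};

    // bit 0: lock flag; upper bits: allocator bookkeeping (e.g. capacity)
    struct Slot(AtomicUsize);

    impl Slot {
        fn lock(&self) {
            self.0.fetch_or(1, Ordering::Relaxed); // mirrors Header::lock
        }
        fn available(&self) -> bool {
            self.0.load(Ordering::Relaxed) & 1 == 0 // mirrors Header::available
        }
        fn unlock(&self) {
            // clear only bit 0, preserving the upper bits: exactly the
            // `fetch_and(usize::MAX << 1, Ordering::Relaxed)` in Guard::drop
            self.0.fetch_and(usize::MAX << 1, Ordering::Relaxed);
        }
    }

    fn main() {
        let slot = Slot(AtomicUsize::new(42 << 1)); // upper bits model a capacity
        assert!(slot.available());
        slot.lock();
        assert!(!slot.available());
        slot.unlock();
        assert!(slot.available());
        assert_eq!(slot.0.load(Ordering::Relaxed) >> 1, 42); // bookkeeping intact
    }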

src/lib.rs (+6 -6)

@@ -164,7 +164,7 @@ impl RingAl {
     ///
     /// NOTE: not entire capacity will be available for allocation due to guard headers being part
     /// of allocations and taking up space as well
-    pub fn new_with_align(mut size: usize, align: usize) -> Self {
+    fn new_with_align(mut size: usize, align: usize) -> Self {
         assert!(size > USIZELEN && size < usize::MAX >> 1 && align.is_power_of_two());
         size = size.next_power_of_two();
         let inner = unsafe { alloc(Layout::from_size_align_unchecked(size, align)) };
@@ -188,7 +188,7 @@ impl RingAl {
     #[inline(always)]
     pub fn fixed(&mut self, size: usize) -> Option<FixedBufMut> {
         let header = self.alloc(size, USIZEALIGN)?;
-        header.set();
+        header.lock();
         let ptr = header.buffer();
         let capacity = header.capacity();
         let inner = unsafe { std::slice::from_raw_parts_mut(ptr, capacity) };
@@ -208,7 +208,7 @@ impl RingAl {
     /// prevents any further allocations while this buffer is around
     pub fn extendable(&mut self, size: usize) -> Option<ExtBuf<'_>> {
         let header = self.alloc(size, USIZEALIGN)?;
-        header.set();
+        header.lock();
         Some(ExtBuf {
             header,
             initialized: 0,
@@ -222,7 +222,7 @@ impl RingAl {
         let tsize = size_of::<T>();
         let size = count * tsize;
         let header = self.alloc(size, align_of::<T>())?;
-        header.set();
+        header.lock();
         let buffer = header.buffer();
         let offset = buffer.align_offset(align_of::<T>());
         let inner = unsafe { buffer.add(offset) } as *mut T;
@@ -340,7 +340,7 @@ impl Drop for RingAl {
         loop {
             // Busy wait until the current allocation is marked as available
             if !next.available() {
-                std::thread::sleep(Duration::from_millis(100));
+                std::thread::sleep(Duration::from_millis(50));
                 continue;
             }
             // Reconstruct the original capacity used for the backing store
@@ -360,7 +360,7 @@ impl Drop for RingAl {
             let inner = next.0;
             head = head.min(inner)
         }
-        let layout = unsafe { Layout::from_size_align_unchecked(capacity, 64) };
+        let layout = unsafe { Layout::from_size_align_unchecked(capacity, DEFAULT_ALIGNMENT) };
         // SAFETY:
         // 1. All pointers are guaranteed to lie within the original backing store.
         // 2. The initial slice length has been accurately recalculated.
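Two of these hunks shape RingAl's teardown: Drop walks the allocation headers and busy-waits (now sleeping 50 ms per retry instead of 100 ms) until every region's lock bit is cleared, then deallocates with the crate-wide DEFAULT_ALIGNMENT rather than a hard-coded 64. A self-contained sketch of that wait loop under the same bit-0 convention (wait_until_released is a hypothetical helper, not the crate's API):

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::time::Duration;

    // Block until every allocation header has had its lock bit (bit 0)
    // cleared by the corresponding Guard; only then is it safe to free
    // the backing store.
    fn wait_until_released(headers: &[AtomicUsize]) {
        for header in headers {
            while header.load(Ordering::Relaxed) & 1 != 0 {
                // a live buffer still owns this region; retry shortly
                std::thread::sleep(Duration::from_millis(50));
            }
        }
    }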

src/tests.rs (+10 -5)

@@ -5,7 +5,7 @@ use std::{
     io::Write,
     mem::transmute,
     sync::mpsc::sync_channel,
-    time::Instant,
+    time::{Duration, Instant},
 };
 
 use crate::{
@@ -87,7 +87,7 @@ fn test_multi_alloc() {
             "should have enough capacity for all allocations"
         );
         let header = header.unwrap();
-        header.set();
+        header.lock();
         allocations.push(header.into());
     }
     let header = ringal.alloc(SIZE / COUNT - USIZELEN, USIZEALIGN);
@@ -110,7 +110,7 @@ fn test_continuous_alloc() {
         let header = ringal.alloc(size as usize, USIZEALIGN);
         assert!(header.is_some(), "should always have capacity");
         let header = header.unwrap();
-        header.set();
+        header.lock();
         allocations.push_back(header.into());
         if allocations.len() == 4 {
             allocations.pop_front();
@@ -299,17 +299,19 @@ fn test_fixed_buf_continuous_alloc_multi_thread() {
     let (mut ringal, _g) = setup!();
     let mut string = Vec::<u8>::new();
     let mut threads = Vec::with_capacity(4);
+    let mut handles = Vec::with_capacity(4);
     const ITERS: usize = 8192;
     for i in 0..4 {
         let (tx, rx) = sync_channel::<FixedBuf>(2);
-        std::thread::spawn(move || {
+        let h = std::thread::spawn(move || {
             let mut number = 0;
             while let Ok(s) = rx.recv() {
                 number += s.len() * i;
             }
             number
         });
         threads.push(tx);
+        handles.push(h);
     }
     for i in 0..ITERS {
         let random = unsafe { transmute::<Instant, (usize, u32)>(Instant::now()).0 };
@@ -335,6 +337,10 @@ fn test_fixed_buf_continuous_alloc_multi_thread() {
             tx.send(buffer.clone()).unwrap();
         }
     }
+    drop(threads);
+    for h in handles {
+        let _ = h.join();
+    }
 }
 
 #[cfg(feature = "tls")]
@@ -422,7 +428,6 @@ macro_rules! test_generic_buf {
                 "buffer should swap remove all pushed elements"
             );
             let removed = &elem.unwrap().i;
-            println!("{} removing {removed}", stringify!($int));
             assert!(indices.remove(&removed));
         }
         assert_eq!(buffer.len(), 0);
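The multi-thread test previously leaked its workers: the spawned threads were never joined, and the sender halves kept their channels open. The fix keeps the JoinHandles, drops every sender after the send loop, and joins the workers. Here is that shutdown pattern in isolation (a generic sketch, not the test itself):

    use std::sync::mpsc::sync_channel;

    fn main() {
        let mut senders = Vec::new();
        let mut handles = Vec::new();
        for _ in 0..4 {
            let (tx, rx) = sync_channel::<usize>(2);
            handles.push(std::thread::spawn(move || {
                let mut total = 0;
                // loop ends once every sender for this channel is dropped
                while let Ok(v) = rx.recv() {
                    total += v;
                }
                total
            }));
            senders.push(tx);
        }
        for (i, tx) in senders.iter().enumerate() {
            tx.send(i).unwrap();
        }
        drop(senders); // close the channels: recv() now returns Err
        for h in handles {
            let _ = h.join(); // no worker is left blocked on recv()
        }
    }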

0 commit comments