diff --git a/src/arc.rs b/src/arc.rs
index d95b247..601dcab 100644
--- a/src/arc.rs
+++ b/src/arc.rs
@@ -12,10 +12,9 @@ use core::marker::PhantomData;
 use core::mem::{ManuallyDrop, MaybeUninit};
 use core::ops::Deref;
 use core::panic::{RefUnwindSafe, UnwindSafe};
-use core::ptr::{self, NonNull};
+use core::ptr::{addr_of_mut, self, NonNull};
 use core::sync::atomic;
 use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
-
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 #[cfg(feature = "stable_deref_trait")]
@@ -356,6 +355,10 @@ impl<T: ?Sized> Arc<T> {
     /// The function `mem_to_arcinner` is called with the data pointer
     /// and must return back a (potentially fat)-pointer for the `ArcInner<T>`.
     ///
+    /// This function initializes the reference count, but the caller is
+    /// responsible for initializing `inner_ptr.data` after `inner_ptr` is
+    /// returned from this function.
+    ///
     /// ## Safety
     ///
     /// `mem_to_arcinner` must return the same pointer, the only things that can change are
@@ -363,72 +366,54 @@ impl<T: ?Sized> Arc<T> {
     /// - its type
     /// - its metadata
     ///
     /// `value_layout` must be correct for `T`.
-    #[allow(unused_unsafe)]
     pub(super) unsafe fn allocate_for_layout(
         value_layout: Layout,
         mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
     ) -> NonNull<ArcInner<T>> {
-        let layout = Layout::new::<ArcInner<()>>()
-            .extend(value_layout)
-            .unwrap()
-            .0
-            .pad_to_align();
-
-        // Safety: we propagate safety requirements to the caller
-        unsafe {
-            Arc::try_allocate_for_layout(value_layout, mem_to_arcinner)
-                .unwrap_or_else(|_| handle_alloc_error(layout))
-        }
-    }
-
-    /// Allocates an `ArcInner<T>` with sufficient space for
-    /// a possibly-unsized inner value where the value has the layout provided,
-    /// returning an error if allocation fails.
-    ///
-    /// The function `mem_to_arcinner` is called with the data pointer
-    /// and must return back a (potentially fat)-pointer for the `ArcInner<T>`.
-    ///
-    /// ## Safety
-    ///
-    /// `mem_to_arcinner` must return the same pointer, the only things that can change are
-    /// - its type
-    /// - its metadata
-    ///
-    /// `value_layout` must be correct for `T`.
-    #[allow(unused_unsafe)]
-    unsafe fn try_allocate_for_layout(
-        value_layout: Layout,
-        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
-    ) -> Result<NonNull<ArcInner<T>>, ()> {
-        let layout = Layout::new::<ArcInner<()>>()
-            .extend(value_layout)
-            .unwrap()
-            .0
-            .pad_to_align();
-
-        let ptr = NonNull::new(alloc::alloc::alloc(layout)).ok_or(())?;
-
-        // Initialize the ArcInner
-        let inner = mem_to_arcinner(ptr.as_ptr());
-        debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);
-        unsafe {
-            ptr::write(&mut (*inner).count, atomic::AtomicUsize::new(1));
+        // Safety
+
+        // 1. Caller ensures that value_layout is the layout of T
+        // 2. ArcInner is repr(C)
+        // 3. Thus, `full_layout` is the layout of `ArcInner<T>`
+        let full_layout = Layout::new::<ArcInner<()>>()
+            .extend(value_layout)
+            .expect("layout too big")
+            .0
+            .pad_to_align();
+
+        // ArcInner never has a zero size
+        let ptr = alloc::alloc::alloc(full_layout);
+        if ptr.is_null() {
+            handle_alloc_error(full_layout)
+        } else {
+            // Form the ArcInner pointer by adding the type/metadata;
+            // mem_to_arcinner keeps the same address (caller safety condition)
+            let inner_ptr = mem_to_arcinner(ptr);
+            // Initialize the reference count
+            ptr::write(addr_of_mut!((*inner_ptr).count), atomic::AtomicUsize::new(1));
+            // The pointer stays non-null
+            NonNull::new_unchecked(inner_ptr)
         }
-
-        // Safety: `ptr` is checked to be non-null,
-        // `inner` is the same as `ptr` (per the safety requirements of this function)
-        unsafe { Ok(NonNull::new_unchecked(inner)) }
-    }
     }
 }
 
 impl<H, T> Arc<HeaderSlice<H, [T]>> {
+    /// Allocates the `ArcInner` for a slice DST type.
+    ///
+    /// The `len` argument provides the length of the tail. This function initializes the
+    /// reference count, but the caller is responsible for initializing the data inside
+    /// `inner_ptr.data`, where `inner_ptr` is the pointer returned by this function.
     pub(super) fn allocate_for_header_and_slice(
         len: usize,
     ) -> NonNull<ArcInner<HeaderSlice<H, [MaybeUninit<T>]>>> {
-        let layout = Layout::new::<H>()
-            .extend(Layout::array::<T>(len).unwrap())
-            .unwrap()
+        let layout = Layout::array::<T>(len)
+            .and_then(|tail_layout| {
+                let header_layout = Layout::new::<H>();
+                header_layout.extend(tail_layout)
+            })
+            .expect("Requested size too big")
             .0
             .pad_to_align();
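
As a side note on the layout reasoning in the new allocate_for_layout body: the sketch below replays the same Layout arithmetic outside the crate, with AtomicUsize standing in for the ArcInner<()> prefix and a concrete header/tail pair standing in for value_layout. The concrete types (u32 header, [u16; 3] tail) are illustrative assumptions, not part of this patch.

use core::alloc::Layout;
use core::sync::atomic::AtomicUsize;

fn main() {
    // Stand-in for Layout::new::<ArcInner<()>>(): the reference count alone.
    let count_layout = Layout::new::<AtomicUsize>();

    // Example `value_layout`: a u32 header followed by a 3-element u16 tail,
    // i.e. roughly a HeaderSlice<u32, [u16]>.
    let value_layout = Layout::new::<u32>()
        .extend(Layout::array::<u16>(3).expect("layout too big"))
        .expect("layout too big")
        .0
        .pad_to_align();

    // Same recipe as the new allocate_for_layout: prepend the count field,
    // then pad the whole block to its alignment.
    let full_layout = count_layout
        .extend(value_layout)
        .expect("layout too big")
        .0
        .pad_to_align();

    // The combined block is at least as large as its two parts.
    assert!(full_layout.size() >= count_layout.size() + value_layout.size());
    println!("value: {value_layout:?}, full: {full_layout:?}");
}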
diff --git a/src/header.rs b/src/header.rs
index a7f7ac8..ede7c76 100644
--- a/src/header.rs
+++ b/src/header.rs
@@ -5,7 +5,7 @@
 use alloc::vec::Vec;
 use core::cmp::Ordering;
 use core::iter::{ExactSizeIterator, Iterator};
 use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop};
+use core::mem::ManuallyDrop;
 use core::ptr::{self, addr_of_mut};
 use crate::OffsetArc;
@@ -27,12 +27,14 @@ pub struct HeaderSlice<H, T: ?Sized> {
 impl<H, T> Arc<HeaderSlice<H, [T]>> {
     /// Creates an Arc for a HeaderSlice using the given header struct and
     /// iterator to generate the slice. The resulting Arc will be fat.
+    ///
+    /// **Panics** if the iterator yields a different number of elements than
+    /// it reported, or if the iterator itself panics. In either case, the
+    /// memory is leaked.
     pub fn from_header_and_iter<I>(header: H, mut items: I) -> Self
     where
         I: Iterator<Item = T> + ExactSizeIterator,
     {
-        assert_ne!(mem::size_of::<T>(), 0, "Need to think about ZST");
-
         let num_items = items.len();
 
         let inner = Arc::allocate_for_header_and_slice(num_items);
@@ -42,16 +44,17 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> {
             //
             // Note that any panics here (i.e. from the iterator) are safe, since
             // we'll just leak the uninitialized memory.
-            ptr::write(&mut ((*inner.as_ptr()).data.header), header);
-            let mut current = (*inner.as_ptr()).data.slice.as_mut_ptr();
+            ptr::write(addr_of_mut!((*inner.as_ptr()).data.header), header);
+            let mut current = addr_of_mut!((*inner.as_ptr()).data.slice) as *mut T;
             for _ in 0..num_items {
+                // ZST writes are a no-op, but we still check the iterator length
                 ptr::write(
                     current,
                     items
                         .next()
                         .expect("ExactSizeIterator over-reported length"),
                 );
-                current = current.offset(1);
+                current = current.add(1);
             }
             assert!(
                 items.next().is_none(),
@@ -67,22 +70,25 @@
     }
 
     /// Creates an Arc for a HeaderSlice using the given header struct and
-    /// iterator to generate the slice. The resulting Arc will be fat.
+    /// slice of copyable items. The items will be copied into the resulting
+    /// Arc, which will be fat.
     pub fn from_header_and_slice(header: H, items: &[T]) -> Self
     where
         T: Copy,
     {
-        assert_ne!(mem::size_of::<T>(), 0, "Need to think about ZST");
-
        let num_items = items.len();
 
         let inner = Arc::allocate_for_header_and_slice(num_items);
 
         unsafe {
-            // Write the data.
-            ptr::write(&mut ((*inner.as_ptr()).data.header), header);
-            let dst = (*inner.as_ptr()).data.slice.as_mut_ptr();
-            ptr::copy_nonoverlapping(items.as_ptr(), dst, num_items);
+            // Safety:
+            // the header field was just allocated, so it is valid for writes
+            ptr::write(addr_of_mut!((*inner.as_ptr()).data.header), header);
+
+            // dst points to `num_items` uninitialized `T`s;
+            // `T: Copy` makes the bytewise copy safe
+            let dst = addr_of_mut!((*inner.as_ptr()).data.slice) as *mut T;
+            ptr::copy_nonoverlapping(items.as_ptr(), dst, num_items);
         }
 
         // Safety: ptr is valid & the inner structure is fully initialized
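
For context, a quick usage sketch of the two constructors touched above, written against the public API as it stands after this change and assuming these files belong to the triomphe crate; the concrete header and element types are made up for illustration. The last case exercises a zero-sized element type, which the removed assert_ne!(mem::size_of::<T>(), 0, ...) used to reject.

// Illustrative sketch: assumes the `triomphe` crate; header/element types are arbitrary.
use triomphe::{Arc, HeaderSlice};

fn main() {
    // Copyable tail: items are copied straight out of the slice.
    let a: Arc<HeaderSlice<u32, [u8]>> = Arc::from_header_and_slice(7, &[1, 2, 3]);
    assert_eq!(a.header, 7);
    assert_eq!(&a.slice[..], &[1u8, 2, 3][..]);

    // ExactSizeIterator tail: a misreported length panics and leaks the allocation.
    let names = vec![String::from("x"), String::from("y")];
    let b = Arc::from_header_and_iter("names", names.into_iter());
    assert_eq!(b.header, "names");
    assert_eq!(b.slice.len(), 2);

    // Zero-sized elements no longer trip the removed ZST assertion.
    let c = Arc::from_header_and_slice((), &[(), (), ()]);
    assert_eq!(c.slice.len(), 3);
}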