94 changes: 40 additions & 54 deletions src/arc.rs
@@ -15,7 +15,7 @@ use core::panic::{RefUnwindSafe, UnwindSafe};
use core::ptr::{self, NonNull};
use core::sync::atomic;
use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};

use core::ptr::addr_of_mut;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "stable_deref_trait")]
@@ -356,79 +356,65 @@ impl<T: ?Sized> Arc<T> {
/// The function `mem_to_arcinner` is called with the data pointer
/// and must return a (potentially fat) pointer to the `ArcInner<T>`.
///
/// This function initializes the reference count, but the caller is
/// responsible for initializing `inner_ptr.data` after `inner_ptr` is
/// returned from this function.
///
/// ## Safety
///
/// `mem_to_arcinner` must return the same pointer; the only things that may change are
/// - its type
/// - its metadata
///
/// `value_layout` must be correct for `T`.
#[allow(unused_unsafe)]
pub(super) unsafe fn allocate_for_layout(
value_layout: Layout,
mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> NonNull<ArcInner<T>> {
let layout = Layout::new::<ArcInner<()>>()
.extend(value_layout)
.unwrap()
.0
.pad_to_align();

// Safety: we propagate safety requirements to the caller
unsafe {
Arc::try_allocate_for_layout(value_layout, mem_to_arcinner)
.unwrap_or_else(|_| handle_alloc_error(layout))
}
}

/// Allocates an `ArcInner<T>` with sufficient space for
/// a possibly-unsized inner value where the value has the layout provided,
/// returning an error if allocation fails.
///
/// The function `mem_to_arcinner` is called with the data pointer
/// and must return a (potentially fat) pointer to the `ArcInner<T>`.
///
/// ## Safety
///
/// `mem_to_arcinner` must return the same pointer; the only things that may change are
/// - its type
/// - its metadata
///
/// `value_layout` must be correct for `T`.
#[allow(unused_unsafe)]
unsafe fn try_allocate_for_layout(
value_layout: Layout,
mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> Result<NonNull<ArcInner<T>>, ()> {
let layout = Layout::new::<ArcInner<()>>()
.extend(value_layout)
.unwrap()
.0
.pad_to_align();

let ptr = NonNull::new(alloc::alloc::alloc(layout)).ok_or(())?;

// Initialize the ArcInner
let inner = mem_to_arcinner(ptr.as_ptr());
debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);

unsafe {
ptr::write(&mut (*inner).count, atomic::AtomicUsize::new(1));
// Safety:
//
// 1. The caller ensures that `value_layout` is the layout of `T`
// 2. `ArcInner` is `repr(C)`
// 3. Thus, `full_layout` is the layout of `ArcInner<T>`
let full_layout = Layout::new::<ArcInner<()>>()
.extend(value_layout)
.expect("layout too big")
.0
.pad_to_align();

// `ArcInner` is never zero-sized, so calling `alloc` with `full_layout` is sound
let ptr = alloc::alloc::alloc(full_layout);
if ptr.is_null() {
handle_alloc_error(full_layout)
} else {
// Form the `ArcInner` pointer by attaching the type and metadata;
// `mem_to_arcinner` keeps the address unchanged (caller's safety condition)
let inner_ptr = mem_to_arcinner(ptr);
// Initialize the reference count
ptr::write(addr_of_mut!((*inner_ptr).count), atomic::AtomicUsize::new(1));
// The address is unchanged, so the pointer is still non-null
NonNull::new_unchecked(inner_ptr)
}
}

// Safety: `ptr` is checked to be non-null,
// `inner` is the same as `ptr` (per the safety requirements of this function)
unsafe { Ok(NonNull::new_unchecked(inner)) }
}
}
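
For reviewers unfamiliar with the idiom this PR adopts: `addr_of_mut!` computes a field's address without first materializing a `&mut` into memory that is not fully initialized yet, which is exactly the hazard in the old `&mut (*inner).count` code. A minimal standalone sketch — the `Inner` struct and `init_demo` function are hypothetical, for illustration only:

```rust
use core::mem::MaybeUninit;
use core::ptr::{self, addr_of_mut};
use core::sync::atomic::AtomicUsize;

struct Inner {
    count: AtomicUsize,
    data: u64,
}

fn init_demo() -> Inner {
    let mut slot = MaybeUninit::<Inner>::uninit();
    let inner_ptr = slot.as_mut_ptr();
    unsafe {
        // Compute each field's address directly from the raw pointer;
        // no intermediate `&mut` to uninitialized memory is created.
        ptr::write(addr_of_mut!((*inner_ptr).count), AtomicUsize::new(1));
        ptr::write(addr_of_mut!((*inner_ptr).data), 42);
        // All fields are now written, so this is sound.
        slot.assume_init()
    }
}

fn main() {
    assert_eq!(init_demo().data, 42);
}
```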

impl<H, T> Arc<HeaderSlice<H, [T]>> {
/// Allocates the arc inner for a slice DST type.
///
/// The `len` argument provides the length of the tail. This function initializes the
/// reference count, but the caller is responsible for initializing the data inside
/// `inner_ptr.data` where `inner_ptr` is the pointer returned by this function.
pub(super) fn allocate_for_header_and_slice(
len: usize,
) -> NonNull<ArcInner<HeaderSlice<H, [T]>>> {
let layout = Layout::new::<H>()
.extend(Layout::array::<T>(len).unwrap())
.unwrap()
let layout = Layout::array::<T>(len)
.and_then(|tail_layout| {
let header_layout = Layout::new::<H>();
header_layout.extend(tail_layout)
})
.expect("Requested size too big")
.0
.pad_to_align();
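
Since the size computation now funnels through `Layout::extend`, here is a quick worked example of what that call produces, using stand-in types (`u32` header, `u64` tail) rather than the crate's generics:

```rust
use std::alloc::Layout;

fn main() {
    let header_layout = Layout::new::<u32>(); // size 4, align 4
    let tail_layout = Layout::array::<u64>(3).unwrap(); // size 24, align 8

    // `extend` appends the tail after the header, inserting padding so
    // the tail is aligned, and also returns the tail's offset.
    let (full, offset) = header_layout.extend(tail_layout).unwrap();
    let full = full.pad_to_align();

    assert_eq!(offset, 8); // u32 header padded from 4 to 8 bytes
    assert_eq!(full.size(), 32); // 8 (header + padding) + 24 (tail)
    assert_eq!(full.align(), 8);
}
```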

32 changes: 19 additions & 13 deletions src/header.rs
@@ -5,7 +5,7 @@ use alloc::vec::Vec;
use core::cmp::Ordering;
use core::iter::{ExactSizeIterator, Iterator};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop};
use core::mem::ManuallyDrop;
use core::ptr::{self, addr_of_mut};

use crate::OffsetArc;
@@ -27,12 +27,14 @@ pub struct HeaderSlice<H, T: ?Sized> {
impl<H, T> Arc<HeaderSlice<H, [T]>> {
/// Creates an Arc for a HeaderSlice using the given header struct and
/// iterator to generate the slice. The resulting Arc will be fat.
///
/// **Panics** if the iterator yields a different number of elements than
/// it reported, or if the iterator itself panics. In either case, the
/// allocated memory is leaked.
pub fn from_header_and_iter<I>(header: H, mut items: I) -> Self
where
I: Iterator<Item = T> + ExactSizeIterator,
{
assert_ne!(mem::size_of::<T>(), 0, "Need to think about ZST");

let num_items = items.len();

let inner = Arc::allocate_for_header_and_slice(num_items);
@@ -42,16 +44,17 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> {
//
// Note that any panics here (i.e. from the iterator) are safe, since
// we'll just leak the uninitialized memory.
ptr::write(&mut ((*inner.as_ptr()).data.header), header);
let mut current = (*inner.as_ptr()).data.slice.as_mut_ptr();
ptr::write(addr_of_mut!((*inner.as_ptr()).data.header), header);
let mut current = addr_of_mut!((*inner.as_ptr()).data.slice) as *mut T;
for _ in 0..num_items {
// ZST writes are a no-op, but we still check iterator length
ptr::write(
current,
items
.next()
.expect("ExactSizeIterator over-reported length"),
);
current = current.offset(1);
current = current.add(1);
}
assert!(
items.next().is_none(),
@@ -67,22 +70,25 @@ impl<H, T> Arc<HeaderSlice<H, [T]>> {
}
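
With the ZST assert gone, `from_header_and_iter` now also accepts zero-sized item types. A usage sketch, assuming this is the `triomphe` crate (the paths and the signature above match it) and that `Arc<HeaderSlice<…>>` derefs to the `HeaderSlice` as in triomphe:

```rust
use triomphe::{Arc, HeaderSlice};

fn main() {
    // Header plus a sized tail built from an ExactSizeIterator.
    let arc: Arc<HeaderSlice<&str, [u32]>> =
        Arc::from_header_and_iter("lengths", [1u32, 2, 3].into_iter());
    assert_eq!(arc.header, "lengths");
    assert_eq!(&arc.slice, &[1, 2, 3]);

    // Zero-sized items: each write is a no-op, but the length checks
    // still run. `(0..4).map(...)` is ExactSizeIterator, as required.
    let zst: Arc<HeaderSlice<(), [()]>> =
        Arc::from_header_and_iter((), (0..4).map(|_| ()));
    assert_eq!(zst.slice.len(), 4);
}
```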

/// Creates an Arc for a HeaderSlice using the given header struct and
/// iterator to generate the slice. The resulting Arc will be fat.
/// slice of copyable items. The items will be copied into the resulting
/// Arc, which will be fat.
pub fn from_header_and_slice(header: H, items: &[T]) -> Self
where
T: Copy,
{
assert_ne!(mem::size_of::<T>(), 0, "Need to think about ZST");

let num_items = items.len();

let inner = Arc::allocate_for_header_and_slice(num_items);

unsafe {
// Write the data.
ptr::write(&mut ((*inner.as_ptr()).data.header), header);
let dst = (*inner.as_ptr()).data.slice.as_mut_ptr();
ptr::copy_nonoverlapping(items.as_ptr(), dst, num_items);
// Safety:
// The header field is valid for writes (it was just allocated)
ptr::write(addr_of_mut!((*inner.as_ptr()).data.header), header);

// `dst` points to `num_items` uninitialized `T`s;
// `T: Copy` makes the bytewise copy safe
let dst: *mut [T] = addr_of_mut!((*inner.as_ptr()).data.slice);
ptr::copy_nonoverlapping(items.as_ptr(), dst as *mut T, num_items);
}

// Safety: ptr is valid & the inner structure is fully initialized
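
A matching usage sketch for the slice-based constructor (same `triomphe` assumption as above); `T: Copy` is what justifies the bytewise `copy_nonoverlapping`:

```rust
use triomphe::{Arc, HeaderSlice};

fn main() {
    // The items are copied out of the borrowed slice into the Arc.
    let arc: Arc<HeaderSlice<u8, [u16]>> =
        Arc::from_header_and_slice(7u8, &[10u16, 20, 30]);
    assert_eq!(arc.header, 7);
    assert_eq!(&arc.slice, &[10, 20, 30]);
}
```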