address review comments
orlp committed Feb 26, 2024
1 parent b2b441a commit 445813e
Showing 1 changed file with 41 additions and 42 deletions.
src/smallsort.rs: 83 changes (41 additions, 42 deletions)
@@ -147,53 +147,46 @@ impl<T> Drop for CopyOnDrop<T> {
     }
 }
 
-struct GapGuard<T> {
-    pos: *mut T,
-    value: ManuallyDrop<T>,
-}
-
-impl<T> Drop for GapGuard<T> {
-    fn drop(&mut self) {
-        unsafe {
-            ptr::copy_nonoverlapping(&*self.value, self.pos, 1);
-        }
-    }
-}
-
 /// Sorts range [begin, tail] assuming [begin, tail) is already sorted.
 ///
 /// # Safety
 /// begin < tail and p must be valid and initialized for all begin <= p <= tail.
 unsafe fn insert_tail<T, F: FnMut(&T, &T) -> bool>(begin: *mut T, tail: *mut T, is_less: &mut F) {
-    // SAFETY: in-bounds as tail > begin.
-    let mut sift = tail.sub(1);
-    if !is_less(&*tail, &*sift) {
-        return;
-    }
+    unsafe {
+        // SAFETY: in-bounds as tail > begin.
+        let mut sift = tail.sub(1);
+        if !is_less(&*tail, &*sift) {
+            return;
+        }
 
-    // SAFETY: after this read tail is never read from again, as we only ever
-    // read from sift, sift < tail and we only ever decrease sift. Thus this is
-    // effectively a move, not a copy. Should a panic occur, or we have found
-    // the correct insertion position, gap_guard ensures the element is moved
-    // back into the array.
-    let tmp = ManuallyDrop::new(tail.read());
-    let mut gap_guard = CopyOnDrop { src: &*tmp, dst: tail, len: 1 };
+        // SAFETY: after this read tail is never read from again, as we only ever
+        // read from sift, sift < tail and we only ever decrease sift. Thus this is
+        // effectively a move, not a copy. Should a panic occur, or we have found
+        // the correct insertion position, gap_guard ensures the element is moved
+        // back into the array.
+        let tmp = ManuallyDrop::new(tail.read());
+        let mut gap_guard = CopyOnDrop {
+            src: &*tmp,
+            dst: tail,
+            len: 1,
+        };
 
-    loop {
-        // SAFETY: we move sift into the gap (which is valid), and point the
-        // gap guard destination at sift, ensuring that if a panic occurs the
-        // gap is once again filled.
-        ptr::copy_nonoverlapping(sift, gap_guard.dst, 1);
-        gap_guard.dst = sift;
+        loop {
+            // SAFETY: we move sift into the gap (which is valid), and point the
+            // gap guard destination at sift, ensuring that if a panic occurs the
+            // gap is once again filled.
+            ptr::copy_nonoverlapping(sift, gap_guard.dst, 1);
+            gap_guard.dst = sift;
 
-        if sift == begin {
-            break;
-        }
+            if sift == begin {
+                break;
+            }
 
-        // SAFETY: we checked that sift != begin, thus this is in-bounds.
-        sift = sift.sub(1);
-        if !is_less(&tmp, &*sift) {
-            break;
-        }
-    }
+            // SAFETY: we checked that sift != begin, thus this is in-bounds.
+            sift = sift.sub(1);
+            if !is_less(&tmp, &*sift) {
+                break;
+            }
+        }
+    }
 }
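For readers without the surrounding file, here is a minimal, self-contained sketch of the gap technique the hunk above relies on. The names insert_last and the demo in main are illustrative, not part of this commit (the real insert_tail takes raw begin/tail pointers and an is_less closure): the tail element is read out once, larger elements are shifted right into the gap, and the CopyOnDrop guard writes the element back on drop, so the gap is filled exactly once even if a comparison panics.

    use std::mem::ManuallyDrop;
    use std::ptr;

    // Same shape as the guard in the diff: on drop, copy `len` elements
    // from `src` to `dst`, restoring the element held out of the array.
    struct CopyOnDrop<T> {
        src: *const T,
        dst: *mut T,
        len: usize,
    }

    impl<T> Drop for CopyOnDrop<T> {
        fn drop(&mut self) {
            // SAFETY: whoever constructs CopyOnDrop must guarantee src is
            // valid for reads and dst for writes of `len` elements.
            unsafe { ptr::copy_nonoverlapping(self.src, self.dst, self.len) }
        }
    }

    // Insert v[len - 1] into the already-sorted prefix v[..len - 1].
    fn insert_last<T: Ord>(v: &mut [T]) {
        if v.len() < 2 {
            return;
        }
        unsafe {
            let begin = v.as_mut_ptr();
            let tail = begin.add(v.len() - 1);
            let mut sift = tail.sub(1);
            if *tail >= *sift {
                return;
            }
            // Read the tail out; from here on the guard owns writing it back.
            let tmp = ManuallyDrop::new(tail.read());
            let mut gap_guard = CopyOnDrop { src: &*tmp, dst: tail, len: 1 };
            loop {
                // Shift the larger element right into the gap; the gap moves left.
                ptr::copy_nonoverlapping(sift, gap_guard.dst, 1);
                gap_guard.dst = sift;
                if sift == begin {
                    break;
                }
                sift = sift.sub(1);
                if *tmp >= *sift {
                    break;
                }
            }
            // gap_guard drops here and writes tmp into the final gap position.
        }
    }

    fn main() {
        let mut v = [1, 3, 5, 7, 2];
        insert_last(&mut v);
        assert_eq!(v, [1, 2, 3, 5, 7]);
    }

Keeping the write-back in Drop rather than at the end of the loop is what makes this panic-safe: if the comparison (is_less in the real code, the Ord comparison here) unwinds, the guard still runs and the slice holds every element exactly once.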
@@ -208,15 +201,21 @@ pub fn insertion_sort_shift_left<T, F: FnMut(&T, &T) -> bool>(
     if offset == 0 || offset > len {
         intrinsics::abort();
     }
 
     unsafe {
         // We write this basic loop directly using pointers, as when we use a
         // for loop LLVM likes to unroll this loop which we do not want.
+        // SAFETY: v_end is the one-past-end pointer, and we checked that
+        // offset <= len, thus tail is also in-bounds.
         let v_base = v.as_mut_ptr();
         let v_end = v_base.add(len);
         let mut tail = v_base.add(offset);
         while tail != v_end {
+            // SAFETY: v_base and tail are both valid pointers to elements, and
+            // v_base < tail since we checked offset != 0.
             insert_tail(v_base, tail, is_less);
+
+            // SAFETY: we checked that tail is not yet the one-past-end pointer.
             tail = tail.add(1);
         }
     }
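As a cross-check of the contract the pointer loop above enforces, here is a safe-Rust sketch of what insertion_sort_shift_left computes. The name insertion_sort_shift_left_ref and the demo are illustrative, not part of this commit: v[..offset] must already be sorted, and each iteration inserts one more element, growing the sorted prefix until it covers the whole slice.

    // Reference behavior only: the real code uses raw pointers to keep LLVM
    // from unrolling the loop, and aborts instead of panicking on bad input.
    fn insertion_sort_shift_left_ref<T: Ord>(v: &mut [T], offset: usize) {
        // Mirrors the abort-on-invalid-offset precondition in the diff.
        assert!(offset != 0 && offset <= v.len());
        for tail in offset..v.len() {
            let mut i = tail;
            // Sift v[tail] left until v[..=tail] is sorted.
            while i > 0 && v[i] < v[i - 1] {
                v.swap(i, i - 1);
                i -= 1;
            }
        }
    }

    fn main() {
        let mut v = [2, 4, 6, 5, 1, 3];
        // The first three elements are already sorted, so offset = 3.
        insertion_sort_shift_left_ref(&mut v, 3);
        assert_eq!(v, [1, 2, 3, 4, 5, 6]);
    }

Unlike this sketch's pairwise swaps, the optimized version moves each element with a single write-back into the gap, which is exactly what insert_tail's guard provides.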

0 comments on commit 445813e
