use core::mem::{self, MaybeUninit};
use core::ptr;
use core::sync::atomic::Ordering::*;

use crossbeam_epoch::{Atomic, Owned, Shared};

/// Treiber's lock-free stack.
///
/// Usable with any number of producers and consumers.
#[derive(Debug)]
pub struct Stack<T> {
    head: Atomic<Node<T>>,
}

#[derive(Debug)]
struct Node<T> {
    // `MaybeUninit` because the data may be moved out of the node when it is popped.
    // TODO: fix the slides to sync with this.
    data: MaybeUninit<T>,
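    // A plain (non-atomic) pointer suffices for `next`: it is only written while the node
    // is still exclusively owned, before the CAS in `push()` publishes it, and is only read
    // afterwards.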
    next: *const Node<T>,
}

// Elements are moved in by `push` and out by `pop`, so any particular `T` is never
// accessed by more than one thread at a time; hence `T: Sync` is not required.
unsafe impl<T: Send> Send for Stack<T> {}
unsafe impl<T: Send> Sync for Stack<T> {}

impl<T> Default for Stack<T> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T> Stack<T> {
    /// Creates a new, empty stack.
    pub fn new() -> Stack<T> {
        Self {
            head: Atomic::null(),
        }
    }

    /// Pushes a value on top of the stack.
    pub fn push(&self, t: T) {
        let mut node = Owned::new(Node {
            data: MaybeUninit::new(t),
            next: ptr::null(),
        });

        // SAFETY: We don't dereference any pointers obtained from this guard.
        let guard = unsafe { crossbeam_epoch::unprotected() };

        let mut head = self.head.load(Relaxed, guard);
        loop {
            node.next = head.as_raw();

            match self
                .head
                .compare_exchange(head, node, Release, Relaxed, guard)
            {
                Ok(_) => break,
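                // On failure, `e.current` is the head value that was actually observed, and
                // `e.new` hands back ownership of our node so it can be reused on the next
                // attempt without reallocating.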
                Err(e) => {
                    head = e.current;
                    node = e.new;
                }
            }
        }
    }

    /// Attempts to pop the top element from the stack.
    ///
    /// Returns `None` if the stack is empty.
    pub fn pop(&self) -> Option<T> {
        let mut guard = crossbeam_epoch::pin();

        loop {
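            // The `Acquire` load synchronizes with the `Release` CAS in `push()`, so the
            // node's `data` and `next` fields are fully initialized before we read them.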
            let head = self.head.load(Acquire, &guard);
            let h = unsafe { head.as_ref() }?;
            let next = Shared::from(h.next);

            if self
                .head
                .compare_exchange(head, next, Relaxed, Relaxed, &guard)
                .is_ok()
            {
                // Since the above `compare_exchange()` succeeded, `head` is detached from
                // `self`: no thread can newly reach it, and the threads that loaded it
                // earlier are protected by their epoch pins (hence the deferred destruction
                // below).

                // SAFETY: We take ownership of `data` in `head` by making a copy of it via
                // `assume_init_read()`. This is safe because no other thread ever reads
                // `data` out of a detached node, so the original `data` in `head` will never
                // be used again.
                let result = unsafe { h.data.assume_init_read() };

                // SAFETY: `head` is detached and we no longer access it.
                unsafe { guard.defer_destroy(head) };

                return Some(result);
            }
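
            // The CAS failed: another thread pushed or popped in the meantime, so retry
            // with a fresh snapshot of `head`.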

            // Repin to ensure the global epoch can make progress.
            guard.repin();
        }
    }

    /// Returns `true` if the stack is empty.
    ///
    /// Note that the answer is only a snapshot: concurrent `push`es and `pop`s may change
    /// it immediately afterwards.
    pub fn is_empty(&self) -> bool {
        let guard = crossbeam_epoch::pin();
        self.head.load(Acquire, &guard).is_null()
    }
}

impl<T> Drop for Stack<T> {
    fn drop(&mut self) {
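        // `mem::take` replaces `self.head` with a null `Atomic` and hands us the old head.
        // Since `&mut self` guarantees exclusive access, we can walk the list and free every
        // node directly instead of going through the epoch machinery.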
        let mut o_curr = mem::take(&mut self.head);

        // SAFETY: All non-null nodes we created are valid, and `&mut self` gives us unique
        // ownership of them.
        while let Some(curr) = unsafe { o_curr.try_into_owned() }.map(Owned::into_box) {
            drop(unsafe { curr.data.assume_init() });
            o_curr = curr.next.into();
        }
    }
}

#[cfg(test)]
mod test {
    use std::thread::scope;

    use super::*;

    #[test]
    fn push() {
        let stack = Stack::new();

        scope(|scope| {
            for _ in 0..10 {
                scope.spawn(|| {
                    for i in 0..10_000 {
                        stack.push(i);
                        assert!(stack.pop().is_some());
                    }
                });
            }
        });

        assert!(stack.is_empty());
    }
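
    // A minimal single-threaded check (illustrative sketch) that values come back in LIFO
    // order and that the stack reports empty once drained.
    #[test]
    fn pop_returns_lifo_order() {
        let stack = Stack::new();
        for i in 0..5 {
            stack.push(i);
        }
        // The most recently pushed value must come out first.
        for i in (0..5).rev() {
            assert_eq!(stack.pop(), Some(i));
        }
        assert_eq!(stack.pop(), None);
        assert!(stack.is_empty());
    }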
}