diff --git a/src/lib.rs b/src/lib.rs
index 90d3657..a504f05 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1410,3 +1410,6 @@ pub mod sync {
 
 #[cfg(feature = "race")]
 pub mod race;
+
+#[cfg(feature = "race")]
+pub mod racerelaxed;
diff --git a/src/racerelaxed.rs b/src/racerelaxed.rs
new file mode 100644
index 0000000..a9f2e41
--- /dev/null
+++ b/src/racerelaxed.rs
@@ -0,0 +1,148 @@
+//! Thread-safe, non-blocking, "first one wins" flavor of `OnceCell` *without*
+//! Acquire/Release semantics.
+//!
+//! If two threads race to initialize a type from the `race` module, they
+//! don't block, execute initialization function together, but only one of
+//! them stores the result.
+//!
+//! This module does not require `std` feature.
+//!
+//! # Atomic orderings
+//!
+//! All types in this module use `Relaxed` [atomic orderings](Ordering) for all
+//! their operations. Any side-effects caused by the setter thread prior to them
+//! calling `set` or `get_or_init` will *NOT* necessarily be made visible from
+//! the getter thread's perspective.
+
+#[cfg(not(feature = "portable-atomic"))]
+use core::sync::atomic;
+#[cfg(feature = "portable-atomic")]
+use portable_atomic as atomic;
+
+use atomic::{AtomicUsize, Ordering};
+use core::num::NonZeroUsize;
+
+/// A thread-safe cell which can be written to only once, *without*
+/// Acquire/Release semantics.
+#[derive(Default, Debug)]
+pub struct OnceNonZeroUsizeRelaxed {
+    inner: AtomicUsize,
+}
+
+impl OnceNonZeroUsizeRelaxed {
+    /// Creates a new empty cell.
+    #[inline]
+    pub const fn new() -> Self {
+        Self { inner: AtomicUsize::new(0) }
+    }
+
+    /// Gets the underlying value.
+    #[inline]
+    pub fn get(&self) -> Option<NonZeroUsize> {
+        let val = self.inner.load(Ordering::Relaxed);
+        NonZeroUsize::new(val)
+    }
+
+    /// Get the reference to the underlying value, without checking if the cell
+    /// is initialized.
+    ///
+    /// # Safety
+    ///
+    /// Caller must ensure that the cell is in initialized state, and that
+    /// the contents are acquired by (synchronized to) this thread.
+    pub unsafe fn get_unchecked(&self) -> NonZeroUsize {
+        #[inline(always)]
+        fn as_const_ptr(r: &AtomicUsize) -> *const usize {
+            use core::mem::align_of;
+
+            let p: *const AtomicUsize = r;
+            // SAFETY: "This type has the same size and bit validity as
+            // the underlying integer type, usize. However, the alignment of
+            // this type is always equal to its size, even on targets where
+            // usize has a lesser alignment."
+            const _ALIGNMENT_COMPATIBLE: () =
+                assert!(align_of::<AtomicUsize>() % align_of::<usize>() == 0);
+            p.cast::<usize>()
+        }
+
+        // TODO(MSRV-1.70): Use `AtomicUsize::as_ptr().cast_const()`
+        // See https://github.com/rust-lang/rust/issues/138246.
+        let p = as_const_ptr(&self.inner);
+
+        // SAFETY: The caller is responsible for ensuring that the value
+        // was initialized and that the contents have been acquired by
+        // this thread. Assuming that, we can assume there will be no
+        // conflicting writes to the value since the value will never
+        // change once initialized. This relies on the statement in
+        // https://doc.rust-lang.org/1.83.0/core/sync/atomic/ that "(A
+        // `compare_exchange` or `compare_exchange_weak` that does not
+        // succeed is not considered a write."
+        let val = unsafe { p.read() };
+
+        // SAFETY: The caller is responsible for ensuring the value is
+        // initialized and thus not zero.
+        unsafe { NonZeroUsize::new_unchecked(val) }
+    }
+
+    /// Sets the contents of this cell to `value`.
+    ///
+    /// Returns `Ok(())` if the cell was empty and `Err(())` if it was
+    /// full.
+    #[inline]
+    pub fn set(&self, value: NonZeroUsize) -> Result<(), ()> {
+        match self.compare_exchange(value) {
+            Ok(_) => Ok(()),
+            Err(_) => Err(()),
+        }
+    }
+
+    /// Gets the contents of the cell, initializing it with `f` if the cell was
+    /// empty.
+    ///
+    /// If several threads concurrently run `get_or_init`, more than one `f` can
+    /// be called. However, all threads will return the same value, produced by
+    /// some `f`.
+    pub fn get_or_init<F>(&self, f: F) -> NonZeroUsize
+    where
+        F: FnOnce() -> NonZeroUsize,
+    {
+        enum Void {}
+        match self.get_or_try_init(|| Ok::<NonZeroUsize, Void>(f())) {
+            Ok(val) => val,
+            Err(void) => match void {},
+        }
+    }
+
+    /// Gets the contents of the cell, initializing it with `f` if
+    /// the cell was empty. If the cell was empty and `f` failed, an
+    /// error is returned.
+    ///
+    /// If several threads concurrently run `get_or_init`, more than one `f` can
+    /// be called. However, all threads will return the same value, produced by
+    /// some `f`.
+    pub fn get_or_try_init<F, E>(&self, f: F) -> Result<NonZeroUsize, E>
+    where
+        F: FnOnce() -> Result<NonZeroUsize, E>,
+    {
+        match self.get() {
+            Some(it) => Ok(it),
+            None => self.init(f),
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn init<E>(&self, f: impl FnOnce() -> Result<NonZeroUsize, E>) -> Result<NonZeroUsize, E> {
+        let nz = f()?;
+        let mut val = nz.get();
+        if let Err(old) = self.compare_exchange(nz) {
+            val = old;
+        }
+        Ok(unsafe { NonZeroUsize::new_unchecked(val) })
+    }
+
+    #[inline(always)]
+    fn compare_exchange(&self, val: NonZeroUsize) -> Result<usize, usize> {
+        self.inner.compare_exchange(0, val.get(), Ordering::Relaxed, Ordering::Relaxed)
+    }
+}
diff --git a/tests/it/main.rs b/tests/it/main.rs
index b8e56cc..9288692 100644
--- a/tests/it/main.rs
+++ b/tests/it/main.rs
@@ -8,5 +8,9 @@ mod sync_lazy;
 
 #[cfg(feature = "race")]
 mod race;
+
+#[cfg(feature = "race")]
+mod racerelaxed;
+
 #[cfg(all(feature = "race", feature = "alloc"))]
 mod race_once_box;
diff --git a/tests/it/race.rs b/tests/it/race.rs
index 3effff5..9da6806 100644
--- a/tests/it/race.rs
+++ b/tests/it/race.rs
@@ -91,7 +91,7 @@ fn once_non_zero_usize_first_wins() {
 fn once_bool_smoke_test() {
     let cnt = AtomicUsize::new(0);
     let cell = OnceBool::new();
-    scope(|s| { 
+    scope(|s| {
         s.spawn(|| {
             assert_eq!(
                 cell.get_or_init(|| {
diff --git a/tests/it/racerelaxed.rs b/tests/it/racerelaxed.rs
new file mode 100644
index 0000000..bde0380
--- /dev/null
+++ b/tests/it/racerelaxed.rs
@@ -0,0 +1,96 @@
+#[cfg(feature = "std")]
+use std::sync::Barrier;
+use std::{
+    num::NonZeroUsize,
+    sync::atomic::{AtomicUsize, Ordering::SeqCst},
+    thread::scope,
+};
+
+use once_cell::racerelaxed::OnceNonZeroUsizeRelaxed;
+
+#[test]
+fn once_non_zero_usize_smoke_test() {
+    let cnt = AtomicUsize::new(0);
+    let cell = OnceNonZeroUsizeRelaxed::new();
+    let val = NonZeroUsize::new(92).unwrap();
+    scope(|s| {
+        s.spawn(|| {
+            assert_eq!(
+                cell.get_or_init(|| {
+                    cnt.fetch_add(1, SeqCst);
+                    val
+                }),
+                val
+            );
+            assert_eq!(cnt.load(SeqCst), 1);
+
+            assert_eq!(
+                cell.get_or_init(|| {
+                    cnt.fetch_add(1, SeqCst);
+                    val
+                }),
+                val
+            );
+            assert_eq!(cnt.load(SeqCst), 1);
+        });
+    });
+    assert_eq!(cell.get(), Some(val));
+    assert_eq!(cnt.load(SeqCst), 1);
+}
+
+#[test]
+fn once_non_zero_usize_set() {
+    let val1 = NonZeroUsize::new(92).unwrap();
+    let val2 = NonZeroUsize::new(62).unwrap();
+
+    let cell = OnceNonZeroUsizeRelaxed::new();
+
+    assert!(cell.set(val1).is_ok());
+    assert_eq!(cell.get(), Some(val1));
+
+    assert!(cell.set(val2).is_err());
+    assert_eq!(cell.get(), Some(val1));
+}
+
+#[cfg(feature = "std")]
+#[test]
+fn once_non_zero_usize_first_wins() {
+    let val1 = NonZeroUsize::new(92).unwrap();
+    let val2 = NonZeroUsize::new(62).unwrap();
+
+    let cell = OnceNonZeroUsizeRelaxed::new();
+
+    let b1 = Barrier::new(2);
+    let b2 = Barrier::new(2);
+    let b3 = Barrier::new(2);
+    scope(|s| {
+        s.spawn(|| {
+            let r1 = cell.get_or_init(|| {
+                b1.wait();
+                b2.wait();
+                val1
+            });
+            assert_eq!(r1, val1);
+            b3.wait();
+        });
+        b1.wait();
+        s.spawn(|| {
+            let r2 = cell.get_or_init(|| {
+                b2.wait();
+                b3.wait();
+                val2
+            });
+            assert_eq!(r2, val1);
+        });
+    });
+
+    assert_eq!(cell.get(), Some(val1));
+}
+
+#[test]
+fn get_unchecked() {
+    let cell = OnceNonZeroUsizeRelaxed::new();
+    cell.set(NonZeroUsize::new(92).unwrap()).unwrap();
+    let value = unsafe { cell.get_unchecked() };
+    assert_eq!(value, NonZeroUsize::new(92).unwrap());
+}