Resolve cast-related clippy warnings
taiki-e committed Apr 21, 2024
1 parent 10a8a75 · commit 4e66bd3
Showing 7 changed files with 189 additions and 83 deletions.
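
Most hunks below apply the same two-part change to the 64-bit and 128-bit wrappers: a statement-scoped #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)] is placed directly on the unsafe block that performs the signed/unsigned cast, and the block is reformatted from one line into a braced multi-line form. A minimal, self-contained sketch of that pattern follows; the I64Cell wrapper and atomic_load helper are simplified stand-ins for this illustration, not the crate's actual items.

use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicU64, Ordering};

// Hypothetical stand-in for the crate's signed wrapper around an unsigned
// atomic helper; the real types live behind the atomic64!/atomic128! macros.
#[repr(C, align(8))]
struct I64Cell {
    v: UnsafeCell<i64>,
}

// Stand-in for the unsigned `atomic_load` helper called in the diff.
unsafe fn atomic_load(ptr: *mut u64, order: Ordering) -> u64 {
    // SAFETY: the caller guarantees `ptr` is valid and suitably aligned.
    unsafe { (*ptr.cast::<AtomicU64>()).load(order) }
}

impl I64Cell {
    fn load(&self, order: Ordering) -> i64 {
        // The allow is scoped to this one statement: the u64 -> i64 cast is a
        // deliberate bit-preserving reinterpretation, not an accidental wrap.
        #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
        // SAFETY: the pointer comes from a reference and the struct is
        // 8-byte aligned, so it is valid for the u64-based helper.
        unsafe {
            atomic_load(self.v.get().cast::<u64>(), order) as i64
        }
    }
}

fn main() {
    let cell = I64Cell { v: UnsafeCell::new(-1) };
    assert_eq!(cell.load(Ordering::SeqCst), -1);
}
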
66 changes: 53 additions & 13 deletions src/imp/arm_linux.rs
@@ -291,25 +291,34 @@ macro_rules! atomic64 {
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn load(&self, order: Ordering) -> $int_type {
crate::utils::assert_load_ordering(order);
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { atomic_load(self.v.get().cast::<u64>()) as $int_type }
unsafe {
atomic_load(self.v.get().cast::<u64>()) as $int_type
}
}

#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn store(&self, val: $int_type, order: Ordering) {
crate::utils::assert_store_ordering(order);
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { atomic_store(self.v.get().cast::<u64>(), val as u64) }
unsafe {
atomic_store(self.v.get().cast::<u64>(), val as u64)
}
}

#[inline]
pub(crate) fn swap(&self, val: $int_type, _order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { atomic_swap(self.v.get().cast::<u64>(), val as u64) as $int_type }
unsafe {
atomic_swap(self.v.get().cast::<u64>(), val as u64) as $int_type
}
}

#[inline]
@@ -322,6 +331,7 @@ macro_rules! atomic64 {
failure: Ordering,
) -> Result<$int_type, $int_type> {
crate::utils::assert_compare_exchange_ordering(success, failure);
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe {
@@ -352,65 +362,92 @@ macro_rules! atomic64 {

#[inline]
pub(crate) fn fetch_add(&self, val: $int_type, _order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { atomic_add(self.v.get().cast::<u64>(), val as u64) as $int_type }
unsafe {
atomic_add(self.v.get().cast::<u64>(), val as u64) as $int_type
}
}

#[inline]
pub(crate) fn fetch_sub(&self, val: $int_type, _order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { atomic_sub(self.v.get().cast::<u64>(), val as u64) as $int_type }
unsafe {
atomic_sub(self.v.get().cast::<u64>(), val as u64) as $int_type
}
}

#[inline]
pub(crate) fn fetch_and(&self, val: $int_type, _order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { atomic_and(self.v.get().cast::<u64>(), val as u64) as $int_type }
unsafe {
atomic_and(self.v.get().cast::<u64>(), val as u64) as $int_type
}
}

#[inline]
pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { atomic_nand(self.v.get().cast::<u64>(), val as u64) as $int_type }
unsafe {
atomic_nand(self.v.get().cast::<u64>(), val as u64) as $int_type
}
}

#[inline]
pub(crate) fn fetch_or(&self, val: $int_type, _order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { atomic_or(self.v.get().cast::<u64>(), val as u64) as $int_type }
unsafe {
atomic_or(self.v.get().cast::<u64>(), val as u64) as $int_type
}
}

#[inline]
pub(crate) fn fetch_xor(&self, val: $int_type, _order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { atomic_xor(self.v.get().cast::<u64>(), val as u64) as $int_type }
unsafe {
atomic_xor(self.v.get().cast::<u64>(), val as u64) as $int_type
}
}

#[inline]
pub(crate) fn fetch_max(&self, val: $int_type, _order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { $atomic_max(self.v.get().cast::<u64>(), val as u64) as $int_type }
unsafe {
$atomic_max(self.v.get().cast::<u64>(), val as u64) as $int_type
}
}

#[inline]
pub(crate) fn fetch_min(&self, val: $int_type, _order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { $atomic_min(self.v.get().cast::<u64>(), val as u64) as $int_type }
unsafe {
$atomic_min(self.v.get().cast::<u64>(), val as u64) as $int_type
}
}

#[inline]
pub(crate) fn fetch_not(&self, _order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { atomic_not(self.v.get().cast::<u64>()) as $int_type }
unsafe {
atomic_not(self.v.get().cast::<u64>()) as $int_type
}
}
#[inline]
pub(crate) fn not(&self, order: Ordering) {
@@ -419,9 +456,12 @@ macro_rules! atomic64 {

#[inline]
pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by the kernel user helper or the lock
// and the raw pointer passed in is valid because we got it from a reference.
unsafe { atomic_neg(self.v.get().cast::<u64>()) as $int_type }
unsafe {
atomic_neg(self.v.get().cast::<u64>()) as $int_type
}
}
#[inline]
pub(crate) fn neg(&self, order: Ordering) {
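
As a side note (not part of the diff): the casts these lints flag in the 64-bit wrappers are plain bit-preserving round trips between the signed element type and u64, so the wrap and sign loss the lints warn about are intentional. A quick standalone check:

fn main() {
    // clippy::cast_sign_loss fires on i64 -> u64 and clippy::cast_possible_wrap
    // on u64 -> i64; both casts only reinterpret the bits, so the round trip
    // is lossless for every value.
    for v in [0i64, 1, -1, i64::MIN, i64::MAX] {
        let bits = v as u64; // would trigger cast_sign_loss under pedantic
        let back = bits as i64; // would trigger cast_possible_wrap
        assert_eq!(back, v);
    }
    println!("i64 <-> u64 casts round-trip losslessly");
}
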
67 changes: 54 additions & 13 deletions src/imp/atomic128/macros.rs
@@ -42,9 +42,12 @@ macro_rules! atomic128 {
)]
pub(crate) fn load(&self, order: Ordering) -> $int_type {
crate::utils::assert_load_ordering(order);
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_load(self.v.get().cast::<u128>(), order) as $int_type }
unsafe {
atomic_load(self.v.get().cast::<u128>(), order) as $int_type
}
}

#[inline]
@@ -54,17 +57,23 @@ macro_rules! atomic128 {
)]
pub(crate) fn store(&self, val: $int_type, order: Ordering) {
crate::utils::assert_store_ordering(order);
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_store(self.v.get().cast::<u128>(), val as u128, order) }
unsafe {
atomic_store(self.v.get().cast::<u128>(), val as u128, order)
}
}

#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub(crate) fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_swap(self.v.get().cast::<u128>(), val as u128, order) as $int_type }
unsafe {
atomic_swap(self.v.get().cast::<u128>(), val as u128, order) as $int_type
}
}

#[inline]
@@ -80,6 +89,7 @@ macro_rules! atomic128 {
failure: Ordering,
) -> Result<$int_type, $int_type> {
crate::utils::assert_compare_exchange_ordering(success, failure);
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
@@ -109,6 +119,7 @@ macro_rules! atomic128 {
failure: Ordering,
) -> Result<$int_type, $int_type> {
crate::utils::assert_compare_exchange_ordering(success, failure);
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
@@ -128,73 +139,100 @@ macro_rules! atomic128 {
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub(crate) fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_add(self.v.get().cast::<u128>(), val as u128, order) as $int_type }
unsafe {
atomic_add(self.v.get().cast::<u128>(), val as u128, order) as $int_type
}
}

#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub(crate) fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_sub(self.v.get().cast::<u128>(), val as u128, order) as $int_type }
unsafe {
atomic_sub(self.v.get().cast::<u128>(), val as u128, order) as $int_type
}
}

#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub(crate) fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_and(self.v.get().cast::<u128>(), val as u128, order) as $int_type }
unsafe {
atomic_and(self.v.get().cast::<u128>(), val as u128, order) as $int_type
}
}

#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub(crate) fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_nand(self.v.get().cast::<u128>(), val as u128, order) as $int_type }
unsafe {
atomic_nand(self.v.get().cast::<u128>(), val as u128, order) as $int_type
}
}

#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub(crate) fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_or(self.v.get().cast::<u128>(), val as u128, order) as $int_type }
unsafe {
atomic_or(self.v.get().cast::<u128>(), val as u128, order) as $int_type
}
}

#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub(crate) fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_xor(self.v.get().cast::<u128>(), val as u128, order) as $int_type }
unsafe {
atomic_xor(self.v.get().cast::<u128>(), val as u128, order) as $int_type
}
}

#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub(crate) fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { $atomic_max(self.v.get().cast::<u128>(), val as u128, order) as $int_type }
unsafe {
$atomic_max(self.v.get().cast::<u128>(), val as u128, order) as $int_type
}
}

#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub(crate) fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { $atomic_min(self.v.get().cast::<u128>(), val as u128, order) as $int_type }
unsafe {
$atomic_min(self.v.get().cast::<u128>(), val as u128, order) as $int_type
}
}

#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_not(self.v.get().cast::<u128>(), order) as $int_type }
unsafe {
atomic_not(self.v.get().cast::<u128>(), order) as $int_type
}
}
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
@@ -205,9 +243,12 @@ macro_rules! atomic128 {
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
#[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_neg(self.v.get().cast::<u128>(), order) as $int_type }
unsafe {
atomic_neg(self.v.get().cast::<u128>(), order) as $int_type
}
}
#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
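
For context only: the self.v.get().cast::<u128>() pattern in these methods leans on the signed and unsigned 128-bit types having identical layout, so the UnsafeCell's raw pointer can be handed to the unsigned helpers. A standalone illustration of that layout assumption (the const assertions are for this sketch only, not code from the crate):

use core::cell::UnsafeCell;
use core::mem::{align_of, size_of};

// The pointer cast in the diff relies on i128, u128, and UnsafeCell<i128>
// sharing size and alignment; spell that out as compile-time checks.
const _: () = assert!(size_of::<i128>() == size_of::<u128>());
const _: () = assert!(align_of::<i128>() == align_of::<u128>());
const _: () = assert!(size_of::<UnsafeCell<i128>>() == size_of::<u128>());

fn main() {
    let cell = UnsafeCell::new(-1i128);
    // The same reinterpreting cast the macro performs.
    let ptr: *mut u128 = cell.get().cast::<u128>();
    // SAFETY: single-threaded here, the pointer comes from a live UnsafeCell,
    // and the layouts match per the assertions above.
    let bits = unsafe { *ptr };
    assert_eq!(bits, u128::MAX);
}
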
2 changes: 1 addition & 1 deletion src/imp/fallback/outline_atomics.rs
@@ -115,7 +115,7 @@ macro_rules! atomic_rmw_3 {
#[cold]
pub(crate) unsafe fn $name(dst: *mut Udw, val: Udw, order: Ordering) -> Udw {
debug_assert_outline_atomics!();
#[allow(clippy::cast_ptr_alignment)]
#[allow(clippy::cast_ptr_alignment, clippy::cast_possible_wrap, clippy::cast_sign_loss)]
// SAFETY: the caller must uphold the safety contract.
unsafe {
(*(dst as *const $atomic_type)).$method_name(val as _, order) as Udw
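
Background on the pre-existing clippy::cast_ptr_alignment allow that this hunk extends with the two cast lints: dst as *const $atomic_type casts a plain integer pointer to an atomic pointee, which may require stricter alignment (u64 is 4-byte aligned on some 32-bit targets while AtomicU64 needs 8 bytes). A hedged standalone sketch with concrete types, AtomicU64 standing in for the macro's $atomic_type:

use core::sync::atomic::{AtomicU64, Ordering};

fn main() {
    // Stand-in for the macro's `dst: *mut Udw`: a u64 pointer that is known
    // to satisfy AtomicU64's alignment because it came from an AtomicU64.
    let value = AtomicU64::new(5);
    let dst: *mut u64 = value.as_ptr();

    // clippy::cast_ptr_alignment warns on casts to a possibly more-aligned
    // pointee; it is fine here because the allocation really is aligned.
    // SAFETY: `dst` points to a live AtomicU64 and atomic ops synchronize.
    let prev = unsafe { (*(dst as *const AtomicU64)).fetch_add(2, Ordering::SeqCst) };

    assert_eq!(prev, 5);
    assert_eq!(value.load(Ordering::SeqCst), 7);
}
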
118 changes: 71 additions & 47 deletions src/imp/riscv.rs
@@ -17,13 +17,28 @@
// - atomic-maybe-uninit https://github.com/taiki-e/atomic-maybe-uninit
//
// Generated asm:
// - riscv64gc https://godbolt.org/z/zcx6P6dP3
// - riscv32imac https://godbolt.org/z/867qK46va
// - riscv64gc https://godbolt.org/z/x8bhEn39e
// - riscv32imac https://godbolt.org/z/aG9157dhW

#[cfg(not(portable_atomic_no_asm))]
use core::arch::asm;
use core::{cell::UnsafeCell, sync::atomic::Ordering};

#[cfg(any(test, portable_atomic_force_amo))]
#[cfg(target_arch = "riscv32")]
macro_rules! w {
() => {
""
};
}
#[cfg(any(test, portable_atomic_force_amo))]
#[cfg(target_arch = "riscv64")]
macro_rules! w {
() => {
"w"
};
}

#[cfg(any(test, portable_atomic_force_amo))]
macro_rules! atomic_rmw_amo_order {
($op:ident, $order:ident) => {
@@ -71,17 +86,8 @@ fn sllw(val: u32, shift: u32) -> u32 {
// SAFETY: Calling sll{,w} is safe.
unsafe {
let out;
#[cfg(target_arch = "riscv32")]
asm!(
"sll {out}, {val}, {shift}",
out = lateout(reg) out,
val = in(reg) val,
shift = in(reg) shift,
options(pure, nomem, nostack, preserves_flags),
);
#[cfg(target_arch = "riscv64")]
asm!(
"sllw {out}, {val}, {shift}",
concat!("sll", w!(), " {out}, {val}, {shift}"),
out = lateout(reg) out,
val = in(reg) val,
shift = in(reg) shift,
@@ -92,29 +98,23 @@ fn sllw(val: u32, shift: u32) -> u32 {
}
// 32-bit val.wrapping_shr(shift) but no extra `& (u32::BITS - 1)`
#[cfg(any(test, portable_atomic_force_amo))]
#[inline]
fn srlw(val: u32, shift: u32) -> u32 {
// SAFETY: Calling srl{,w} is safe.
unsafe {
let out;
#[cfg(target_arch = "riscv32")]
asm!(
"srl {out}, {val}, {shift}",
out = lateout(reg) out,
val = in(reg) val,
shift = in(reg) shift,
options(pure, nomem, nostack, preserves_flags),
);
#[cfg(target_arch = "riscv64")]
asm!(
"srlw {out}, {val}, {shift}",
out = lateout(reg) out,
val = in(reg) val,
shift = in(reg) shift,
options(pure, nomem, nostack, preserves_flags),
);
out
}
macro_rules! srlw {
($val:expr, $shift:expr) => {
// SAFETY: Calling srl{,w} is safe.
unsafe {
let val: u32 = $val;
let shift: u32 = $shift;
let out;
asm!(
concat!("srl", w!(), " {out}, {val}, {shift}"),
out = lateout(reg) out,
val = in(reg) val,
shift = in(reg) shift,
options(pure, nomem, nostack, preserves_flags),
);
out
}
};
}

macro_rules! atomic_load_store {
@@ -328,6 +328,33 @@ macro_rules! atomic {
};
}

#[cfg(any(test, portable_atomic_force_amo))]
trait ZeroExtend: Copy {
/// Zero-extends `self` to `u32` if it is smaller than 32-bit.
fn zero_extend(self) -> u32;
}
macro_rules! zero_extend {
($int:ident, $uint:ident) => {
#[cfg(any(test, portable_atomic_force_amo))]
impl ZeroExtend for $uint {
#[inline]
fn zero_extend(self) -> u32 {
self as u32
}
}
#[cfg(any(test, portable_atomic_force_amo))]
impl ZeroExtend for $int {
#[allow(clippy::cast_sign_loss)]
#[inline]
fn zero_extend(self) -> u32 {
self as $uint as u32
}
}
};
}
zero_extend!(i8, u8);
zero_extend!(i16, u16);

macro_rules! atomic_sub_word {
($atomic_type:ident, $value_type:ty, $unsigned_type:ty, $asm_suffix:tt) => {
atomic_load_store!($atomic_type, $value_type, $asm_suffix);
@@ -336,39 +363,36 @@ macro_rules! atomic_sub_word {
#[inline]
pub(crate) fn fetch_and(&self, val: $value_type, order: Ordering) -> $value_type {
let dst = self.v.get();
let (dst, shift, mask) = crate::utils::create_sub_word_mask_values(dst);
let mask = !sllw(mask as u32, shift as u32);
// TODO: use zero_extend helper instead of cast for val.
let val = sllw(val as $unsigned_type as u32, shift as u32);
let val = val | mask;
let (dst, shift, mut mask) = crate::utils::create_sub_word_mask_values(dst);
mask = !sllw(mask, shift);
let mut val = sllw(ZeroExtend::zero_extend(val), shift);
val |= mask;
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
let out: u32 = unsafe { atomic_rmw_amo!(and, dst, val, order, "w") };
srlw(out, shift as u32) as $unsigned_type as $value_type
srlw!(out, shift)
}

#[inline]
pub(crate) fn fetch_or(&self, val: $value_type, order: Ordering) -> $value_type {
let dst = self.v.get();
let (dst, shift, _mask) = crate::utils::create_sub_word_mask_values(dst);
// TODO: use zero_extend helper instead of cast for val.
let val = sllw(val as $unsigned_type as u32, shift as u32);
let val = sllw(ZeroExtend::zero_extend(val), shift);
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
let out: u32 = unsafe { atomic_rmw_amo!(or, dst, val, order, "w") };
srlw(out, shift as u32) as $unsigned_type as $value_type
srlw!(out, shift)
}

#[inline]
pub(crate) fn fetch_xor(&self, val: $value_type, order: Ordering) -> $value_type {
let dst = self.v.get();
let (dst, shift, _mask) = crate::utils::create_sub_word_mask_values(dst);
// TODO: use zero_extend helper instead of cast for val.
let val = sllw(val as $unsigned_type as u32, shift as u32);
let val = sllw(ZeroExtend::zero_extend(val), shift);
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
let out: u32 = unsafe { atomic_rmw_amo!(xor, dst, val, order, "w") };
srlw(out, shift as u32) as $unsigned_type as $value_type
srlw!(out, shift)
}

#[inline]
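
Not part of the diff: the new w!() helper lets one asm template serve both RV32 and RV64 by appending the "w" suffix only on riscv64 ("sll" vs "sllw", "srl" vs "srlw"), replacing the duplicated per-arch asm! blocks above. A sketch of the same concat!-based suffix selection, restructured slightly (the suffix choice lives inside one macro) so it stands alone; it keys off pointer width instead of target_arch only so the sketch also builds on non-RISC-V hosts.

// Mirrors the diff's riscv32/riscv64 cfg pair: plain shifts on 32-bit,
// "w"-suffixed 32-bit-operand shifts on 64-bit.
#[cfg(target_pointer_width = "32")]
macro_rules! shift_insn {
    ($base:literal) => {
        concat!($base, " {out}, {val}, {shift}")
    };
}
#[cfg(not(target_pointer_width = "32"))]
macro_rules! shift_insn {
    ($base:literal) => {
        concat!($base, "w", " {out}, {val}, {shift}")
    };
}

// concat! runs at compile time, so each template is a single string literal
// by the time it would reach asm!.
const SLL_TEMPLATE: &str = shift_insn!("sll");
const SRL_TEMPLATE: &str = shift_insn!("srl");

fn main() {
    println!("shift-left template:  {SLL_TEMPLATE}");
    println!("shift-right template: {SRL_TEMPLATE}");
}
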
3 changes: 3 additions & 0 deletions src/tests/helper.rs
@@ -1673,6 +1673,7 @@ macro_rules! __test_atomic_ptr_pub {

assert_eq!(atom.fetch_ptr_sub(1, Ordering::SeqCst), n.wrapping_add(1));
assert_eq!(atom.load(Ordering::SeqCst), n);
#[allow(clippy::cast_ptr_alignment)]
let bytes_from_n = |b| n.cast::<u8>().wrapping_add(b).cast::<i64>();

assert_eq!(atom.fetch_byte_add(1, Ordering::SeqCst), n);
@@ -2155,6 +2156,7 @@ macro_rules! __stress_test_acquire_release {
(should_pass, $int_type:ident, $write:ident, $load_order:ident, $store_order:ident) => {
paste::paste! {
#[test]
#[allow(clippy::cast_possible_truncation)]
fn [<load_ $load_order:lower _ $write _ $store_order:lower>]() {
__stress_test_acquire_release!([<Atomic $int_type:camel>],
$int_type, $write, $load_order, $store_order);
@@ -2168,6 +2170,7 @@ macro_rules! __stress_test_acquire_release {
// So, ignore on non-Miri environments by default. See also catch_unwind_on_weak_memory_arch.
#[test]
#[cfg_attr(not(miri), ignore)]
#[allow(clippy::cast_possible_truncation)]
fn [<load_ $load_order:lower _ $write _ $store_order:lower>]() {
can_panic("a=", || __stress_test_acquire_release!([<Atomic $int_type:camel>],
$int_type, $write, $load_order, $store_order));
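
For reference: clippy::cast_possible_truncation flags `as` casts to a narrower integer type; the generated stress-test functions presumably hit it when casting test values down to the atomic's element type, so the allow is added on those generated functions rather than crate-wide. A tiny illustration of the kind of cast this lint is about:

fn main() {
    // Casting a wide counter down to a small element type can drop high bits;
    // that is what clippy::cast_possible_truncation warns about.
    let counter: usize = 300;
    #[allow(clippy::cast_possible_truncation)]
    let as_u8 = counter as u8; // 300 wraps to 44 (300 - 256)
    assert_eq!(as_u8, 44);
    println!("{counter} as u8 = {as_u8}");
}
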
14 changes: 6 additions & 8 deletions src/utils.rs
@@ -381,12 +381,10 @@ pub(crate) struct Pair<T: Copy> {
pub(crate) lo: T,
}

#[allow(dead_code)]
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
type MinWord = u32;
#[cfg(target_arch = "riscv32")]
type RegSize = u32;
#[cfg(target_arch = "riscv64")]
type RegSize = u64;
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
type RetInt = u32;
// Adapted from https://github.com/taiki-e/atomic-maybe-uninit/blob/v0.3.0/src/utils.rs#L210.
// Helper for implementing sub-word atomic operations using word-sized LL/SC loop or CAS loop.
//
@@ -395,7 +393,7 @@ type RegSize = u64;
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
#[allow(dead_code)]
#[inline]
pub(crate) fn create_sub_word_mask_values<T>(ptr: *mut T) -> (*mut MinWord, RegSize, RegSize) {
pub(crate) fn create_sub_word_mask_values<T>(ptr: *mut T) -> (*mut MinWord, RetInt, RetInt) {
use core::mem;
const SHIFT_MASK: bool = !cfg!(any(
target_arch = "riscv32",
@@ -416,11 +414,11 @@ pub(crate) fn create_sub_word_mask_values<T>(ptr: *mut T) -> (*mut MinWord, RegS
} else {
(ptr_lsb ^ (mem::size_of::<MinWord>() - mem::size_of::<T>())).wrapping_mul(8)
};
let mut mask: RegSize = (1 << (mem::size_of::<T>() * 8)) - 1; // !(0 as T) as RegSize
let mut mask: RetInt = (1 << (mem::size_of::<T>() * 8)) - 1; // !(0 as T) as RetInt
if SHIFT_MASK {
mask <<= shift;
}
(aligned_ptr, shift as RegSize, mask)
(aligned_ptr, shift as RetInt, mask)
}

/// Emulate strict provenance.
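
Not the crate's code: the utils.rs change narrows the returned shift and mask from register-sized RegSize to RetInt (u32), since both describe positions within a 32-bit MinWord and so fit in u32 even on riscv64. Below is a simplified, little-endian-style sketch of the computation create_sub_word_mask_values performs for a u8 slot inside a u32 word; the helper name is hypothetical, and the big-endian and shift-mask policies of the real helper are omitted.

use core::mem::align_of;

// Align the byte pointer down to a u32 boundary and compute the bit shift and
// in-word mask for that byte. Both results fit in u32, hence RetInt = u32.
fn sub_word_mask_values(ptr: *mut u8) -> (*mut u32, u32, u32) {
    let addr = ptr as usize;
    let aligned = (addr & !(align_of::<u32>() - 1)) as *mut u32;
    let byte_offset = addr & (align_of::<u32>() - 1);
    let shift = (byte_offset * 8) as u32; // bit position inside the 32-bit word
    let mask = 0xffu32 << shift; // the byte's lane within the word
    (aligned, shift, mask)
}

fn main() {
    let mut word: u32 = 0;
    let word_ptr = &mut word as *mut u32;
    // Point at the byte that sits two bytes into the aligned word.
    let byte_ptr = word_ptr.cast::<u8>().wrapping_add(2);
    let (aligned, shift, mask) = sub_word_mask_values(byte_ptr);
    assert_eq!(aligned, word_ptr);
    assert_eq!(shift, 16);
    assert_eq!(mask, 0x00ff_0000);
    println!("aligned = {aligned:p}, shift = {shift}, mask = {mask:#010x}");
}
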
2 changes: 1 addition & 1 deletion tests/api-test/src/lib.rs
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT

#![no_std]
#![allow(unused_imports)]
#![allow(unused_imports, clippy::cast_lossless)]

#[macro_use]
mod helper;
