Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions compiler/rustc_codegen_llvm/src/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -613,6 +613,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
ptr: &'ll Value,
order: rustc_codegen_ssa::common::AtomicOrdering,
size: Size,
is_volatile: bool,
) -> &'ll Value {
unsafe {
let load = llvm::LLVMRustBuildAtomicLoad(
Expand All @@ -621,6 +622,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
ptr,
UNNAMED,
AtomicOrdering::from_generic(order),
is_volatile,
);
// LLVM requires the alignment of atomic loads to be at least the size of the type.
llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
Expand Down Expand Up @@ -851,6 +853,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
ptr: &'ll Value,
order: rustc_codegen_ssa::common::AtomicOrdering,
size: Size,
is_volatile: bool,
) {
debug!("Store {:?} -> {:?}", val, ptr);
assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
Expand All @@ -860,6 +863,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
val,
ptr,
AtomicOrdering::from_generic(order),
is_volatile,
);
// LLVM requires the alignment of atomic stores to be at least the size of the type.
llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
Expand Down
1 change: 0 additions & 1 deletion compiler/rustc_codegen_llvm/src/intrinsic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
_ => bug!("the va_arg intrinsic does not work with non-scalar types"),
}
}

sym::volatile_load | sym::unaligned_volatile_load => {
let tp_ty = fn_args.type_at(0);
let ptr = args[0].immediate();
Expand Down
2 changes: 2 additions & 0 deletions compiler/rustc_codegen_llvm/src/llvm/ffi.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1970,13 +1970,15 @@ unsafe extern "C" {
PointerVal: &'a Value,
Name: *const c_char,
Order: AtomicOrdering,
isVolatile: bool,
) -> &'a Value;

pub(crate) fn LLVMRustBuildAtomicStore<'a>(
B: &Builder<'a>,
Val: &'a Value,
Ptr: &'a Value,
Order: AtomicOrdering,
isVolatile: bool,
) -> &'a Value;

pub(crate) fn LLVMRustTimeTraceProfilerInitialize();
Expand Down
44 changes: 43 additions & 1 deletion compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -396,6 +396,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
source,
parse_ordering(bx, ordering),
size,
false,
)
} else {
invalid_monomorphization(ty);
Expand All @@ -409,7 +410,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let size = bx.layout_of(ty).size;
let val = args[1].immediate();
let ptr = args[0].immediate();
bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size, false);
} else {
invalid_monomorphization(ty);
}
Expand Down Expand Up @@ -491,7 +492,48 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
}
sym::volatile_load_atomic_relaxed => {
use crate::common::AtomicOrdering;

let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
let layout = bx.layout_of(ty);
let size = layout.size;
let source = args[0].immediate();
bx.atomic_load(
bx.backend_type(layout),
source,
AtomicOrdering::Relaxed,
size,
true,
);
} else {
bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
span,
name,
ty,
});
}
return Ok(());
}
sym::volatile_store_atomic_relaxed => {
use crate::common::AtomicOrdering;

let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
let size = bx.layout_of(ty).size;
let val = args[1].immediate();
let ptr = args[0].immediate();
bx.atomic_store(val, ptr, AtomicOrdering::Relaxed, size, true);
} else {
bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
span,
name,
ty,
});
}
return Ok(());
}
sym::nontemporal_store => {
let dst = args[0].deref(bx.cx());
args[1].val.nontemporal_store(bx, dst);
Expand Down
2 changes: 2 additions & 0 deletions compiler/rustc_codegen_ssa/src/traits/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -236,6 +236,7 @@ pub trait BuilderMethods<'a, 'tcx>:
ptr: Self::Value,
order: AtomicOrdering,
size: Size,
is_volatile: bool,
) -> Self::Value;
fn load_from_place(&mut self, ty: Self::Type, place: PlaceValue<Self::Value>) -> Self::Value {
assert_eq!(place.llextra, None);
Expand Down Expand Up @@ -316,6 +317,7 @@ pub trait BuilderMethods<'a, 'tcx>:
ptr: Self::Value,
order: AtomicOrdering,
size: Size,
is_volatile: bool,
);

fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
Expand Down
8 changes: 6 additions & 2 deletions compiler/rustc_hir_analysis/src/check/intrinsic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -430,10 +430,14 @@ pub fn check_intrinsic_type(
sym::roundf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::roundf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),

sym::volatile_load | sym::unaligned_volatile_load => {
sym::volatile_load
| sym::unaligned_volatile_load
| sym::volatile_load_atomic_relaxed => {
(1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], param(0))
}
sym::volatile_store | sym::unaligned_volatile_store => {
sym::volatile_store
| sym::unaligned_volatile_store
| sym::volatile_store_atomic_relaxed => {
(1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)
}

Expand Down
13 changes: 11 additions & 2 deletions compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -604,19 +604,28 @@ extern "C" void LLVMRustSetAllowReassoc(LLVMValueRef V) {

// Builds an atomic load of `Ty` from `Source` with the given atomic
// ordering, optionally marking the instruction `volatile` as well so that
// optimizers may not elide it or merge it with neighboring accesses.
//
// NOTE(review): the Rust-side declaration passes a Rust `bool` for the
// volatile flag, so the parameter here must be C++ `bool` — not `LLVMBool`,
// which is `int` and would disagree with the Rust ABI for this argument.
extern "C" LLVMValueRef
LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Source,
                        const char *Name, LLVMAtomicOrdering Order,
                        bool IsVolatile) {
  Value *Ptr = unwrap(Source);
  LoadInst *LI = unwrap(B)->CreateLoad(unwrap(Ty), Ptr, Name);
  LI->setAtomic(fromRust(Order));
  // Unconditional set is equivalent to the guarded form: loads default to
  // non-volatile, so `setVolatile(false)` is a no-op.
  LI->setVolatile(IsVolatile);
  return wrap(LI);
}

// Builds an atomic store of `V` to `Target` with the given atomic ordering,
// optionally marking the instruction `volatile` as well.
//
// NOTE(review): as with LLVMRustBuildAtomicLoad, the volatile flag must be
// C++ `bool` to match the Rust `bool` in the FFI declaration; `LLVMBool`
// (an `int`) would be an ABI mismatch for this argument.
extern "C" LLVMValueRef LLVMRustBuildAtomicStore(LLVMBuilderRef B,
                                                 LLVMValueRef V,
                                                 LLVMValueRef Target,
                                                 LLVMAtomicOrdering Order,
                                                 bool IsVolatile) {
  StoreInst *SI = unwrap(B)->CreateStore(unwrap(V), unwrap(Target));
  SI->setAtomic(fromRust(Order));
  // Stores default to non-volatile, so setting unconditionally is safe.
  SI->setVolatile(IsVolatile);
  return wrap(SI);
}

Expand Down
2 changes: 2 additions & 0 deletions compiler/rustc_span/src/symbol.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2264,8 +2264,10 @@ symbols! {
volatile_copy_memory,
volatile_copy_nonoverlapping_memory,
volatile_load,
volatile_load_atomic_relaxed,
volatile_set_memory,
volatile_store,
volatile_store_atomic_relaxed,
vreg,
vreg_low16,
vsx,
Expand Down
20 changes: 20 additions & 0 deletions library/core/src/intrinsics/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1841,6 +1841,26 @@ pub unsafe fn unaligned_volatile_load<T>(src: *const T) -> T;
#[rustc_diagnostic_item = "intrinsics_unaligned_volatile_store"]
pub unsafe fn unaligned_volatile_store<T>(dst: *mut T, val: T);

/// Performs a volatile load from the `src` pointer, additionally giving the
/// load `Relaxed` atomic ordering.
///
/// # Safety
///
/// `src` must be properly aligned for `T`, and `T` must be a type supported
/// for lock-free atomic operations on the target (codegen accepts integer
/// and raw-pointer types; other types are rejected at monomorphization).
#[rustc_intrinsic]
#[rustc_nounwind]
#[cfg(not(bootstrap))]
pub unsafe fn volatile_load_atomic_relaxed<T>(src: *const T) -> T;

/// Performs a volatile store of `val` to the `dst` pointer, additionally
/// giving the store `Relaxed` atomic ordering.
///
/// # Safety
///
/// `dst` must be properly aligned for `T`, and `T` must be a type supported
/// for lock-free atomic operations on the target (codegen accepts integer
/// and raw-pointer types; other types are rejected at monomorphization).
#[rustc_intrinsic]
#[rustc_nounwind]
#[cfg(not(bootstrap))]
pub unsafe fn volatile_store_atomic_relaxed<T>(dst: *mut T, val: T);

/// Returns the square root of an `f16`
///
/// The stabilized version of this intrinsic is
Expand Down
Loading
Loading