2 changes: 1 addition & 1 deletion src/policy/lockfreeimmortalspace.rs
@@ -151,7 +151,7 @@ impl<VM: VMBinding> Space<VM> for LockFreeImmortalSpace<VM> {
})
.expect("update cursor failed");
if start + bytes > self.limit {
if alloc_options.on_fail.allow_oom_call() {
if alloc_options.allow_oom_call() {
panic!("OutOfMemory");
} else {
return Address::ZERO;
55 changes: 35 additions & 20 deletions src/policy/space.rs
@@ -85,7 +85,7 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
alloc_options: AllocationOptions,
) -> bool {
if self.will_oom_on_acquire(size) {
if alloc_options.on_fail.allow_oom_call() {
if alloc_options.allow_oom_call() {
VM::VMCollection::out_of_memory(
tls,
crate::util::alloc::AllocationError::HeapOutOfMemory,
@@ -108,35 +108,44 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
"The requested pages is larger than the max heap size. Is will_go_oom_on_acquire used before acquring memory?"
);

// Should we poll to attempt to GC?
trace!("Reserving pages");
let pr = self.get_page_resource();
let pages_reserved = pr.reserve_pages(pages);
trace!("Pages reserved");

// Should we poll before acquring pages from page resources so that it can trigger a GC?
// - If tls is collector, we cannot attempt a GC.
// - If gc is disabled, we cannot attempt a GC.
// - If overcommit is allowed, we don't attempt a GC.
// FIXME: We should allow polling while also allowing over-committing.
// We should change the allocation interface.
// - If the allocation option explicitly disables eager polling, we don't poll now.
let should_poll = VM::VMActivePlan::is_mutator(tls)
&& VM::VMCollection::is_collection_enabled()
&& !alloc_options.on_fail.allow_overcommit();
&& alloc_options.eager_polling;

trace!("Reserving pages");
let pr = self.get_page_resource();
let pages_reserved = pr.reserve_pages(pages);
trace!("Pages reserved");
trace!("Polling ..");
// If we should poll eagerly, do it now. Record if it has triggered a GC.
// If we should not poll eagerly, GC is not triggered.
let gc_triggered = should_poll && {
trace!("Polling ..");
self.get_gc_trigger().poll(false, Some(self.as_space()))
};

// The actual decision tree.
if should_poll && self.get_gc_trigger().poll(false, Some(self.as_space())) {
self.not_acquiring(tls, alloc_options, pr, pages_reserved, false);
Address::ZERO
} else {
debug!("Collection not required");
// We can try to get pages if
// - GC is not triggered, or
// - GC is triggered, but we allow over-committing.
let should_get_pages = !gc_triggered || alloc_options.allow_overcommit;

// Get new pages if we should. If we didn't get new pages from the page resource for any
// reason (if we decided not to, or if we tried and failed), this function shall return a
// null address.
if should_get_pages {
if let Some(addr) = self.get_new_pages_and_initialize(tls, pages, pr, pages_reserved) {
addr
} else {
self.not_acquiring(tls, alloc_options, pr, pages_reserved, true);
self.not_acquiring(tls, alloc_options, pr, pages_reserved, false);
Address::ZERO
}
} else {
self.not_acquiring(tls, alloc_options, pr, pages_reserved, true);
Address::ZERO
}
}
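
// For reference, the branches above can be summarized as follows. This is an
// illustrative recap of the code in this hunk, not additional behavior:
//
//   eager_polling | poll triggered GC | allow_overcommit | outcome
//   --------------+-------------------+------------------+-----------------------------------------
//   false         | (no eager poll)   | any              | try to get pages from the page resource
//   true          | false             | any              | try to get pages from the page resource
//   true          | true              | true             | try to get pages anyway (over-commit)
//   true          | true              | false            | not_acquiring(..), return Address::ZERO
//
// Whenever getting pages is attempted but fails, `not_acquiring` is called as well
// and Address::ZERO is returned.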

@@ -268,11 +277,17 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
pages_reserved: usize,
attempted_allocation_and_failed: bool,
) {
assert!(
VM::VMActivePlan::is_mutator(tls),
"A non-mutator thread failed to get pages from page resource. \
Copying GC plans should compute the copying headroom carefully to prevent this."
);

// Clear the request
pr.clear_request(pages_reserved);

// If we do not want GC on fail, just return.
if !alloc_options.on_fail.allow_gc() {
// If we are not at a safepoint, return immediately.
if !alloc_options.at_safepoint {
return;
}

155 changes: 116 additions & 39 deletions src/util/alloc/allocator.rs
@@ -6,7 +6,7 @@ use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::options::Options;
use crate::MMTK;

use atomic::Atomic;
use std::cell::RefCell;
use std::sync::atomic::Ordering;
use std::sync::Arc;

@@ -28,44 +28,124 @@ pub enum AllocationError {
MmapOutOfMemory,
}

/// Behavior when an allocation fails, and a GC is expected.
#[repr(u8)]
#[derive(Copy, Clone, Default, PartialEq, bytemuck::NoUninit, Debug)]
pub enum OnAllocationFail {
/// Request the GC. This is the default behavior.
#[default]
RequestGC,
/// Instead of requesting GC, the allocation request returns with a failure value.
ReturnFailure,
/// Instead of requesting GC, the allocation request simply overcommits the memory,
/// and return a valid result at its best efforts.
OverCommit,
/// Allow specifying different behaviors with [`Allocator::alloc_with_options`].
#[repr(C)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocationOptions {
/// Should we poll *before* trying to acquire more pages from the page resource?
///
/// **The default is `true`**.
///
/// If `true`, the allocation will let the GC trigger poll before acquiring pages from the page
/// resource, giving the GC trigger a chance to schedule a collection.
///
/// If `false`, the allocation will not notify the GC trigger *before* acquiring pages from the
/// page resource. Note that if the allocation is at a safepoint (i.e. [`Self::at_safepoint`]
/// is true), it will still poll and force a GC *after* failing to get pages from the page
/// resource due to physical memory exhaustion.
pub eager_polling: bool,

/// Whether over-committing is allowed at this allocation site.
///
/// **The default is `false`**.
///
/// This option is only meaningful if [`Self::eager_polling`] is true. It has no effect if
/// `eager_polling == false`.
///
/// If `true`, the allocation will still try to acquire pages from page resources even
/// if the eager polling triggers a GC.
///
    /// If `false`, the allocation will not try to acquire pages from the page resource if the
    /// eager polling triggers a GC.
pub allow_overcommit: bool,

/// Whether the allocation is at a safepoint.
///
/// **The default is `true`**.
///
/// If `true`, the allocation attempt will block for GC if GC is triggered. It will also force
/// triggering GC and block after failing to get pages from the page resource due to physical
/// memory exhaustion. It will also call [`Collection::out_of_memory`] when out of memory.
///
/// If `false`, the allocation attempt will immediately return a null address if the allocation
/// cannot be satisfied without a GC. It will never block for GC, never force a GC, and never
/// call [`Collection::out_of_memory`]. Note that the VM can always force a GC by calling
/// [`crate::MMTK::handle_user_collection_request`] with the argument `force` being `true`.
pub at_safepoint: bool,
}
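
// For readers migrating from the removed `OnAllocationFail` enum, the test updates in this PR
// suggest the following rough correspondence. This is illustrative only; the exact polling and
// blocking behavior is governed by the field documentation above.
let _request_gc     = AllocationOptions::default();                                       // was OnAllocationFail::RequestGC
let _return_failure = AllocationOptions { at_safepoint: false, ..Default::default() };    // was OnAllocationFail::ReturnFailure
let _over_commit    = AllocationOptions { allow_overcommit: true, ..Default::default() }; // was OnAllocationFail::OverCommit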

impl OnAllocationFail {
pub(crate) fn allow_oom_call(&self) -> bool {
*self == Self::RequestGC
/// The default value for `AllocationOptions` has the same semantics as calling [`Allocator::alloc`]
/// directly.
impl Default for AllocationOptions {
fn default() -> Self {
Self {
eager_polling: true,
allow_overcommit: false,
at_safepoint: true,
}
}
pub(crate) fn allow_gc(&self) -> bool {
*self == Self::RequestGC
}

impl AllocationOptions {
pub(crate) fn is_default(&self) -> bool {
*self == AllocationOptions::default()
}
pub(crate) fn allow_overcommit(&self) -> bool {
*self == Self::OverCommit

/// Whether this allocation allows calling [`Collection::out_of_memory`].
///
    /// It is allowed if and only if the allocation is at a safepoint.
pub(crate) fn allow_oom_call(&self) -> bool {
self.at_safepoint
}
}
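
// A minimal usage sketch. Assumptions: `MyVM` is a placeholder for the binding's `VMBinding`
// implementation, and `memory_manager::alloc_with_options` takes the argument order used by the
// tests later in this PR; this is a sketch, not a definitive API reference.
use mmtk::memory_manager;
use mmtk::util::alloc::AllocationOptions;
use mmtk::util::Address;
use mmtk::{AllocationSemantics, Mutator};

/// Try to allocate without ever blocking for GC or calling `Collection::out_of_memory`.
/// Returns `None` if the request cannot be satisfied without a GC.
fn try_alloc_no_gc(mutator: &mut Mutator<MyVM>, size: usize) -> Option<Address> {
    let addr = memory_manager::alloc_with_options(
        mutator,
        size,
        8, // alignment
        0, // offset
        AllocationSemantics::Default,
        AllocationOptions {
            at_safepoint: false, // never block for GC; a failed request returns a null address
            ..Default::default()
        },
    );
    (!addr.is_zero()).then_some(addr)
}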

/// Allow specifying different behaviors with [`Allocator::alloc_with_options`].
#[repr(C)]
#[derive(Copy, Clone, Default, PartialEq, bytemuck::NoUninit, Debug)]
pub struct AllocationOptions {
/// When the allocation fails and a GC is originally expected, on_fail
/// allows a different behavior to avoid the GC.
pub on_fail: OnAllocationFail,
/// A wrapper used by [`AllocatorContext`] to hold an [`AllocationOptions`] that can be modified by
/// the thread that owns it.
///
/// All [`Allocator`] instances in `Allocators` share one `AllocationOptions` instance, and it will
/// only be accessed by the mutator (via `Mutator::allocators`) or the GC worker (via
/// `GCWorker::copy`) that owns it. Rust doesn't like multiple mutable references pointing to a
/// shared data structure. We cannot use [`atomic::Atomic`] because `AllocationOptions` has
/// multiple fields. We wrap it in a `RefCell` to make it internally mutable.
///
/// Note: The allocation options are accessed every time [`Allocator::alloc_with_options`] is
/// called. Because that API function should only be called on allocation slow paths, we believe
/// `RefCell` is good enough for performance. If it is too slow, we may consider `UnsafeCell`. If
/// that is still too slow, we should consider changing the API to make the allocation options a
/// persistent per-mutator value, and allow the VM binding to set the value via a new API function.
struct AllocationOptionsHolder {
alloc_options: RefCell<AllocationOptions>,
}

impl AllocationOptions {
pub(crate) fn is_default(&self) -> bool {
*self == AllocationOptions::default()
/// Strictly speaking, `AllocationOptionsHolder` isn't `Sync`: two threads cannot set or clear the
/// same `AllocationOptionsHolder` at the same time. However, both `Mutator` and `GCWorker` are
/// `Send`, and both of them own `Allocators` and require its field `Arc<AllocatorContext>` to be
/// `Send`, which requires `AllocatorContext` to be `Sync`, which in turn requires
/// `AllocationOptionsHolder` to be `Sync`. (Note that `Arc<T>` can be cloned and given to another
/// thread, and Rust expects `T` to be `Sync`, too. We never share `AllocatorContext` between
/// threads, only between multiple `Allocator` instances within the same `Allocators` instance,
/// but Rust can't figure this out.)
unsafe impl Sync for AllocationOptionsHolder {}

impl AllocationOptionsHolder {
pub fn new(alloc_options: AllocationOptions) -> Self {
Self {
alloc_options: RefCell::new(alloc_options),
}
}
pub fn set_alloc_options(&self, options: AllocationOptions) {
let mut alloc_options = self.alloc_options.borrow_mut();
*alloc_options = options;
}

pub fn clear_alloc_options(&self) {
let mut alloc_options = self.alloc_options.borrow_mut();
*alloc_options = AllocationOptions::default();
}

pub fn get_alloc_options(&self) -> AllocationOptions {
let alloc_options = self.alloc_options.borrow();
*alloc_options
}
}

@@ -180,7 +260,7 @@ pub(crate) fn assert_allocation_args<VM: VMBinding>(size: usize, align: usize, o

/// The context an allocator needs to access in order to perform allocation.
pub struct AllocatorContext<VM: VMBinding> {
pub alloc_options: Atomic<AllocationOptions>,
alloc_options: AllocationOptionsHolder,
pub state: Arc<GlobalState>,
pub options: Arc<Options>,
pub gc_trigger: Arc<GCTrigger<VM>>,
@@ -191,7 +271,7 @@ pub struct AllocatorContext<VM: VMBinding> {
impl<VM: VMBinding> AllocatorContext<VM> {
pub fn new(mmtk: &MMTK<VM>) -> Self {
Self {
alloc_options: Atomic::new(AllocationOptions::default()),
alloc_options: AllocationOptionsHolder::new(AllocationOptions::default()),
state: mmtk.state.clone(),
options: mmtk.options.clone(),
gc_trigger: mmtk.gc_trigger.clone(),
@@ -201,16 +281,15 @@ impl<VM: VMBinding> AllocatorContext<VM> {
}

pub fn set_alloc_options(&self, options: AllocationOptions) {
self.alloc_options.store(options, Ordering::Relaxed);
self.alloc_options.set_alloc_options(options);
}

pub fn clear_alloc_options(&self) {
self.alloc_options
.store(AllocationOptions::default(), Ordering::Relaxed);
self.alloc_options.clear_alloc_options();
}

pub fn get_alloc_options(&self) -> AllocationOptions {
self.alloc_options.load(Ordering::Relaxed)
self.alloc_options.get_alloc_options()
}
}
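
// To illustrate how these accessors are expected to be used around an allocation. This is a
// hypothetical sketch only: the actual `Allocator::alloc_with_options` implementation is not
// shown in this diff, and the free function below assumes the surrounding module's items are
// in scope.
fn alloc_with_options_sketch<VM: VMBinding>(
    allocator: &mut dyn Allocator<VM>,
    size: usize,
    align: usize,
    offset: usize,
    options: AllocationOptions,
) -> Address {
    // Publish the per-call options so slow-path code (e.g. `Space::acquire`)
    // can read them via `get_context().get_alloc_options()`.
    allocator.get_context().set_alloc_options(options);
    let result = allocator.alloc(size, align, offset);
    // Restore the defaults so subsequent plain `alloc` calls are unaffected.
    allocator.get_context().clear_alloc_options();
    result
}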

@@ -367,9 +446,7 @@ pub trait Allocator<VM: VMBinding>: Downcast {
return result;
}

if result.is_zero()
&& self.get_context().get_alloc_options().on_fail == OnAllocationFail::ReturnFailure
{
if result.is_zero() && !self.get_context().get_alloc_options().allow_oom_call() {
return result;
}

1 change: 0 additions & 1 deletion src/util/alloc/mod.rs
@@ -6,7 +6,6 @@ pub use allocator::fill_alignment_gap;
pub use allocator::AllocationError;
pub use allocator::AllocationOptions;
pub use allocator::Allocator;
pub use allocator::OnAllocationFail;

/// A list of all the allocators, embedded in Mutator
pub(crate) mod allocators;
src/vm/tests/mock_tests/mock_test_allocate_no_gc_oom_on_acquire.rs
@@ -1,6 +1,6 @@
use super::mock_test_prelude::*;

use crate::util::alloc::allocator::{AllocationOptions, OnAllocationFail};
use crate::util::alloc::allocator::AllocationOptions;
use crate::AllocationSemantics;

/// This test will allocate an object that is larger than the heap size. The call will fail.
@@ -21,7 +21,8 @@ pub fn allocate_no_gc_oom_on_acquire() {
0,
AllocationSemantics::Default,
AllocationOptions {
on_fail: OnAllocationFail::ReturnFailure,
at_safepoint: false,
..Default::default()
},
);
// We should get zero.
5 changes: 3 additions & 2 deletions src/vm/tests/mock_tests/mock_test_allocate_no_gc_simple.rs
@@ -1,6 +1,6 @@
use super::mock_test_prelude::*;

use crate::util::alloc::allocator::{AllocationOptions, OnAllocationFail};
use crate::util::alloc::allocator::AllocationOptions;
use crate::AllocationSemantics;

/// This test will call alloc_with_options in a loop, and eventually fill up the heap.
@@ -26,7 +26,8 @@ pub fn allocate_no_gc_simple() {
0,
AllocationSemantics::Default,
AllocationOptions {
on_fail: OnAllocationFail::ReturnFailure,
at_safepoint: false,
..Default::default()
},
);
if last_result.is_zero() {
7 changes: 5 additions & 2 deletions src/vm/tests/mock_tests/mock_test_allocate_overcommit.rs
@@ -1,6 +1,8 @@
// GITHUB-CI: MMTK_PLAN=NoGC
Member: Why is this test only for NoGC?

Collaborator (Author): Well, it doesn't have to. The test is about the behavior of allocation after exceeding the heap size, and it is not about the GC. So it doesn't really matter which plan it is. But I changed it to "all" just in case any plan triggers GC differently (mainly ConcurrentImmix).


use super::mock_test_prelude::*;

use crate::util::alloc::allocator::{AllocationOptions, OnAllocationFail};
use crate::util::alloc::allocator::AllocationOptions;
use crate::AllocationSemantics;

/// This test will call alloc_with_options in a loop, and eventually fill up the heap.
@@ -26,7 +28,8 @@ pub fn allocate_overcommit() {
0,
AllocationSemantics::Default,
AllocationOptions {
on_fail: OnAllocationFail::ReturnFailure,
allow_overcommit: true,
..Default::default()
},
);
assert!(!last_result.is_zero());
1 change: 1 addition & 0 deletions src/vm/tests/mock_tests/mod.rs
@@ -27,6 +27,7 @@ mod mock_test_allocate_align_offset;
mod mock_test_allocate_no_gc_oom_on_acquire;
mod mock_test_allocate_no_gc_simple;
mod mock_test_allocate_nonmoving;
mod mock_test_allocate_overcommit;
mod mock_test_allocate_with_disable_collection;
mod mock_test_allocate_with_initialize_collection;
mod mock_test_allocate_with_re_enable_collection;