8340426: ZGC: Move defragment out of the allocation path
Reviewed-by: aboldtch, jsikstro, eosterlund
kstefanj committed Oct 4, 2024
1 parent a63ac5a commit ec020f3
Showing 5 changed files with 77 additions and 33 deletions.
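What moved, in brief: is_alloc_satisfied() used to reject a cached page that needed defragmenting, putting the remap work on the allocation path; after this change the new prepare_to_recycle() defragments a page on the free path, before it re-enters the page cache. The toy model below sketches that flow under simplified assumptions (ToyPage, ToyAllocator, kReserved and the single-offset "remap" are invented for illustration; this is not ZGC code):

// toy_defrag.cpp -- defragment-on-free in miniature.
#include <cstdint>
#include <cstdio>
#include <vector>

struct ToyPage {
  uint64_t start; // offset of the page within the reserved address space
};

class ToyAllocator {
 public:
  static constexpr uint64_t kReserved = 1 << 20; // reserved address space size

  // Same spirit as ZPageAllocator::should_defragment(): a page in the
  // upper half of the address space is worth remapping to a lower one.
  static bool should_defragment(const ToyPage& page) {
    return page.start >= kReserved / 2;
  }

  // The remap now happens when the page is freed, before caching...
  void free_page(ToyPage page, bool allow_defragment) {
    if (allow_defragment && should_defragment(page)) {
      page.start = 0; // stand-in for the real unmap/map to a low address
      std::puts("defragmented on free");
    }
    _cache.push_back(page);
  }

  // ...so a later allocation is a plain cache hit, with no defragment check.
  ToyPage alloc_page() {
    const ToyPage page = _cache.back();
    _cache.pop_back();
    return page;
  }

 private:
  std::vector<ToyPage> _cache;
};

int main() {
  ToyAllocator allocator;
  allocator.free_page(ToyPage{ToyAllocator::kReserved - 4096}, true /* allow_defragment */);
  const ToyPage page = allocator.alloc_page();
  std::printf("allocated page at offset %llu\n",
              static_cast<unsigned long long>(page.start));
  return 0;
}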
6 changes: 3 additions & 3 deletions src/hotspot/share/gc/z/zHeap.cpp
@@ -241,10 +241,10 @@ void ZHeap::undo_alloc_page(ZPage* page) {
   log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                 p2i(Thread::current()), ZUtils::thread_name(), p2i(page), page->size());
 
-  free_page(page);
+  free_page(page, false /* allow_defragment */);
 }
 
-void ZHeap::free_page(ZPage* page) {
+void ZHeap::free_page(ZPage* page, bool allow_defragment) {
   // Remove page table entry
   _page_table.remove(page);
 
@@ -253,7 +253,7 @@ void ZHeap::free_page(ZPage* page) {
   }
 
   // Free page
-  _page_allocator.free_page(page);
+  _page_allocator.free_page(page, allow_defragment);
 }
 
 size_t ZHeap::free_empty_pages(const ZArray<ZPage*>* pages) {
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/z/zHeap.hpp
@@ -104,7 +104,7 @@ class ZHeap {
   // Page allocation
   ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age);
   void undo_alloc_page(ZPage* page);
-  void free_page(ZPage* page);
+  void free_page(ZPage* page, bool allow_defragment);
   size_t free_empty_pages(const ZArray<ZPage*>* pages);
 
   // Object allocation
91 changes: 66 additions & 25 deletions src/hotspot/share/gc/z/zPageAllocator.cpp
@@ -275,7 +275,7 @@ bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
     workers->run_all(&task);
   }
 
-  free_page(page);
+  free_page(page, false /* allow_defragment */);
 
   return true;
 }
@@ -462,6 +462,38 @@ void ZPageAllocator::destroy_page(ZPage* page) {
   safe_destroy_page(page);
 }
 
+bool ZPageAllocator::should_defragment(const ZPage* page) const {
+  // A small page can end up at a high address (second half of the address space)
+  // if we've split a larger page or we have a constrained address space. To help
+  // fight address space fragmentation we remap such pages to a lower address, if
+  // a lower address is available.
+  return page->type() == ZPageType::small &&
+         page->start() >= to_zoffset(_virtual.reserved() / 2) &&
+         page->start() > _virtual.lowest_available_address();
+}
+
+ZPage* ZPageAllocator::defragment_page(ZPage* page) {
+  // Harvest the physical memory (which is committed)
+  ZPhysicalMemory pmem;
+  ZPhysicalMemory& old_pmem = page->physical_memory();
+  pmem.add_segments(old_pmem);
+  old_pmem.remove_segments();
+
+  _unmapper->unmap_and_destroy_page(page);
+
+  // Allocate new virtual memory at a low address
+  const ZVirtualMemory vmem = _virtual.alloc(pmem.size(), true /* force_low_address */);
+
+  // Create the new page and map it
+  ZPage* new_page = new ZPage(ZPageType::small, vmem, pmem);
+  map_page(new_page);
+
+  // Update statistics
+  ZStatInc(ZCounterDefragment);
+
+  return new_page;
+}
+
 bool ZPageAllocator::is_alloc_allowed(size_t size) const {
   const size_t available = _current_max_capacity - _used - _claimed;
   return available >= size;
@@ -623,16 +655,6 @@ ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
   return new ZPage(allocation->type(), vmem, pmem);
 }
 
-bool ZPageAllocator::should_defragment(const ZPage* page) const {
-  // A small page can end up at a high address (second half of the address space)
-  // if we've split a larger page or we have a constrained address space. To help
-  // fight address space fragmentation we remap such pages to a lower address, if
-  // a lower address is available.
-  return page->type() == ZPageType::small &&
-         page->start() >= to_zoffset(_virtual.reserved() / 2) &&
-         page->start() > _virtual.lowest_available_address();
-}
-
 bool ZPageAllocator::is_alloc_satisfied(ZPageAllocation* allocation) const {
   // The allocation is immediately satisfied if the list of pages contains
   // exactly one page, with the type and size that was requested. However,
@@ -652,12 +674,6 @@ bool ZPageAllocator::is_alloc_satisfied(ZPageAllocation* allocation) const {
     return false;
   }
 
-  if (should_defragment(page)) {
-    // Defragment address space
-    ZStatInc(ZCounterDefragment);
-    return false;
-  }
-
   // Allocation immediately satisfied
   return true;
 }
@@ -773,6 +789,18 @@ void ZPageAllocator::satisfy_stalled() {
   }
 }
 
+ZPage* ZPageAllocator::prepare_to_recycle(ZPage* page, bool allow_defragment) {
+  // Make sure we have a page that is safe to recycle
+  ZPage* const to_recycle = _safe_recycle.register_and_clone_if_activated(page);
+
+  // Defragment the page before recycle if allowed and needed
+  if (allow_defragment && should_defragment(to_recycle)) {
+    return defragment_page(to_recycle);
+  }
+
+  return to_recycle;
+}
+
 void ZPageAllocator::recycle_page(ZPage* page) {
   // Set time when last used
   page->set_last_used();
@@ -781,9 +809,11 @@ void ZPageAllocator::recycle_page(ZPage* page) {
   // Cache page
   _cache.free_page(page);
 }
 
-void ZPageAllocator::free_page(ZPage* page) {
+void ZPageAllocator::free_page(ZPage* page, bool allow_defragment) {
   const ZGenerationId generation_id = page->generation_id();
-  ZPage* const to_recycle = _safe_recycle.register_and_clone_if_activated(page);
+
+  // Prepare page for recycling before taking the lock
+  ZPage* const to_recycle = prepare_to_recycle(page, allow_defragment);
 
   ZLocker<ZLock> locker(&_lock);
@@ -800,19 +830,25 @@ void ZPageAllocator::free_page(ZPage* page) {
 }
 
 void ZPageAllocator::free_pages(const ZArray<ZPage*>* pages) {
-  ZArray<ZPage*> to_recycle;
+  ZArray<ZPage*> to_recycle_pages;
 
   size_t young_size = 0;
   size_t old_size = 0;
 
+  // Prepare pages for recycling before taking the lock
   ZArrayIterator<ZPage*> pages_iter(pages);
   for (ZPage* page; pages_iter.next(&page);) {
     if (page->is_young()) {
       young_size += page->size();
     } else {
       old_size += page->size();
     }
-    to_recycle.push(_safe_recycle.register_and_clone_if_activated(page));
+
+    // Prepare to recycle
+    ZPage* const to_recycle = prepare_to_recycle(page, true /* allow_defragment */);
+
+    // Register for recycling
+    to_recycle_pages.push(to_recycle);
   }
 
   ZLocker<ZLock> locker(&_lock);
@@ -823,7 +859,7 @@ void ZPageAllocator::free_pages(const ZArray<ZPage*>* pages) {
   decrease_used_generation(ZGenerationId::old, old_size);
 
   // Free pages
-  ZArrayIterator<ZPage*> iter(&to_recycle);
+  ZArrayIterator<ZPage*> iter(&to_recycle_pages);
   for (ZPage* page; iter.next(&page);) {
     recycle_page(page);
   }
@@ -833,11 +869,16 @@ void ZPageAllocator::free_pages(const ZArray<ZPage*>* pages) {
 }
 
 void ZPageAllocator::free_pages_alloc_failed(ZPageAllocation* allocation) {
-  ZArray<ZPage*> to_recycle;
+  ZArray<ZPage*> to_recycle_pages;
 
+  // Prepare pages for recycling before taking the lock
   ZListRemoveIterator<ZPage> allocation_pages_iter(allocation->pages());
   for (ZPage* page; allocation_pages_iter.next(&page);) {
-    to_recycle.push(_safe_recycle.register_and_clone_if_activated(page));
+    // Prepare to recycle
+    ZPage* const to_recycle = prepare_to_recycle(page, false /* allow_defragment */);
+
+    // Register for recycling
+    to_recycle_pages.push(to_recycle);
   }
 
   ZLocker<ZLock> locker(&_lock);
@@ -849,7 +890,7 @@ void ZPageAllocator::free_pages_alloc_failed(ZPageAllocation* allocation) {
   size_t freed = 0;
 
   // Free any allocated/flushed pages
-  ZArrayIterator<ZPage*> iter(&to_recycle);
+  ZArrayIterator<ZPage*> iter(&to_recycle_pages);
   for (ZPage* page; iter.next(&page);) {
     freed += page->size();
     recycle_page(page);
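Note the ordering in free_page() and free_pages() above: prepare_to_recycle() runs before ZLocker takes _lock, so the potentially expensive unmap/map in defragment_page() stays outside the allocator's critical section. A minimal sketch of that pattern (stub Page type and invented allocator_lock; not ZGC code):

// lock_scope.cpp -- expensive preparation outside the lock, cheap
// bookkeeping inside it.
#include <cstdio>
#include <mutex>

struct Page {};

std::mutex allocator_lock;

// Stand-in for safe-recycle registration plus the unmap/map that
// defragment_page() performs when allowed and needed.
Page* prepare_to_recycle(Page* page, bool allow_defragment) {
  std::puts(allow_defragment ? "prepared (may defragment)" : "prepared");
  return page;
}

// Stand-in for the cheap bookkeeping that returns the page to the cache.
void recycle_page(Page* page) {
  (void)page;
}

void free_page(Page* page, bool allow_defragment) {
  // Do the potentially slow work first, with no lock held...
  Page* const to_recycle = prepare_to_recycle(page, allow_defragment);

  // ...then keep the critical section down to the bookkeeping.
  std::lock_guard<std::mutex> guard(allocator_lock);
  recycle_page(to_recycle);
}

int main() {
  Page page;
  free_page(&page, true /* allow_defragment */);
  return 0;
}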
7 changes: 5 additions & 2 deletions src/hotspot/share/gc/z/zPageAllocator.hpp
@@ -104,13 +104,15 @@ class ZPageAllocator {
 
   void destroy_page(ZPage* page);
 
+  bool should_defragment(const ZPage* page) const;
+  ZPage* defragment_page(ZPage* page);
+
   bool is_alloc_allowed(size_t size) const;
 
   bool alloc_page_common_inner(ZPageType type, size_t size, ZList<ZPage>* pages);
   bool alloc_page_common(ZPageAllocation* allocation);
   bool alloc_page_stall(ZPageAllocation* allocation);
   bool alloc_page_or_stall(ZPageAllocation* allocation);
-  bool should_defragment(const ZPage* page) const;
   bool is_alloc_satisfied(ZPageAllocation* allocation) const;
   ZPage* alloc_page_create(ZPageAllocation* allocation);
   ZPage* alloc_page_finalize(ZPageAllocation* allocation);
@@ -149,9 +151,10 @@ class ZPageAllocator {
   void reset_statistics(ZGenerationId id);
 
   ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age);
+  ZPage* prepare_to_recycle(ZPage* page, bool allow_defragment);
   void recycle_page(ZPage* page);
   void safe_destroy_page(ZPage* page);
-  void free_page(ZPage* page);
+  void free_page(ZPage* page, bool allow_defragment);
   void free_pages(const ZArray<ZPage*>* pages);
 
   void enable_safe_destroy() const;
4 changes: 2 additions & 2 deletions src/hotspot/share/gc/z/zRelocate.cpp
@@ -411,7 +411,7 @@ static void retire_target_page(ZGeneration* generation, ZPage* page) {
   // relocate the remaining objects, leaving the target page empty when
   // relocation completed.
   if (page->used() == 0) {
-    ZHeap::heap()->free_page(page);
+    ZHeap::heap()->free_page(page, true /* allow_defragment */);
   }
 }
 
@@ -1012,7 +1012,7 @@ class ZRelocateWork : public StackObj {
       page->log_msg(" (relocate page done normal)");
 
       // Free page
-      ZHeap::heap()->free_page(page);
+      ZHeap::heap()->free_page(page, true /* allow_defragment */);
     }
   }
 };
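Taken together, the call sites choose allow_defragment to match their path: the relocation code in zRelocate.cpp passes true, making page free after relocation the point where defragmentation now happens, while undo_alloc_page(), prime_cache() and free_pages_alloc_failed() pass false, keeping the undo, startup and allocation-failure paths free of remapping work.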
