8340426: ZGC: Move defragment out of the allocation path

Reviewed-by: aboldtch, jsikstro, eosterlund
Author: Stefan Johansson
Date:   2024-10-04 08:26:35 +00:00
parent a63ac5a699
commit ec020f3fc9
5 changed files with 77 additions and 33 deletions
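
In short: before this commit, ZPageAllocator::is_alloc_satisfied() returned false for an otherwise satisfied allocation whenever the page should be defragmented, so the remap was paid on the allocation path; after it, pages are defragmented when they are freed, via the new prepare_to_recycle(). A minimal sketch of the new flow, using hypothetical stand-in types rather than the actual HotSpot sources:

    #include <cstdio>

    // Hypothetical stand-in for ZPage; models only what the sketch needs.
    struct Page {
      bool at_high_address;  // stands in for should_defragment()
    };

    // Stands in for defragment_page(): unmap, then remap at a lower address.
    static Page* defragment(Page* p) {
      p->at_high_address = false;
      return p;
    }

    // Mirrors the shape of the new ZPageAllocator::prepare_to_recycle():
    // the free path, not the allocation path, now pays for the remap.
    static Page* prepare_to_recycle(Page* p, bool allow_defragment) {
      if (allow_defragment && p->at_high_address) {
        return defragment(p);
      }
      return p;
    }

    int main() {
      Page page{true};
      prepare_to_recycle(&page, true /* allow_defragment */);
      std::printf("still at high address: %d\n", page.at_high_address);  // prints 0
    }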

src/hotspot/share/gc/z/zHeap.cpp

@@ -241,10 +241,10 @@ void ZHeap::undo_alloc_page(ZPage* page) {
   log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                 p2i(Thread::current()), ZUtils::thread_name(), p2i(page), page->size());

-  free_page(page);
+  free_page(page, false /* allow_defragment */);
 }

-void ZHeap::free_page(ZPage* page) {
+void ZHeap::free_page(ZPage* page, bool allow_defragment) {
   // Remove page table entry
   _page_table.remove(page);
@@ -253,7 +253,7 @@ void ZHeap::free_page(ZPage* page) {
   }

   // Free page
-  _page_allocator.free_page(page);
+  _page_allocator.free_page(page, allow_defragment);
 }

 size_t ZHeap::free_empty_pages(const ZArray<ZPage*>* pages) {

src/hotspot/share/gc/z/zHeap.hpp

@@ -104,7 +104,7 @@ public:
   // Page allocation
   ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age);
   void undo_alloc_page(ZPage* page);
-  void free_page(ZPage* page);
+  void free_page(ZPage* page, bool allow_defragment);
   size_t free_empty_pages(const ZArray<ZPage*>* pages);

   // Object allocation

src/hotspot/share/gc/z/zPageAllocator.cpp

@@ -275,7 +275,7 @@ bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
     workers->run_all(&task);
   }

-  free_page(page);
+  free_page(page, false /* allow_defragment */);

   return true;
 }
@@ -462,6 +462,38 @@ void ZPageAllocator::destroy_page(ZPage* page) {
   safe_destroy_page(page);
 }

+bool ZPageAllocator::should_defragment(const ZPage* page) const {
+  // A small page can end up at a high address (second half of the address space)
+  // if we've split a larger page or we have a constrained address space. To help
+  // fight address space fragmentation we remap such pages to a lower address, if
+  // a lower address is available.
+  return page->type() == ZPageType::small &&
+         page->start() >= to_zoffset(_virtual.reserved() / 2) &&
+         page->start() > _virtual.lowest_available_address();
+}
+
+ZPage* ZPageAllocator::defragment_page(ZPage* page) {
+  // Harvest the physical memory (which is committed)
+  ZPhysicalMemory pmem;
+  ZPhysicalMemory& old_pmem = page->physical_memory();
+  pmem.add_segments(old_pmem);
+  old_pmem.remove_segments();
+
+  _unmapper->unmap_and_destroy_page(page);
+
+  // Allocate new virtual memory at a low address
+  const ZVirtualMemory vmem = _virtual.alloc(pmem.size(), true /* force_low_address */);
+
+  // Create the new page and map it
+  ZPage* new_page = new ZPage(ZPageType::small, vmem, pmem);
+  map_page(new_page);
+
+  // Update statistics
+  ZStatInc(ZCounterDefragment);
+
+  return new_page;
+}
+
 bool ZPageAllocator::is_alloc_allowed(size_t size) const {
   const size_t available = _current_max_capacity - _used - _claimed;
   return available >= size;
@@ -623,16 +655,6 @@ ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
   return new ZPage(allocation->type(), vmem, pmem);
 }

-bool ZPageAllocator::should_defragment(const ZPage* page) const {
-  // A small page can end up at a high address (second half of the address space)
-  // if we've split a larger page or we have a constrained address space. To help
-  // fight address space fragmentation we remap such pages to a lower address, if
-  // a lower address is available.
-  return page->type() == ZPageType::small &&
-         page->start() >= to_zoffset(_virtual.reserved() / 2) &&
-         page->start() > _virtual.lowest_available_address();
-}
-
 bool ZPageAllocator::is_alloc_satisfied(ZPageAllocation* allocation) const {
   // The allocation is immediately satisfied if the list of pages contains
   // exactly one page, with the type and size that was requested. However,
@@ -652,12 +674,6 @@ bool ZPageAllocator::is_alloc_satisfied(ZPageAllocation* allocation) const {
     return false;
   }

-  if (should_defragment(page)) {
-    // Defragment address space
-    ZStatInc(ZCounterDefragment);
-    return false;
-  }
-
   // Allocation immediately satisfied
   return true;
 }
@@ -773,6 +789,18 @@ void ZPageAllocator::satisfy_stalled() {
   }
 }

+ZPage* ZPageAllocator::prepare_to_recycle(ZPage* page, bool allow_defragment) {
+  // Make sure we have a page that is safe to recycle
+  ZPage* const to_recycle = _safe_recycle.register_and_clone_if_activated(page);
+
+  // Defragment the page before recycle if allowed and needed
+  if (allow_defragment && should_defragment(to_recycle)) {
+    return defragment_page(to_recycle);
+  }
+
+  return to_recycle;
+}
+
 void ZPageAllocator::recycle_page(ZPage* page) {
   // Set time when last used
   page->set_last_used();
@@ -781,9 +809,11 @@ void ZPageAllocator::recycle_page(ZPage* page) {
   _cache.free_page(page);
 }

-void ZPageAllocator::free_page(ZPage* page) {
+void ZPageAllocator::free_page(ZPage* page, bool allow_defragment) {
   const ZGenerationId generation_id = page->generation_id();
-  ZPage* const to_recycle = _safe_recycle.register_and_clone_if_activated(page);
+
+  // Prepare page for recycling before taking the lock
+  ZPage* const to_recycle = prepare_to_recycle(page, allow_defragment);

   ZLocker<ZLock> locker(&_lock);
@@ -800,11 +830,12 @@ void ZPageAllocator::free_page(ZPage* page) {
 }

 void ZPageAllocator::free_pages(const ZArray<ZPage*>* pages) {
-  ZArray<ZPage*> to_recycle;
+  ZArray<ZPage*> to_recycle_pages;

   size_t young_size = 0;
   size_t old_size = 0;

+  // Prepare pages for recycling before taking the lock
   ZArrayIterator<ZPage*> pages_iter(pages);
   for (ZPage* page; pages_iter.next(&page);) {
     if (page->is_young()) {
@@ -812,7 +843,12 @@ void ZPageAllocator::free_pages(const ZArray<ZPage*>* pages) {
     } else {
       old_size += page->size();
     }
-    to_recycle.push(_safe_recycle.register_and_clone_if_activated(page));
+
+    // Prepare to recycle
+    ZPage* const to_recycle = prepare_to_recycle(page, true /* allow_defragment */);
+
+    // Register for recycling
+    to_recycle_pages.push(to_recycle);
   }

   ZLocker<ZLock> locker(&_lock);
@@ -823,7 +859,7 @@ void ZPageAllocator::free_pages(const ZArray<ZPage*>* pages) {
   decrease_used_generation(ZGenerationId::old, old_size);

   // Free pages
-  ZArrayIterator<ZPage*> iter(&to_recycle);
+  ZArrayIterator<ZPage*> iter(&to_recycle_pages);
   for (ZPage* page; iter.next(&page);) {
     recycle_page(page);
   }
@@ -833,11 +869,16 @@ void ZPageAllocator::free_pages(const ZArray<ZPage*>* pages) {
 }

 void ZPageAllocator::free_pages_alloc_failed(ZPageAllocation* allocation) {
-  ZArray<ZPage*> to_recycle;
+  ZArray<ZPage*> to_recycle_pages;

+  // Prepare pages for recycling before taking the lock
   ZListRemoveIterator<ZPage> allocation_pages_iter(allocation->pages());
   for (ZPage* page; allocation_pages_iter.next(&page);) {
-    to_recycle.push(_safe_recycle.register_and_clone_if_activated(page));
+    // Prepare to recycle
+    ZPage* const to_recycle = prepare_to_recycle(page, false /* allow_defragment */);
+
+    // Register for recycling
+    to_recycle_pages.push(to_recycle);
   }

   ZLocker<ZLock> locker(&_lock);
@@ -849,7 +890,7 @@ void ZPageAllocator::free_pages_alloc_failed(ZPageAllocation* allocation) {
   size_t freed = 0;

   // Free any allocated/flushed pages
-  ZArrayIterator<ZPage*> iter(&to_recycle);
+  ZArrayIterator<ZPage*> iter(&to_recycle_pages);
   for (ZPage* page; iter.next(&page);) {
     freed += page->size();
     recycle_page(page);
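
As a worked example of the should_defragment() predicate introduced above, with hypothetical figures (real ZGC page starts are zoffsets into the reserved heap view):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical figures, chosen only to exercise the predicate.
      const uint64_t reserved         = 16ull << 40;  // 16 TB of reserved address space
      const uint64_t lowest_available = 2ull << 30;   // a free range exists down at 2 GB
      const uint64_t page_start       = 9ull << 40;   // small page in the upper half

      // Mirrors: type == small && start >= reserved / 2 && start > lowest_available
      const bool upper_half = page_start >= reserved / 2;
      const bool lower_hole = page_start > lowest_available;
      std::printf("defragment: %s\n", (upper_half && lower_hole) ? "yes" : "no");  // yes
    }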

src/hotspot/share/gc/z/zPageAllocator.hpp

@@ -104,13 +104,15 @@ private:
   void destroy_page(ZPage* page);

+  bool should_defragment(const ZPage* page) const;
+  ZPage* defragment_page(ZPage* page);
+
   bool is_alloc_allowed(size_t size) const;

   bool alloc_page_common_inner(ZPageType type, size_t size, ZList<ZPage>* pages);
   bool alloc_page_common(ZPageAllocation* allocation);
   bool alloc_page_stall(ZPageAllocation* allocation);
   bool alloc_page_or_stall(ZPageAllocation* allocation);
-  bool should_defragment(const ZPage* page) const;
   bool is_alloc_satisfied(ZPageAllocation* allocation) const;
   ZPage* alloc_page_create(ZPageAllocation* allocation);
   ZPage* alloc_page_finalize(ZPageAllocation* allocation);
@@ -149,9 +151,10 @@ public:
   void reset_statistics(ZGenerationId id);

   ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age);
+  ZPage* prepare_to_recycle(ZPage* page, bool allow_defragment);
   void recycle_page(ZPage* page);
   void safe_destroy_page(ZPage* page);
-  void free_page(ZPage* page);
+  void free_page(ZPage* page, bool allow_defragment);
   void free_pages(const ZArray<ZPage*>* pages);

   void enable_safe_destroy() const;

src/hotspot/share/gc/z/zRelocate.cpp

@@ -411,7 +411,7 @@ static void retire_target_page(ZGeneration* generation, ZPage* page) {
   // relocate the remaining objects, leaving the target page empty when
   // relocation completed.
   if (page->used() == 0) {
-    ZHeap::heap()->free_page(page);
+    ZHeap::heap()->free_page(page, true /* allow_defragment */);
   }
 }
@@ -1012,7 +1012,7 @@ public:
       page->log_msg(" (relocate page done normal)");

       // Free page
-      ZHeap::heap()->free_page(page);
+      ZHeap::heap()->free_page(page, true /* allow_defragment */);
     }
   }
};
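
Taken together, the call sites choose allow_defragment according to which path they are on, and the split can be read directly off the hunks above:

    allow_defragment == false (allocation and failure paths):
      ZHeap::undo_alloc_page                   -> free_page(page, false)
      ZPageAllocator::prime_cache              -> free_page(page, false)
      ZPageAllocator::free_pages_alloc_failed  -> prepare_to_recycle(page, false)

    allow_defragment == true (GC free and relocation paths):
      ZPageAllocator::free_pages               -> prepare_to_recycle(page, true)
      zRelocate.cpp (retire/relocate done)     -> free_page(page, true)

So a failed or undone allocation never pays for a remap, while pages returned by relocation are defragmented off the application's allocation path.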